[ { "id": 19402, "title": "DreamClean: Restoring Clean Image Using Deep Diffusion Prior", "authors": [ "Jie Xiao", "Ruili Feng", "Han Zhang", "Zhiheng Liu", "Zhantao Yang", "Yurui Zhu", "Xueyang Fu", "Kai Zhu", "Yu Liu", "Zheng-Jun Zha" ], "abstract": "Image restoration poses a garners substantial interest due to the exponential surge in demands for recovering high-quality images from diverse mobile camera devices, adverse lighting conditions, suboptimal shooting environments, and frequent image compression for efficient transmission purposes. Yet this problem gathers significant challenges as people are blind to the type of restoration the images suffer, which, is usually the case in real-day scenarios and is most urgent to solve for this field. Current research, however, heavily relies on prior knowledge of the restoration type, either explicitly through rules or implicitly through the availability of degraded-clean image pairs to define the restoration process, and consumes considerable effort to collect image pairs of vast degradation types. This paper introduces DreamClean, a training-free method that needs no degradation prior knowledge but yields high-fidelity and generality towards various types of image degradation. DreamClean embeds the degraded image back to the latent of pre-trained diffusion models and re-sample it through a carefully designed diffusion process that mimics those generating clean images. Thanks to the rich image prior in diffusion models and our novel Variance Preservation Sampling (VPS) technique, DreamClean manages to handle various different degradation types at one time and reaches far more satisfied final quality than previous competitors. DreamClean relies on elegant theoretical supports to assure its convergence to clean image when VPS has appropriate parameters, and also enjoys superior experimental performance over various challenging tasks that could be overwhelming for previous methods when degradation prior is unavailable.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=6ALuy19mPa", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19568, "title": "ConjNorm: Tractable Density Estimation for Out-of-Distribution Detection", "authors": [ "Bo Peng", "Yadan Luo", "Yonggang Zhang", "Yixuan Li", "Zhen Fang" ], "abstract": "Post-hoc out-of-distribution (OOD) detection has garnered intensive attention in reliable machine learning. Many efforts have been dedicated to deriving score functions based on logits, distances, or rigorous data distribution assumptions to identify low-scoring OOD samples. Nevertheless, these estimate scores may fail to accurately reflect the true data density or impose impractical constraints. To provide a unified perspective on density-based score design, we propose a novel theoretical framework grounded in Bregman divergence, which extends distribution considerations to encompass an exponential family of distributions. Leveraging the conjugation constraint revealed in our theorem, we introduce a \\textsc{ConjNorm} method, reframing density function design as a search for the optimal norm coefficient $p$ against the given dataset. In light of the computational challenges of normalization, we devise an unbiased and analytically tractable estimator of the partition function using the Monte Carlo-based importance sampling technique. Extensive experiments across OOD detection benchmarks empirically demonstrate that our proposed \\textsc{ConjNorm} has established a new state-of-the-art in a variety of OOD detection setups, outperforming the current best method by up to 13.25\\% and 28.19\\% (FPR95) on CIFAR-100 and ImageNet-1K, respectively.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=1pSL2cXWoz", "arxiv_id": "2402.17888", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 18010, "title": "Self-Supervised High Dynamic Range Imaging with Multi-Exposure Images in Dynamic Scenes", "authors": [ "Zhilu Zhang", "Haoyu Wang", "Shuai Liu", "Xiaotao Wang", "LEI LEI", "Wangmeng Zuo" ], "abstract": "Merging multi-exposure images is a common approach for obtaining high dynamic range (HDR) images, with the primary challenge being the avoidance of ghosting artifacts in dynamic scenes. Recent methods have proposed using deep neural networks for deghosting. However, the methods typically rely on sufficient data with HDR ground-truths, which are difficult and costly to collect. In this work, to eliminate the need for labeled data, we propose SelfHDR, a self-supervised HDR reconstruction method that only requires dynamic multi-exposure images during training. Specifically, SelfHDR learns a reconstruction network under the supervision of two complementary components, which can be constructed from multi-exposure images and focus on HDR color as well as structure, respectively. The color component is estimated from aligned multi-exposure images, while the structure one is generated through a structure-focused network that is supervised by the color component and an input reference (\\eg, medium-exposure) image. During testing, the learned reconstruction network is directly deployed to predict an HDR image. Experiments on real-world images demonstrate our SelfHDR achieves superior results against the state-of-the-art self-supervised methods, and comparable performance to supervised ones. Codes will be publicly available.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=jjiOHEcS2c", "arxiv_id": "2310.01840", "GitHub": [ "https://github.com/cszhilu1998/SelfHDR" ], "Space": [], "Model": [], "Dataset": [] }, { "id": 19537, "title": "Ferret: Refer and Ground Anything Anywhere at Any Granularity", "authors": [ "Haoxuan You", "Haotian Zhang", "Zhe Gan", "Xianzhi Du", "Bowen Zhang", "Zirui Wang", "Liangliang Cao", "Shih-Fu Chang", "Yinfei Yang" ], "abstract": "We introduce Ferret, a new Multimodal Large Language Model (MLLM) capable of understanding spatial referring of any shape or granularity within an image and accurately grounding open-vocabulary descriptions. To unify referring and grounding in the LLM paradigm, Ferret employs a novel and powerful hybrid region representation that integrates discrete coordinates and continuous features jointly to represent a region in the image. To extract the continuous features of versatile regions, we propose a spatial-aware visual sampler, adept at handling varying sparsity across different shapes. Consequently, Ferret can accept diverse region inputs, such as points, bounding boxes, and free-form shapes. To bolster the desired capability of Ferret, we curate GRIT, a comprehensive refer-and-ground instruction tuning dataset including 1.1M samples that contain rich hierarchical spatial knowledge, with an additional 130K hard negative data to promote model robustness. The resulting model not only achieves superior performance in classical referring and grounding tasks, but also greatly outperforms existing MLLMs in region-based and localization-demanded multimodal chatting. Our evaluations also reveal a significantly improved capability of describing image details and a remarkable alleviation in object hallucination.", "type": "Spotlight Poster", "OpenReview": "https://openreview.net/forum?id=2msbbX3ydD", "arxiv_id": "2310.07704", "GitHub": [ "https://github.com/apple/ml-ferret" ], "Space": [], "Model": [], "Dataset": [] }, { "id": 18711, "title": "Overcoming the Pitfalls of Vision-Language Model Finetuning for OOD Generalization", "authors": [ "Yuhang Zang", "Hanlin Goh", "Joshua M. Susskind", "Chen Huang" ], "abstract": "Existing vision-language models exhibit strong generalization on a variety of visual domains and tasks. However, such models mainly perform zero-shot recognition in a closed-set manner, and thus struggle to handle open-domain visual concepts by design. There are recent finetuning methods, such as prompt learning, that not only study the discrimination between in-distribution (ID) and out-of-distribution (OOD) samples, but also show some improvements in both ID and OOD accuracies. In this paper, we first demonstrate that vision-language models, after long enough finetuning but without proper regularization, tend to overfit the known classes in the given dataset, with degraded performance on unknown classes. Then we propose a novel approach OGEN to address this pitfall, with the main focus on improving the OOD GENeralization of finetuned models. Specifically, a class-conditional feature generator is introduced to synthesize OOD features using just the class name of any unknown class. Such synthesized features will provide useful knowledge about unknowns and help regularize the decision boundary between ID and OOD data when optimized jointly. Equally important is our adaptive self-distillation mechanism to regularize our feature generation model during joint optimization, i.e., adaptively transferring knowledge between model states to further prevent overfitting. Experiments validate that our method yields convincing gains in OOD generalization performance in different settings.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=PKICZXVY9M", "arxiv_id": "2401.15914", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 18844, "title": "Unraveling the Key Components of OOD Generalization via Diversification", "authors": [ "Harold Luc Benoit", "Liangze Jiang", "Andrei Atanov", "Oguzhan Fatih Kar", "Mattia Rigotti", "Amir Zamir" ], "abstract": "Supervised learning datasets may contain multiple cues that explain the training set equally well, i.e., learning any of them would lead to the correct predictions on the training data. However, many of them can be spurious, i.e., lose their predictive power under a distribution shift and consequently fail to generalize to out-of-distribution (OOD) data. Recently developed \"diversification\" methods (Lee et al., 2023; Pagliardini et al., 2023) approach this problem by finding multiple diverse hypotheses that rely on different features. This paper aims to study this class of methods and identify the key components contributing to their OOD generalization abilities.We show that (1) diversification methods are highly sensitive to the distribution of the unlabeled data used for diversification and can underperform significantly when away from a method-specific sweet spot. (2) Diversification alone is insufficient for OOD generalization. The choice of the used learning algorithm, e.g., the model's architecture and pretraining, is crucial. In standard experiments (classification on Waterbirds and Office-Home datasets), using the second-best choice leads to an up to 20\\% absolute drop in accuracy. (3) The optimal choice of learning algorithm depends on the unlabeled data and vice versa i.e. they are co-dependent. (4) Finally, we show that, in practice, the above pitfalls cannot be alleviated by increasing the number of diverse hypotheses, the major feature of diversification methods.These findings provide a clearer understanding of the critical design factors influencing the OOD generalization abilities of diversification methods. They can guide practitioners in how to use the existing methods best and guide researchers in developing new, better ones.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=Lvf7GnaLru", "arxiv_id": "2312.16313", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 18067, "title": "ECoFLaP: Efficient Coarse-to-Fine Layer-Wise Pruning for Vision-Language Models", "authors": [ "Yi-Lin Sung", "Jaehong Yoon", "Mohit Bansal" ], "abstract": "Large Vision-Language Models (LVLMs) can understand the world comprehensively by integrating rich information from different modalities, achieving remarkable performance improvements on various multimodal downstream tasks. However, deploying LVLMs is often problematic due to their massive computational/energy costs and carbon consumption, making it infeasible to adopt conventional iterative global pruning, which is costly due to computing the Hessian matrix of the entire large model for sparsification. Alternatively, several studies have recently proposed layer-wise pruning approaches to avoid the expensive computation of global pruning and efficiently compress model weights according to their importance within a layer. However, these methods often suffer from suboptimal model compression due to their lack of a global perspective. To address this limitation in recent efficient pruning methods for large models, we propose Efficient Coarse-to-Fine Layer-Wise Pruning (ECoFLaP), a two-stage coarse-to-fine weight pruning approach for LVLMs. We first determine the sparsity ratios of different layers or blocks by leveraging the global importance score, which is efficiently computed based on the zeroth-order approximation of the global model gradients. Then, the multimodal model performs layer-wise unstructured weight pruning. We validate our proposed method across various multi-modal and single-modal models and datasets, demonstrating significant performance improvements over prevalent pruning techniques in the high-sparsity regime.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=iIT02bAKzv", "arxiv_id": "2310.02998", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 18228, "title": "Merge, Then Compress: Demystify Efficient SMoE with Hints from Its Routing Policy", "authors": [ "Pingzhi Li", "Zhenyu Zhang", "Prateek Yadav", "Yi-Lin Sung", "Yu Cheng", "Mohit Bansal", "Tianlong Chen" ], "abstract": "Sparsely activated Mixture-of-Experts (SMoE) has shown promise to scale up the learning capacity of neural networks, however, they have issues like: ($a$) $\\textit{High Memory Usage,}$ due to duplication of the network layers into multiple copies as experts; and ($b$) $\\textit{Redundancy in Experts,}$ as common learning-based routing policies suffer from representational collapse. Therefore, vanilla SMoE models are memory inefficient and non-scalable, especially for resource-constrained downstream scenarios. In this paper, we ask: Can we craft a compact SMoE model by consolidating expert information? What is the best recipe to merge multiple experts into fewer but more knowledgeable experts? Our pilot investigation reveals that conventional model merging methods fail to be effective in such expert merging for SMoE. The potential reasons are: ($1$) redundant information overshadows critical experts; ($2$) appropriate neuron permutation for each expert is missing to bring all of them in alignment. To address these challenges, we propose a novel merging algorithm for SMoE, $\\textit{i.e.}$, $\\texttt{M-SMoE}$, which leverages routing statistics to guide expert merging. Specifically, it starts with neuron permutation alignment for experts; then, dominant experts and their \"group members\" are formed based on routing policies; lastly, every expert group is merged into a single expert by utilizing each expert's activation frequency as their weight for merging, thus diminishing the impact of insignificant experts. Moreover, we draw an interesting observation that our proposed merging promotes a low dimensionality in the merged expert's weight space, naturally paving the way for additional compression. Hence, our final method, $\\texttt{MC-SMoE}$ ($\\textit{i.e.}$, Merge, then Compress SMoE), further decomposes the merged experts into low-rank and structural sparse alternatives. Extensive experiments across $8$ benchmarks validate the effectiveness of our proposals. For instance, our $\\texttt{MC-SMoE}$ achieves up to $80\\%$ memory and a $20\\%$ FLOPs reduction, with virtually no loss in performance. Our code is provided as supplementary material.", "type": "Spotlight Poster", "OpenReview": "https://openreview.net/forum?id=eFWG9Cy3WK", "arxiv_id": "2310.01334", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 18927, "title": "Repeated Random Sampling for Minimizing the Time-to-Accuracy of Learning", "authors": [ "Patrik Okanovic", "Roger Waleffe", "Vasilis Mageirakos", "Konstantinos Nikolakakis", "Amin Karbasi", "Dionysios Kalogerias", "Nezihe Merve G\u00fcrel", "Theodoros Rekatsinas" ], "abstract": "Methods for carefully selecting or generating a small set of training data to learn from, i.e., data pruning, coreset selection, and dataset distillation, have been shown to be effective in reducing the ever-increasing cost of training neural networks. Behind this success are rigorously designed, yet expensive, strategies for identifying the most informative training examples out of large datasets. In this work, we revisit these methods to understand if the additional computational costs associated with such strategies are justified from the perspective of time-to-accuracy, which has become a critical efficiency measure of deep neural network training over large datasets. Surprisingly, we find that many of the recently proposed methods underperform what we call Repeated Sampling of Random Subsets (RSRS or RS2), a powerful yet overlooked extension of the standard random baseline that learns from repeatedly sampled data throughout training instead of a fixed random subset. We test RS2 against thirty-two state-of-the-art data pruning and distillation methods across four datasets including ImageNet. Our results demonstrate that RS2 significantly reduces time-to-accuracy, particularly in practical regimes where accuracy, but not runtime, is similar to that of training on full dataset. For example, when training ResNet-18 on ImageNet, with 10\\% of the dataset each epoch RS2 reaches an accuracy of 66\\% versus 69\\% when training with the full dataset. The best competing method achieves only 55\\% while training 1.6$\\times$ slower than RS2. Beyond the above meta-study, we discuss the theoretical properties of RS2 such as its convergence rate and generalization error. Our primary goal is to highlight that future works that aim to minimize total training cost by using subset selection, need to consider 1) the total computation cost (including preparing the subset) and 2) should aim to outperform a simple extension of random sampling (i.e., RS2).", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=JnRStoIuTe", "arxiv_id": "2305.18424", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 17545, "title": "The Expressive Leaky Memory Neuron: an Efficient and Expressive Phenomenological Neuron Model Can Solve Long-Horizon Tasks.", "authors": [ "Aaron Spieler", "Nasim Rahaman", "Georg Martius", "Bernhard Sch\u00f6lkopf", "Anna Levina" ], "abstract": "Biological cortical neurons are remarkably sophisticated computational devices,temporally integrating their vast synaptic input over an intricate dendritic tree,subject to complex, nonlinearly interacting internal biological processes. A recentstudy proposed to characterize this complexity by fitting accurate surrogate modelsto replicate the input-output relationship of a detailed biophysical cortical pyramidalneuron model and discovered it needed temporal convolutional networks (TCN)with millions of parameters. Requiring these many parameters, however, couldbe the result of a misalignment between the inductive biases of the TCN andcortical neuron\u2019s computations. In light of this, and with the aim to explorethe computational implications of leaky memory units and nonlinear dendriticprocessing, we introduce the Expressive Leaky Memory (ELM) neuron model, abiologically inspired phenomenological model of a cortical neuron. Remarkably, byexploiting a few such slowly decaying memory-like hidden states and two-layerednonlinear integration of synaptic input, our ELM neuron can accurately matchthe aforementioned input-output relationship with under ten-thousand trainableparameters. To further assess the computational ramifications of our neuron design,we evaluate on various tasks with demanding temporal structures, including theLong Range Arena (LRA) datasets, as well as a novel neuromorphic dataset basedon the Spiking Heidelberg Digits dataset (SHD-Adding). Leveraging a largernumber of memory units with sufficiently long timescales, and correspondinglysophisticated synaptic integration, the ELM neuron proves to be competitive onboth datasets, reliably outperforming the classic Transformer or Chrono-LSTMarchitectures on latter, even solving the Pathfinder-X task with over 70\\% accuracy(16k context length). These findings indicate the importance of inductive biasesfor efficient surrogate neuron models and the potential for biologically motivatedmodels to enhance performance in challenging machine learning tasks.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=vE1e1mLJ0U", "arxiv_id": "2306.16922", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 17434, "title": "Emergent mechanisms for long timescales depend on training curriculum and affect performance in memory tasks", "authors": [ "Sina Khajehabdollahi", "Roxana Zeraati", "Emmanouil Giannakakis", "Tim Jakob Sch\u00e4fer", "Georg Martius", "Anna Levina" ], "abstract": "Recurrent neural networks (RNNs) in the brain and in silico excel at solving tasks with intricate temporal dependencies. Long timescales required for solving such tasks can arise from properties of individual neurons (single-neuron timescale, $\\tau$, e.g., membrane time constant in biological neurons) or recurrent interactions among them (network-mediated timescale). However, the contribution of each mechanism for optimally solving memory-dependent tasks remains poorly understood. Here, we train RNNs to solve $N$-parity and $N$-delayed match-to-sample tasks with increasing memory requirements controlled by $N$ by simultaneously optimizing recurrent weights and $\\tau$s. We find that for both tasks RNNs develop longer timescales with increasing $N$, but depending on the learning objective, they use different mechanisms. Two distinct curricula define learning objectives: sequential learning of a single-$N$ (single-head) or simultaneous learning of multiple $N$s (multi-head). Single-head networks increase their $\\tau$ with $N$ and are able to solve tasks for large $N$, but they suffer from catastrophic forgetting. However, multi-head networks, which are explicitly required to hold multiple concurrent memories, keep $\\tau$ constant and develop longer timescales through recurrent connectivity. Moreover, we show that the multi-head curriculum increases training speed and network stability to ablations and perturbations, and allows RNNs to generalize better to tasks beyond their training regime. This curriculum also significantly improves training GRUs and LSTMs for large-$N$ tasks. Our results suggest that adapting timescales to task requirements via recurrent interactions allows learning more complex objectives and improves the RNN's performance.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=xwKt6bUkXj", "arxiv_id": "2309.12927", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 17642, "title": "DSPy: Compiling Declarative Language Model Calls into State-of-the-Art Pipelines", "authors": [ "Omar Khattab", "Arnav Singhvi", "Paridhi Maheshwari", "Zhiyuan Zhang", "Keshav Santhanam", "Sri Vardhamanan A", "Saiful Haq", "Ashutosh Sharma", "Thomas T. Joshi", "Hanna Moazam", "Heather Miller", "Matei Zaharia", "Christopher Potts" ], "abstract": "The ML community is rapidly exploring techniques for prompting language models (LMs) and for stacking them into pipelines that solve complex tasks. Unfortunately, existing LM pipelines are typically implemented using hard-coded \u201cprompt templates\u201d, i.e. lengthy strings discovered via trial and error. Toward a more systematic approach for developing and optimizing LM pipelines, we introduce DSPy, a programming model that abstracts LM pipelines as text transformation graphs, or imperative computational graphs where LMs are invoked through declarative modules. DSPy modules are parameterized, meaning they can learn (by creating and collecting demonstrations) how to apply compositions of prompting, finetuning, augmentation, and reasoning techniques. We design a compiler that will optimize any DSPy pipeline to maximize a given metric. We conduct two case studies, showing that succinct DSPy programs can express and optimize sophisticated LM pipelines that reason about math word problems, tackle multi-hop retrieval, answer complex questions, and control agent loops. Within minutes of compiling, DSPy can automatically produce prompt pipelines and finetune pipelines that outperform out-of-the-box few-shot prompting as well as expert-created demonstrations for GPT-3.5 and Llama2-13b-chat. On top of that, DSPy programs compiled to relatively small LMs like 770M parameter T5 and Llama2- 13b-chat are competitive with many approaches that rely on large and proprietary LMs like GPT-3.5 and on expert-written prompt chains.", "type": "Spotlight Poster", "OpenReview": "https://openreview.net/forum?id=sY5N0zY5Od", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19582, "title": "Defining Expertise: Applications to Treatment Effect Estimation", "authors": [ "Alihan H\u00fcy\u00fck", "Qiyao Wei", "Alicia Curth", "Mihaela van der Schaar" ], "abstract": "Decision-makers are often experts of their domain and take actions based on their domain knowledge. Doctors, for instance, may prescribe treatments by predicting the likely outcome of each available treatment. Actions of an expert thus naturally encode part of their domain knowledge, and can help make inferences within the same domain: Knowing doctors try to prescribe the best treatment for their patients, we can tell treatments prescribed more frequently are likely to be more effective. Yet in machine learning, the fact that most decision-makers are experts is often overlooked, and \u201cexpertise\u201d is seldom leveraged as an inductive bias. This is especially true for the literature on treatment effect estimation, where often the only assumption made about actions is that of overlap. In this paper, we argue that expertise\u2014particularly the type of expertise the decision-makers of a domain are likely to have\u2014can be informative in designing and selecting methods for treatment effect estimation. We formally define two types of expertise, predictive and prognostic, and demonstrate empirically that: (i) the prominent type of expertise in a domain significantly influences the performance of different methods in treatment effect estimation, and (ii) it is possible to predict the type of expertise present in a dataset, which can provide a quantitative basis for model selection.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=1YPfmglNRU", "arxiv_id": "2403.00694", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19032, "title": "AutoChunk: Automated Activation Chunk for Memory-Efficient Deep Learning Inference", "authors": [ "Xuanlei Zhao", "Shenggan Cheng", "Guangyang LU", "Haotian Zhou", "Bin Jia", "Yang You" ], "abstract": "Large deep learning models have achieved impressive performance across a range of applications. However, their large memory requirements, including parameter memory and activation memory, have become a significant challenge for their practical serving. While existing methods mainly address parameter memory, the importance of activation memory has been overlooked. Especially for long input sequences, activation memory is expected to experience a significant exponential growth as the length of sequences increases. In this approach, we propose AutoChunk, an automatic and adaptive compiler system that efficiently reduces activation memory for long sequence inference by chunk strategies. The proposed system generates chunk plans by optimizing through multiple stages. In each stage, the chunk search pass explores all possible chunk candidates and the chunk selection pass identifies the optimal one. At runtime, AutoChunk employs code generation to automatically apply chunk strategies. The experiments demonstrate that AutoChunk can reduce over 80\\% of activation memory while maintaining speed loss within 10\\%, extend max sequence length by 3.2x to 11.7x, and outperform state-of-the-art methods by a large margin.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=GQGNLEHmdl", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 18170, "title": "Sample-efficient Learning of Infinite-horizon Average-reward MDPs with General Function Approximation", "authors": [ "Jianliang He", "Han Zhong", "Zhuoran Yang" ], "abstract": "We study infinite-horizon average-reward Markov decision processes (AMDPs) in the context of general function approximation. Specifically, we propose a novel algorithmic framework named Fixed-Point Local Optimization (FLOP), which incorporates both model-based and value-based incarnations. In particular, FLOP features a novel construction of confidence sets and a low-switching policy updating scheme, which are tailored to the average-reward and function approximation setting. Moreover, for AMDPs, we propose a novel complexity measure --- average-reward generalized eluder coefficient (AGEC) --- which captures the challenge of exploration in AMDPs with general function approximation. Such a complexity measure encompasses almost all previously known tractable AMDP models, such as linear AMDPs and linear mixture AMDPs, and also includes newly identified cases such as kernel AMDPs and AMDPs with low Bellman eluder dimensions. Using AGEC, we prove that FLOP achieves a sublinear $\\tilde{\\mathcal{O}}(\\mathrm{poly}(d, \\mathrm{sp}(v^*)) \\sqrt{T \\beta })$ regret, where $d$ and $\\beta$ correspond to AGEC and the log-covering number of the hypothesis class respectively, $\\mathrm{sp}(v^*)$ represents the span of the optimal state bias function, $T$ denotes the number of steps, and $\\tilde{\\mathcal{O}} (\\cdot) $ omits logarithmic factors. When specialized to concrete AMDP models, our regret bounds are comparable to those established by the existing algorithms designed specifically for these special cases. To the best of our knowledge, this paper presents the first comprehensive theoretical framework capable of handling nearly all AMDPs.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=fq1wNrC2ai", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19419, "title": "Towards Robust Offline Reinforcement Learning under Diverse Data Corruption", "authors": [ "Rui Yang", "Han Zhong", "Jiawei Xu", "Amy Zhang", "Chongjie Zhang", "Lei Han", "Tong Zhang" ], "abstract": "Offline reinforcement learning (RL) presents a promising approach for learning reinforced policies from offline datasets without the need for costly or unsafe interactions with the environment. However, datasets collected by humans in real-world environments are often noisy and may even be maliciously corrupted, which can significantly degrade the performance of offline RL. In this work, we first investigate the performance of current offline RL algorithms under comprehensive data corruption, including states, actions, rewards, and dynamics. Our extensive experiments reveal that implicit Q-learning (IQL) demonstrates remarkable resilience to data corruption among various offline RL algorithms. Furthermore, we conduct both empirical and theoretical analyses to understand IQL's robust performance, identifying its supervised policy learning scheme as the key factor. Despite its relative robustness, IQL still suffers from heavy-tail targets of Q functions under dynamics corruption. To tackle this challenge, we draw inspiration from robust statistics to employ the Huber loss to handle the heavy-tailedness and utilize quantile estimators to balance penalization for corrupted data and learning stability. By incorporating these simple yet effective modifications into IQL, we propose a more robust offline RL approach named Robust IQL (RIQL). Extensive experiments demonstrate that RIQL exhibits highly robust performance when subjected to diverse data corruption scenarios.", "type": "Spotlight Poster", "OpenReview": "https://openreview.net/forum?id=5hAMmCU0bK", "arxiv_id": "2310.12955", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19401, "title": "Enhancing Tail Performance in Extreme Classifiers by Label Variance Reduction", "authors": [ "Anirudh Buvanesh", "Rahul Chand", "Jatin Prakash", "Bhawna Paliwal", "Mudit Dhawan", "Neelabh Madan", "Deepesh Hada", "Vidit Jain", "SONU MEHTA", "Yashoteja Prabhu", "Manish Gupta", "Ramachandran Ramjee", "Manik Varma" ], "abstract": "Extreme Classification (XC) architectures, which utilize a massive one-vs-all classifier layer at the output, have demonstrated remarkable performance on problems with large label sets. Nonetheless, these have also been observed to falter on tail labels with few representative samples. This phenomenon has been attributed to factors such as classifier over-fitting and missing label bias, and solutions involving regularization and loss re-calibration have been developed.This paper explores the impact of label variance, a previously unexamined factor, on the tail performance in extreme classifiers. Label variance refers to the imprecision introduced in the ground truth when sampling it from a complex underlying distribution - a common phenomenon in most XC datasets. This compromises the quality of trained models, with a pronounced impact on the classifiers for infrequently sampled tail labels.This paper presents a method to systematically reduce label variance in XC by effectively utilizing the capabilities of an additional, tail-robust teacher model. It proposes a principled knowledge distillation framework, \\model, which enhances tail performance in extreme classifiers, with formal guarantees on generalization. Finally, we introduce an effective instantiation of this framework that employs a specialized Siamese teacher model. This model excels in tail accuracy and significantly enhances the quality of student one-vs-all classifiers.Comprehensive experiments are conducted on a diverse set of XC datasets which demonstrate that \\model can enhance tail performance by around 5\\% and 6\\% points in PSP and Coverage metrics respectively when integrated with leading extreme classifiers. Moreover, when added to the top-performing Ren\u00e9e classifier, it establishes a new state-of-the-art. Extensive ablations and analysis substantiate the efficacy of our design choices. Code and datasets will be released for research purposes.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=6ARlSgun7J", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19786, "title": "Multi-granularity Correspondence Learning from Long-term Noisy Videos", "authors": [ "Yijie Lin", "Jie Zhang", "Zhenyu Huang", "Jia Liu", "zujie wen", "Xi Peng" ], "abstract": "Existing video-language studies mainly focus on learning short video clips, leaving long-term temporal dependencies rarely explored due to over-high computational cost of modeling long videos. To address this issue, one feasible solution is learning the correspondence between video clips and captions, which however inevitably encounters the multi-granularity noisy correspondence (MNC) problem. To be specific, MNC refers to the clip-caption misalignment (coarse-grained) and frame-word misalignment (fine-grained), hindering temporal learning and video understanding. In this paper, we propose NOise Robust Temporal Optimal traNsport (Norton) that addresses MNC in a unified optimal transport (OT) framework. In brief, Norton employs video-paragraph and clip-caption contrastive losses to capture long-term dependencies based on OT. To address coarse-grained misalignment in video-paragraph contrast, Norton filters out the irrelevant clips and captions through an alignable prompt bucket and realigns asynchronous clip-caption pairs based on transport distance. To address the fine-grained misalignment, Norton incorporates a soft-maximum operator to identify crucial words and key frames. Additionally, Norton exploits the potential faulty negative samples in clip-caption contrast by rectifying the alignment target with OT assignment to ensure precise temporal modeling. Extensive experiments on video retrieval, videoQA, and action segmentation verify the effectiveness of our method. Code is available at https://lin-yijie.github.io/projects/Norton.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=9Cu8MRmhq2", "arxiv_id": "2401.16702", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19737, "title": "Multi-Source Diffusion Models for Simultaneous Music Generation and Separation", "authors": [ "Giorgio Mariani", "Irene Tallini", "Emilian Postolache", "Michele Mancusi", "Luca Cosmo", "Emanuele Rodol\u00e0" ], "abstract": "In this work, we define a diffusion-based generative model capable of both music generation and source separation by learning the score of the joint probability density of sources sharing a context. Alongside the classic total inference tasks (i.e., generating a mixture, separating the sources), we also introduce and experiment on the partial generation task of source imputation, where we generate a subset of the sources given the others (e.g., play a piano track that goes well with the drums). Additionally, we introduce a novel inference method for the separation task based on Dirac likelihood functions. We train our model on Slakh2100, a standard dataset for musical source separation, provide qualitative results in the generation settings, and showcase competitive quantitative results in the source separation setting. Our method is the first example of a single model that can handle both generation and separation tasks, thus representing a step toward general audio models.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=h922Qhkmx1", "arxiv_id": "2302.02257", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19031, "title": "Reward-Consistent Dynamics Models are Strongly Generalizable for Offline Reinforcement Learning", "authors": [ "Fan-Ming Luo", "Tian Xu", "Xingchen Cao", "Yang Yu" ], "abstract": "Learning a precise dynamics model can be crucial for offline reinforcement learning, which, unfortunately, has been found to be quite challenging. Dynamics models that are learned by fitting historical transitions often struggle to generalize to unseen transitions. In this study, we identify a hidden but pivotal factor termed \\emph{dynamics reward} that remains consistent across transitions, offering a pathway to better generalization. Therefore, we propose the idea of reward-consistent dynamics models: any trajectory generated by the dynamics model should maximize the dynamics reward derived from the data. We implement this idea as the MOREC (Model-based Offline reinforcement learning with Reward Consistency) method, which can be seamlessly integrated into previous offline model-based reinforcement learning (MBRL) methods. MOREC learns a generalizable dynamics reward function from offline data, which is subsequently employed as a transition filter in any offline MBRL method: when generating transitions, the dynamics model generates a batch of transitions and selects the one with the highest dynamics reward value. On a synthetic task, we visualize that MOREC has a strong generalization ability and can surprisingly recover some distant unseen transitions. On 21 offline tasks in D4RL and NeoRL benchmarks, MOREC improves the previous state-of-the-art performance by a significant margin, i.e., 4.6\\% on D4RL tasks and 25.9\\% on NeoRL tasks. Notably, MOREC is the first method that can achieve above 95\\% online RL performance in 6 out of 12 D4RL tasks and 3 out of 9 NeoRL tasks.", "type": "Spotlight Poster", "OpenReview": "https://openreview.net/forum?id=GSBHKiw19c", "arxiv_id": "2310.05422", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19738, "title": "W\u00fcrstchen: An Efficient Architecture for Large-Scale Text-to-Image Diffusion Models", "authors": [ "Pablo Pernias", "Dominic Rampas", "Mats Leon Richter", "Christopher Pal", "Marc Aubreville" ], "abstract": "We introduce W\u00fcrstchen, a novel architecture for text-to-image synthesis that combines competitive performance with unprecedented cost-effectiveness for large-scale text-to-image diffusion models.A key contribution of our work is to develop a latent diffusion technique in which we learn a detailed but extremely compact semantic image representation used to guide the diffusion process. This highly compressed representation of an image provides much more detailed guidance compared to latent representations of language and this significantly reduces the computational requirements to achieve state-of-the-art results. Our approach also improves the quality of text-conditioned image generation based on our user preference study.The training requirements of our approach consists of 24,602 A100-GPU hours - compared to Stable Diffusion 2.1's 200,000 GPU hours. Our approach also requires less training data to achieve these results. Furthermore, our compact latent representations allows us to perform inference over twice as fast, slashing the usual costs and carbon footprint of a state-of-the-art (SOTA) diffusion model significantly, without compromising the end performance. In a broader comparison against SOTA models our approach is substantially more efficient and compares favourably in terms of image quality.We believe that this work motivates more emphasis on the prioritization of both performance and computational accessibility.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=gU58d5QeGv", "arxiv_id": "2306.00637", "GitHub": [], "Space": [ "warp-ai/Wuerstchen" ], "Model": [ "warp-ai/wuerstchen" ], "Dataset": [] }, { "id": 19583, "title": "A Variational Perspective on Solving Inverse Problems with Diffusion Models", "authors": [ "Morteza Mardani", "Jiaming Song", "Jan Kautz", "Arash Vahdat" ], "abstract": "Diffusion models have emerged as a key pillar of foundation models in visual domains. One of their critical applications is to universally solve different downstream inverse tasks via a single diffusion prior without re-training for each task. Most inverse tasks can be formulated as inferring a posterior distribution over data (e.g., a full image) given a measurement (e.g., a masked image). This is however challenging in diffusion models since the nonlinear and iterative nature of the diffusion process renders the posterior intractable. To cope with this challenge, we propose a variational approach that by design seeks to approximate the true posterior distribution. We show that our approach naturally leads to regularization by denoising diffusion process (RED-diff) where denoisers at different timesteps concurrently impose different structural constraints over the image. To gauge the contribution of denoisers from different timesteps, we propose a weighting mechanism based on signal-to-noise-ratio (SNR). Our approach provides a new variational perspective for solving inverse problems with diffusion models, allowing us to formulate sampling as stochastic optimization, where one can simply apply off-the-shelf solvers with lightweight iterates. Our experiments for image restoration tasks such as inpainting and superresolution demonstrate the strengths of our method compared with state-of-the-art sampling-based diffusion models.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=1YO4EE3SPB", "arxiv_id": "2305.04391", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19589, "title": "MAMBA: an Effective World Model Approach for Meta-Reinforcement Learning", "authors": [ "Zohar Rimon", "Tom Jurgenson", "Orr Krupnik", "Gilad Adler", "Aviv Tamar" ], "abstract": "Meta-reinforcement learning (meta-RL) is a promising framework for tackling challenging domains requiring efficient exploration. Existing meta-RL algorithms are characterized by low sample efficiency, and mostly focus on low-dimensional task distributions. In parallel, model-based RL methods have been successful in solving partially observable MDPs, of which meta-RL is a special case.In this work, we leverage this success and propose a new model-based approach to meta-RL, based on elements from existing state-of-the-art model-based and meta-RL methods. We demonstrate the effectiveness of our approach on common meta-RL benchmark domains, attaining greater return with better sample efficiency (up to $15\\times$) while requiring very little hyperparameter tuning. In addition, we validate our approach on a slate of more challenging, higher-dimensional domains, taking a step towards real-world generalizing agents.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=1RE0H6mU7M", "arxiv_id": "2403.09859", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 18279, "title": "Procedural Fairness Through Decoupling Objectionable Data Generating Components", "authors": [ "Zeyu Tang", "Jialu Wang", "Yang Liu", "Peter Spirtes", "Kun Zhang" ], "abstract": "We reveal and address the frequently overlooked yet important issue of _disguised procedural unfairness_, namely, the potentially inadvertent alterations on the behavior of neutral (i.e., not problematic) aspects of data generating process, and/or the lack of procedural assurance of the greatest benefit of the least advantaged individuals. Inspired by John Rawls's advocacy for _pure procedural justice_ (Rawls, 1971; 2001), we view automated decision-making as a microcosm of social institutions, and consider how the data generating process itself can satisfy the requirements of procedural fairness. We propose a framework that decouples the objectionable data generating components from the neutral ones by utilizing reference points and the associated value instantiation rule. Our findings highlight the necessity of preventing _disguised procedural unfairness_, drawing attention not only to the objectionable data generating components that we aim to mitigate, but also more importantly, to the neutral components that we intend to keep unaffected.", "type": "Spotlight Poster", "OpenReview": "https://openreview.net/forum?id=cxfPefbu1s", "arxiv_id": "2311.14688", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 18380, "title": "Kalman Filter for Online Classification of Non-Stationary Data", "authors": [ "Michalis Titsias", "Alexandre Galashov", "Amal Rannen-Triki", "Razvan Pascanu", "Yee Whye Teh", "Jorg Bornschein" ], "abstract": "In Online Continual Learning (OCL) a learning system receives a stream of data and sequentially performs prediction and training steps. Important challenges in OCL are concerned with automatic adaptation to the particular non-stationary structure of the data, and with quantification of predictive uncertainty. Motivated by these challenges we introduce a probabilistic Bayesian online learning model by using a (possibly pretrained) neural representation and a state space model over the linear predictor weights. Non-stationarity over the linear predictor weights is modelled using a \u201cparameter drift\u201d transition density, parametrized by a coefficient that quantifies forgetting. Inference in the model is implemented with efficient Kalman filter recursions which track the posterior distribution over the linear weights, while online SGD updates over the transition dynamics coefficient allows to adapt to the non-stationarity seen in data. While the framework is developed assuming a linear Gaussian model, we also extend it to deal with classification problems and for fine-tuning the deep learning representation. In a set of experiments in multi-class classification using data sets such as CIFAR-100 and CLOC we demonstrate the predictive ability of the model and its flexibility to capture non-stationarity.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=ZzmKEpze8e", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 17524, "title": "CODE REPRESENTATION LEARNING AT SCALE", "authors": [ "Dejiao Zhang", "Wasi Uddin Ahmad", "Ming Tan", "Hantian Ding", "Ramesh Nallapati", "Dan Roth", "Xiaofei Ma", "Bing Xiang" ], "abstract": "Recent studies have shown that code language model at scale demonstrate significant performance gains on downstream tasks, i.e., code generation. However, most of the existing works on code representation learning train models at a hundred million parameter scale using very limited pretraining corpora. In this work, we fuel code representation learning with a vast amount of code data via a two-stage pretraining scheme. We first train the encoders via a mix that leverages both randomness in masking language modeling and the structure aspect of programming language. We then enhance the representations via contrastive learning with hard negative and hard positive constructed in an unsupervised manner. We establish an off-the-shelf encoder model that persistently outperforms the existing models on a wide variety of downstream tasks by large margins. To comprehend the factors contributing to successful code representation learning, we conduct detailed ablations and share our findings on (i) a customized and effective token-level denoising scheme for source code; (ii) the importance of hard negatives and hard positives; (iii) how the proposed bimodal contrastive learning boost the cross-lingual semantic search performance; and (iv) how the pretraining schemes decide the downstream task performance scales with the model size.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=vfzRRjumpX", "arxiv_id": "2402.01935", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 17610, "title": "Uni-O4: Unifying Online and Offline Deep Reinforcement Learning with Multi-Step On-Policy Optimization", "authors": [ "Kun LEI", "Zhengmao He", "Chenhao Lu", "Kaizhe Hu", "Yang Gao", "Huazhe Xu" ], "abstract": "Combining offline and online reinforcement learning (RL) is crucial for efficient and safe learning. However, previous approaches treat offline and online learning as separate procedures, resulting in redundant designs and limited performance. We ask: *Can we achieve straightforward yet effective offline and online learning without introducing extra conservatism or regularization?* In this study, we propose Uni-O4, which utilizes an on-policy objective for both offline and online learning. Owning to the alignment of objectives in two phases, the RL agent can transfer between offline and online learning seamlessly. This property enhances the flexibility of the learning paradigm, allowing for arbitrary combinations of pretraining, fine-tuning, offline, and online learning. In the offline phase, specifically, Uni-O4 leverages diverse ensemble policies to address the mismatch issues between the estimated behavior policy and the offline dataset. Through a simple offline policy evaluation (OPE) approach, Uni-O4 can achieve multi-step policy improvement safely. We demonstrate that by employing the method above, the fusion of these two paradigms can yield superior offline initialization as well as stable and rapid online fine-tuning capabilities. Through real-world robot tasks, we highlight the benefits of this paradigm for rapid deployment in challenging, previously unseen real-world environments. Additionally, through comprehensive evaluations using numerous simulated benchmarks, we substantiate that our method achieves state-of-the-art performance in both offline and offline-to-online fine-tuning learning. [Our website](uni-o4.github.io)", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=tbFBh3LMKi", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 17987, "title": "Selective Visual Representations Improve Convergence and Generalization for Embodied AI", "authors": [ "Ainaz Eftekhar", "Kuo-Hao Zeng", "Jiafei Duan", "Ali Farhadi", "Aniruddha Kembhavi", "Ranjay Krishna" ], "abstract": "Embodied AI models often employ off the shelf vision backbones like CLIP to encode their visual observations. Although such general purpose representations encode rich syntactic and semantic information about the scene, much of this information is often irrelevant to the specific task at hand. This introduces noise within the learning process and distracts the agent's focus from task-relevant visual cues.Inspired by selective attention in humans\u2014the process through which people filter their perception based on their experiences, knowledge, and the task at hand\u2014we introduce a parameter-efficient approach to filter visual stimuli for embodied AI.Our approach induces a task-conditioned bottleneck using a small learnable codebook module. This codebook is trained jointly to optimize task reward and acts as a task-conditioned selective filter over the visual observation.Our experiments showcase state-of-the-art performance for object goal navigation and object displacement across $5$ benchmarks, ProcTHOR, ArchitecTHOR, RoboTHOR, AI2-iTHOR, and ManipulaTHOR. The filtered representations produced by the codebook are also able generalize better and converge faster when adapted to other simulation environments such as Habitat. Our qualitative analyses show that agents explore their environments more effectively and their representations retain task-relevant information like target object recognition while ignoring superfluous information about other objects.", "type": "Spotlight Poster", "OpenReview": "https://openreview.net/forum?id=kC5nZDU5zf", "arxiv_id": "2311.04193", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19723, "title": "How I Warped Your Noise: a Temporally-Correlated Noise Prior for Diffusion Models", "authors": [ "Pascal Chang", "Jingwei Tang", "Markus Gross", "Vinicius C. Azevedo" ], "abstract": "Video editing and generation methods often rely on pre-trained image-based diffusion models. During the diffusion process, however, the reliance on rudimentary noise sampling techniques that do not preserve correlations present in subsequent frames of a video is detrimental to the quality of the results. This either produces high-frequency flickering, or texture-sticking artifacts that are not amenable to post-processing. With this in mind, we propose a novel method for preserving temporal correlations in a sequence of noise samples. This approach is materialized by a novel noise representation, dubbed $\\int$-noise (integral noise), that reinterprets individual noise samples as a continuously integrated noise field: pixel values do not represent discrete values, but are rather the integral of an underlying infinite-resolution noise over the pixel area. Additionally, we propose a carefully tailored transport method that uses $\\int$-noise to accurately advect noise samples over a sequence of frames, maximizing the correlation between different frames while also preserving the noise properties. Our results demonstrate that the proposed $\\int$-noise can be used for a variety of tasks, such as video restoration, surrogate rendering, and conditional video generation.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=pzElnMrgSD", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19029, "title": "DiffAR: Denoising Diffusion Autoregressive Model for Raw Speech Waveform Generation", "authors": [ "Roi Benita", "Michael Elad", "Joseph Keshet" ], "abstract": "Diffusion models have recently been shown to be relevant for high-quality speech generation. Most work has been focused on generating spectrograms, and as such, they further require a subsequent model to convert the spectrogram to a waveform (i.e., a vocoder). This work proposes a diffusion probabilistic end-to-end model for generating a raw speech waveform. The proposed model is autoregressive, generating overlapping frames sequentially, where each frame is conditioned on a portion of the previously generated one. Hence, our model can effectively synthesize an unlimited speech duration while preserving high-fidelity synthesis and temporal coherence. We implemented the proposed model for unconditional and conditional speech generation, where the latter can be driven by an input sequence of phonemes, amplitudes, and pitch values. Working on the waveform directly has some empirical advantages. Specifically, it allows the creation of local acoustic behaviors, like vocal fry, which makes the overall waveform sounds more natural. Furthermore, the proposed diffusion model is stochastic and not deterministic; therefore, each inference generates a slightly different waveform variation, enabling abundance of valid realizations. Experiments show that the proposed model generates speech with superior quality compared with other state-of-the-art neural speech generation systems.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=GTk0AdOYLq", "arxiv_id": "2310.01381", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 18004, "title": "Deep Reinforcement Learning Guided Improvement Heuristic for Job Shop Scheduling", "authors": [ "Cong Zhang", "Zhiguang Cao", "Wen Song", "Yaoxin Wu", "Jie Zhang" ], "abstract": "Recent studies in using deep reinforcement learning (DRL) to solve Job-shop scheduling problems (JSSP) focus on construction heuristics. However, their performance is still far from optimality, mainly because the underlying graph representation scheme is unsuitable for modelling partial solutions at each construction step. This paper proposes a novel DRL-guided improvement heuristic for solving JSSP, where graph representation is employed to encode complete solutions. We design a Graph-Neural-Network-based representation scheme, consisting of two modules to effectively capture the information of dynamic topology and different types of nodes in graphs encountered during the improvement process. To speed up solution evaluation during improvement, we present a novel message-passing mechanism that can evaluate multiple solutions simultaneously. We prove that the computational complexity of our method scales linearly with problem size. Experiments on classic benchmarks show that the improvement policy learned by our method outperforms state-of-the-art DRL-based methods by a large margin.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=jsWCmrsHHs", "arxiv_id": "2211.10936", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 18303, "title": "Label-Focused Inductive Bias over Latent Object Features in Visual Classification", "authors": [ "Ilmin Kang", "HyounYoung Bae", "Kangil Kim" ], "abstract": "Most neural networks for classification primarily learn features differentiated by input-domain related information such as visual similarity of objects in an image. While this focus is natural behavior, it can inadvertently introduce an inductive bias that conflicts with unseen relations in an implicit output-domain determined by human labeling based on their own world knowledge. Such conflicts can limit generalization of models by potential dominance of the input-domain focused bias in inference.To overcome this limitation without external resources, we introduce Output-Domain focused Biasing (ODB) training strategy that constructs inductive biases on features differentiated by only output labels. It has four steps: 1) it learns intermediate latent object features in an unsupervised manner; 2) it decouples their visual dependencies by assigning new independent embedding parameters; 3) it captures structured features optimized for the original classification task; and 4) it integrates the structured features with the original visual features for the final prediction.We implement the ODB on a vision transformer architecture, and achieved significant improvements on image classification benchmarks. This paper offers a straightforward and effective method to obtain and utilize output-domain focused inductive bias for classification mapping two different domains.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=cH3oufN8Pl", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19028, "title": "DreamFlow: High-quality text-to-3D generation by Approximating Probability Flow", "authors": [ "Kyungmin Lee", "Kihyuk Sohn", "Jinwoo Shin" ], "abstract": "Recent progress in text-to-3D generation has been achieved through the utilization of score distillation methods: they make use of the pre-trained text-to-image (T2I) diffusion models by distilling via the diffusion model training objective. However, such an approach inevitably results in the use of random timesteps at each update, which increases the variance of the gradient and ultimately prolongs the optimization process. In this paper, we propose to enhance the text-to-3D optimization by leveraging the T2I diffusion prior in the generative sampling process with a predetermined timestep schedule. To this end, we interpret text-to-3D optimization as a multi-view image-to-image translation problem, and propose a solution by approximating the probability flow. By leveraging the proposed novel optimization algorithm, we design DreamFlow, a practical three-stage coarse-to-fine text-to-3D optimization framework that enables fast generation of high-quality and high-resolution (i.e., 1024\u00d71024) 3D contents. For example, we demonstrate that DreamFlow is 5 times faster than the existing state-of-the-art text-to-3D method, while producing more photorealistic 3D contents.", "type": "Spotlight Poster", "OpenReview": "https://openreview.net/forum?id=GURqUuTebY", "arxiv_id": "2403.14966", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 17891, "title": "Analyzing Feed-Forward Blocks in Transformers through the Lens of Attention Map", "authors": [ "Goro Kobayashi", "Tatsuki Kuribayashi", "Sho Yokoi", "Kentaro Inui" ], "abstract": "Given that Transformers are ubiquitous in wide tasks, interpreting their internals is a pivotal issue. Still, their particular components, feed-forward (FF) blocks, have typically been less analyzed despite their substantial parameter amounts.We analyze the input contextualization effects of FF blocks by rendering them in the attention maps as a human-friendly visualization scheme.Our experiments with both masked- and causal-language models reveal that FF networks modify the input contextualization to emphasize specific types of linguistic compositions. In addition, FF and its surrounding components tend to cancel out each other's effects, suggesting potential redundancy in the processing of the Transformer layer.", "type": "Spotlight Poster", "OpenReview": "https://openreview.net/forum?id=mYWsyTuiRp", "arxiv_id": "2302.00456", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19026, "title": "Rethinking Information-theoretic Generalization: Loss Entropy Induced PAC Bounds", "authors": [ "Yuxin Dong", "Tieliang Gong", "Hong Chen", "Shujian Yu", "Chen Li" ], "abstract": "Information-theoretic generalization analysis has achieved astonishing success in characterizing the generalization capabilities of noisy and iterative learning algorithms. However, current advancements are mostly restricted to average-case scenarios and necessitate the stringent bounded loss assumption, leaving a gap with regard to computationally tractable PAC generalization analysis, especially for long-tailed loss distributions. In this paper, we bridge this gap by introducing a novel class of PAC bounds through leveraging loss entropies. These bounds simplify the computation of key information metrics in previous PAC information-theoretic bounds to one-dimensional variables, thereby enhancing computational tractability. Moreover, our data-independent bounds provide novel insights into the generalization behavior of the minimum error entropy criterion, while our data-dependent bounds improve over previous results by alleviating the bounded loss assumption under both leave-one-out and supersample settings. Extensive numerical studies indicate strong correlations between the generalization error and the induced loss entropy, showing that the presented bounds adeptly capture the patterns of the true generalization gap under various learning scenarios.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=GWSIo2MzuH", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19716, "title": "Batched Low-Rank Adaptation of Foundation Models", "authors": [ "Yeming Wen", "Swarat Chaudhuri" ], "abstract": "Low-Rank Adaptation (LoRA) has recently gained attention for fine-tuning foundation models by incorporating trainable low-rank matrices, thereby reducing the number of trainable parameters. While \\lora/ offers numerous advantages, its applicability for real-time serving to a diverse and global user base is constrained by its incapability to handle multiple task-specific adapters efficiently. This imposes a performance bottleneck in scenarios requiring personalized, task-specific adaptations for each incoming request.To address this, we introduce FLORA (Fast LoRA), a framework in which each input example in a minibatch can be associated with its unique low-rank adaptation weights, allowing for efficient batching of heterogeneous requests. We empirically demonstrate that \\flora/ retains the performance merits of \\lora/, showcasing competitive results on the MultiPL-E code generation benchmark spanning over 8 languages and a multilingual speech recognition task across 6 languages.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=w4abltTZ2f", "arxiv_id": "2312.05677", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 18007, "title": "COPlanner: Plan to Roll Out Conservatively but to Explore Optimistically for Model-Based RL", "authors": [ "Xiyao Wang", "Ruijie Zheng", "Yanchao Sun", "Ruonan Jia", "Wichayaporn Wongkamjan", "Huazhe Xu", "Furong Huang" ], "abstract": "Dyna-style model-based reinforcement learning contains two phases: model rollouts to generate sample for policy learning and real environment exploration using current policy for dynamics model learning. However, due to the complex real-world environment, it is inevitable to learn an imperfect dynamics model with model prediction error, which can further mislead policy learning and result in sub-optimal solutions. In this paper, we propose $\\texttt{COPlanner}$, a planning-driven framework for model-based methods to address the inaccurately learned dynamics model problem with conservative model rollouts and optimistic environment exploration. $\\texttt{COPlanner}$ leverages an uncertainty-aware policy-guided model predictive control (UP-MPC) component to plan for multi-step uncertainty estimation. This estimated uncertainty then serves as a penalty during model rollouts and as a bonus during real environment exploration respectively, to choose actions. Consequently, $\\texttt{COPlanner}$ can avoid model uncertain regions through conservative model rollouts, thereby alleviating the influence of model error. Simultaneously, it explores high-reward model uncertain regions to reduce model error actively through optimistic real environment exploration. $\\texttt{COPlanner}$ is a plug-and-play framework that can be applied to any dyna-style model-based methods. Experimental results on a series of proprioceptive and visual continuous control tasks demonstrate that both sample efficiency and asymptotic performance of strong model-based methods are significantly improved combined with $\\texttt{COPlanner}$.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=jnFcKjtUPN", "arxiv_id": "2310.07220", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 18821, "title": "DrM: Mastering Visual Reinforcement Learning through Dormant Ratio Minimization", "authors": [ "Guowei Xu", "Ruijie Zheng", "Yongyuan Liang", "Xiyao Wang", "Zhecheng Yuan", "Tianying Ji", "Yu Luo", "Xiaoyu Liu", "Jiaxin Yuan", "Pu Hua", "Shuzhen Li", "Yanjie Ze", "Hal Daum\u00e9 III", "Furong Huang", "Huazhe Xu" ], "abstract": "Visual reinforcement learning (RL) has shown promise in continuous control tasks.Despite its progress, current algorithms are still unsatisfactory in virtually every aspect of the performance such as sample efficiency, asymptotic performance, and their robustness to the choice of random seeds.In this paper, we identify a major shortcoming in existing visual RL methods that is the agents often exhibit sustained inactivity during early training, thereby limiting their ability to explore effectively. Expanding upon this crucial observation, we additionally unveil a significant correlation between the agents' inclination towards motorically inactive exploration and the absence of neuronal activity within their policy networks.To quantify this inactivity, we adopt dormant ratio as a metric to measure inactivity in the RL agent's network.Empirically, we also recognize that the dormant ratio can act as a standalone indicator of an agent's activity level, regardless of the received reward signals.Leveraging the aforementioned insights, we introduce DrM, a method that uses three core mechanisms to guide agents' exploration-exploitation trade-offs by actively minimizing the dormant ratio. Experiments demonstrate that DrM achieves significant improvements in sample efficiency and asymptotic performance with no broken seeds (76 seeds in total) across three continuous control benchmark environments, including DeepMind Control Suite, MetaWorld, and Adroit.Most importantly, DrM is the first model-free algorithm that consistently solves tasks in both the Dog and Manipulator domains from the DeepMind Control Suite as well as three dexterous hand manipulation tasks without demonstrations in Adroit, all based on pixel observations.", "type": "Spotlight Poster", "OpenReview": "https://openreview.net/forum?id=MSe8YFbhUE", "arxiv_id": "2310.19668", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 18756, "title": "CNN Kernels Can Be the Best Shapelets", "authors": [ "Eric Qu", "Yansen Wang", "Xufang Luo", "Wenqiang He", "Kan Ren", "Dongsheng Li" ], "abstract": "Shapelets and CNN are two typical approaches to model time series. Shapelets aim at finding a set of sub-sequences that extract feature-based interpretable shapes, but may suffer from accuracy and efficiency issues. CNN performs well by encoding sequences with a series of hidden representations, but lacks interpretability. In this paper, we demonstrate that shapelets are essentially equivalent to a specific type of CNN kernel with a squared norm and pooling. Based on this finding, we propose ShapeConv, an interpretable CNN layer with its kernel serving as shapelets to conduct time-series modeling tasks in both supervised and unsupervised settings. By incorporating shaping regularization, we enforce the similarity for maximum interpretability. We also find human knowledge can be easily injected to ShapeConv by adjusting its initialization and model performance is boosted with it. Experiments show that ShapeConv can achieve state-of-the-art performance on time-series benchmarks without sacrificing interpretability and controllability.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=O8ouVV8PjF", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 17395, "title": "Massively Scalable Inverse Reinforcement Learning in Google Maps", "authors": [ "Matt Barnes", "Matthew Abueg", "Oliver F. Lange", "Matt Deeds", "Jason Trader", "Denali Molitor", "Markus Wulfmeier", "Shawn O'Banion" ], "abstract": "Optimizing for humans\u2019 latent preferences remains a grand challenge in route recommendation. Prior research has provided increasingly general methods based on inverse reinforcement learning (IRL), yet no approach has successfully addressed planetary-scale routing problems with hundreds of millions of states and demonstration trajectories. In this paper, we introduce scaling techniques based on graph compression, spatial parallelization, and improved initialization conditions inspired by a connection to eigenvector algorithms. We revisit classic algorithms in the routing context, and make the key observation that there exists a trade-off between the use of cheap, deterministic planners and expensive yet robust stochastic policies. This insight is leveraged in Receding Horizon Inverse Planning (RHIP), a new generalization of classic IRL algorithms that provides fine-grained control over performance trade-offs via its planning horizon. Our contributions culminate in a policy that achieves a 16-24% improvement in route quality at a global scale, and to the best of our knowledge, represents the largest published benchmark of IRL algorithms in a real-world setting to date. We conclude by conducting an ablation study of key components, presenting negative results from alternative eigenvalue solvers, and identifying opportunities to further improve scalability via IRL-specific batching strategies.", "type": "Spotlight Poster", "OpenReview": "https://openreview.net/forum?id=z3L59iGALM", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 18216, "title": "Enhancing Neural Subset Selection: Integrating Background Information into Set Representations", "authors": [ "Binghui Xie", "Yatao Bian", "Kaiwen Zhou", "Yongqiang Chen", "Peilin Zhao", "Bo Han", "Wei Meng", "James Cheng" ], "abstract": "Learning neural subset selection tasks, such as compound selection in AI-aided drug discovery, have become increasingly pivotal across diverse applications. The existing methodologies in the field primarily concentrate on constructing models that capture the relationship between utility function values and subsets within their respective supersets. However, these approaches tend to overlook the valuable information contained within the superset when utilizing neural networks to model set functions. In this work, we address this oversight by adopting a probabilistic perspective. Our theoretical findings demonstrate that when the target value is conditioned on both the input set and subset, it is essential to incorporate an invariant sufficient statistic of the superset into the subset of interest for effective learning. This ensures that the output value remains invariant to permutations of the subset and its corresponding superset, enabling identification of the specific superset from which the subset originated. Motivated by these insights, we propose a simple yet effective information aggregation module designed to merge the representations of subsets and supersets from a permutation invariance perspective. Comprehensive empirical evaluations across diverse tasks and datasets validate the enhanced efficacy of our approach over conventional methods, underscoring the practicality and potency of our proposed strategies in real-world contexts.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=eepoE7iLpL", "arxiv_id": "2402.03139", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19610, "title": "TopoMLP: A Simple yet Strong Pipeline for Driving Topology Reasoning", "authors": [ "Dongming Wu", "Jiahao Chang", "Fan Jia", "Yingfei Liu", "Tiancai Wang", "Jianbing Shen" ], "abstract": "Topology reasoning aims to comprehensively understand road scenes and present drivable routes in autonomous driving. It requires detecting road centerlines (lane) and traffic elements, further reasoning their topology relationship, \\textit{i.e.}, lane-lane topology, and lane-traffic topology. In this work, we first present that the topology score relies heavily on detection performance on lane and traffic elements. Therefore, we introduce a powerful 3D lane detector and an improved 2D traffic element detector to extend the upper limit of topology performance. Further, we propose TopoMLP, a simple yet high-performance pipeline for driving topology reasoning. Based on the impressive detection performance, we develop two simple MLP-based heads for topology generation. TopoMLP achieves state-of-the-art performance on OpenLane-V2 dataset, \\textit{i.e.}, 41.2\\% OLS with ResNet-50 backbone. It is also the 1st solution for 1st OpenLane Topology in Autonomous Driving Challenge. We hope such simple and strong pipeline can provide some new insights to the community. Code is at https://github.com/wudongming97/TopoMLP.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=0gTW5JUFTW", "arxiv_id": "2310.06753", "GitHub": [ "https://github.com/wudongming97/TopoMLP" ], "Space": [], "Model": [], "Dataset": [] }, { "id": 17377, "title": "Shadow Cones: A Generalized Framework for Partial Order Embeddings", "authors": [ "Tao Yu", "Toni J.B. Liu", "Albert Tseng", "Christopher De Sa" ], "abstract": "Hyperbolic space has proven to be well-suited for capturing hierarchical relations in data, such as trees and directed acyclic graphs. Prior work introduced the concept of entailment cones, which uses partial orders defined by nested cones in the Poincar\\'e ball to model hierarchies. Here, we introduce the ``shadow cones\" framework, a physics-inspired entailment cone construction. Specifically, we model partial orders as subset relations between shadows formed by a light source and opaque objects in hyperbolic space. The shadow cones framework generalizes entailment cones to a broad class of formulations and hyperbolic space models beyond the Poincar\\'e ball. This results in clear advantages over existing constructions: for example, shadow cones possess better optimization properties over constructions limited to the Poincar\\'e ball. Our experiments on datasets of various sizes and hierarchical structures show that shadow cones consistently and significantly outperform existing entailment cone constructions. These results indicate that shadow cones are an effective way to model partial orders in hyperbolic space, offering physically intuitive and novel insights about the nature of such structures.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=zbKcFZ6Dbp", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19183, "title": "The Generative AI Paradox: \u201cWhat It Can Create, It May Not Understand\u201d", "authors": [ "Peter West", "Ximing Lu", "Nouha Dziri", "Faeze Brahman", "Linjie Li", "Jena D. Hwang", "Liwei Jiang", "Jillian Fisher", "Abhilasha Ravichander", "Khyathi Chandu", "Benjamin Newman", "Pang Wei Koh", "Allyson Ettinger", "Yejin Choi" ], "abstract": "The recent wave of generative AI has sparked unprecedented global attention, with both excitement and concern over potentially superhuman levels of artificial intelligence: models now take only seconds to produce outputs that would challenge or exceed the capabilities even of expert humans. At the same time, models still show basic errors in understanding that would not be expected even in non-expert humans. This presents us with an apparent paradox: how do we reconcile seemingly superhuman capabilities with the persistence of errors that few humans would make? In this work, we posit that this tension reflects a divergence in the configuration of intelligence in today's generative models relative to intelligence in humans. Specifically, we propose and test the **Generative AI Paradox** hypothesis: generative models, having been trained directly to reproduce expert-like outputs, acquire generative capabilities that are not contingent upon---and can therefore exceed---their ability to understand those same types of outputs. This contrasts with humans, for whom basic understanding almost always precedes the ability togenerate expert-level outputs. We test this hypothesis through controlled experiments analyzing generation vs.~understanding in generative models, across both language and image modalities. Our results show that although models can outperform humans in generation, they consistently fall short of human capabilities in measures of understanding, as well as weaker correlation between generation and understanding performance, and more brittleness to adversarial inputs. Our findings support the hypothesis that models' generative capability may not be contingent upon understanding capability, and call for caution in interpreting artificial intelligence by analogy to human intelligence.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=CF8H8MS5P8", "arxiv_id": "2311.00059", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 17744, "title": "LayoutNUWA: Revealing the Hidden Layout Expertise of Large Language Models", "authors": [ "Zecheng Tang", "Chenfei Wu", "Juntao Li", "Nan Duan" ], "abstract": "Graphic layout generation, a growing research field, plays a significant role in user engagement and information perception. Existing methods primarily treat layout generation as a numerical optimization task, focusing on quantitative aspects while overlooking the semantic information of layout, such as the relationship between each layout element. In this paper, we propose LayoutNUWA, the first model that treats layout generation as a code generation task to enhance semantic information and harness the hidden layout expertise of large language models~(LLMs). Concretely, we develop a Code Instruct Tuning (CIT) approach comprising three interconnected modules: 1) the Code Initialization (CI) module quantifies the numerical conditions and initializes them as HTML code with strategically placed masks; 2) the Code Completion (CC) module employs the formatting knowledge of LLMs to fill in the masked portions within the HTML code; 3) the Code Rendering (CR) module transforms the completed code into the final layout output, ensuring a highly interpretable and transparent layout generation procedure that directly maps code to a visualized layout. We attain significant state-of-the-art performance (even over 50\\% improvements compared to previous works) on multiple datasets, showcasing the strong capabilities of LayoutNUWA.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=qCUWVT0Ayy", "arxiv_id": "2309.09506", "GitHub": [ "https://github.com/ProjectNUWA/LayoutNUWA" ], "Space": [], "Model": [], "Dataset": [] }, { "id": 17466, "title": "Are Bert Family Good Instruction Followers? A Study on Their Potential And Limitations", "authors": [ "yisheng xiao", "Juntao Li", "Zechen Sun", "Zechang Li", "Qingrong Xia", "Xinyu Duan", "Zhefeng Wang", "Min Zhang" ], "abstract": "Language modeling at scale has proven very effective and brought unprecedented success to natural language models. Many typical representatives, especially decoder-only models, e.g., BLOOM and LLaMA, and encoder-decoder models, e.g., Flan-T5 and AlexaTM, have exhibited incredible instruction-following capabilities while keeping strong task completion ability. These large language models can achieve superior performance in various tasks and even yield emergent capabilities, e.g., reasoning and universal generalization. Though the above two paradigms are mainstream and well explored, the potential of the BERT family, which are encoder-only based models and have ever been one of the most representative pre-trained models, also deserves attention, at least should be discussed. In this work, we adopt XML-R to explore the effectiveness of the BERT family for instruction following and zero-shot learning. We first design a simple yet effective strategy to utilize the encoder-only models for generation tasks and then conduct multi-task instruction tuning. Experimental results demonstrate that our fine-tuned model, Instruct-XMLR, outperforms Bloomz on all evaluation tasks and achieves comparable performance with mT0 on most tasks. Surprisingly, Instruct-XMLR also possesses strong task and language generalization abilities, indicating that Instruct-XMLR can also serve as a good instruction follower and zero-shot learner. Besides, Instruct-XMLR can accelerate decoding due to its non-autoregressive generation manner, achieving around 3 times speedup compared with current autoregressive large language models. Although we also witnessed several limitations through our experiments, such as the performance decline in long-generation tasks and the shortcoming of length prediction, Instruct-XMLR can still become a good member of the family of current large language models.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=x8VNtpCu1I", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19217, "title": "FreeReg: Image-to-Point Cloud Registration Leveraging Pretrained Diffusion Models and Monocular Depth Estimators", "authors": [ "Haiping Wang", "Yuan Liu", "Bing WANG", "YUJING SUN", "Zhen Dong", "Wenping Wang", "Bisheng Yang" ], "abstract": "Matching cross-modality features between images and point clouds is a fundamental problem for image-to-point cloud registration. However, due to the modality difference between images and points, it is difficult to learn robust and discriminative cross-modality features by existing metric learning methods for feature matching. Instead of applying metric learning on cross-modality data, we propose to unify the modality between images and point clouds by pretrained large-scale models first, and then establish robust correspondence within the same modality. We show that the intermediate features, called diffusion features, extracted by depth-to-image diffusion models are semantically consistent between images and point clouds, which enables the building of coarse but robust cross-modality correspondences. We further extract geometric features on depth maps produced by the monocular depth estimator. By matching such geometric features, we significantly improve the accuracy of the coarse correspondences produced by diffusion features. Extensive experiments demonstrate that without any task-specific training, direct utilization of both features produces accurate image-to-point cloud registration. On three public indoor and outdoor benchmarks, the proposed method averagely achieves a 20.6 percent improvement in Inlier Ratio, a $3.0\\times$ higher Inlier Number, and a 48.6 percent improvement in Registration Recall than existing state-of-the-arts. The codes are available in the supplementary material and will be released upon acceptance.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=BPb5AhT2Vf", "arxiv_id": "2310.03420", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 17657, "title": "ViDA: Homeostatic Visual Domain Adapter for Continual Test Time Adaptation", "authors": [ "Jiaming Liu", "Senqiao Yang", "Peidong Jia", "Renrui Zhang", "Ming Lu", "Yandong Guo", "Wei Xue", "Shanghang Zhang" ], "abstract": "Since real-world machine systems are running in non-stationary environments, Continual Test-Time Adaptation (CTTA) task is proposed to adapt the pre-trained model to continually changing target domains. Recently, existing methods mainly focus on model-based adaptation, which aims to leverage a self-training manner to extract the target domain knowledge. However, pseudo labels can be noisy and the updated model parameters are unreliable under dynamic data distributions, leading to error accumulation and catastrophic forgetting in the continual adaptation process. To tackle these challenges and maintain the model plasticity, we tactfully design a Visual Domain Adapter (ViDA) for CTTA, explicitly handling both domain-specific and domain-shared knowledge. Specifically, we first comprehensively explore the different domain representations of the adapters with trainable high-rank or low-rank embedding spaces. Then we inject ViDAs into the pre-trained model, which leverages high-rank and low-rank features to adapt the current domain distribution and maintain the continual domain-shared knowledge, respectively. To exploit the low-rank and high-rank ViDAs more effectively, we further propose a Homeostatic Knowledge Allotment (HKA) strategy, which adaptively combines different knowledge from each ViDA. Extensive experiments conducted on four widely used benchmarks demonstrate that our proposed method achieves state-of-the-art performance in both classification and segmentation CTTA tasks. Note that, our method can be regarded as a novel transfer paradigm for large-scale models, delivering promising results in adaptation to continually changing distributions.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=sJ88Wg5Bp5", "arxiv_id": "2306.04344", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19309, "title": "Learning Flexible Body Collision Dynamics with Hierarchical Contact Mesh Transformer", "authors": [ "Youn-Yeol Yu", "Jeongwhan Choi", "Woojin Cho", "Kookjin Lee", "Nayong Kim", "Kiseok Chang", "ChangSeung Woo", "ILHO KIM", "SeokWoo Lee", "Joon Young Yang", "SOOYOUNG YOON", "Noseong Park" ], "abstract": "Recently, many mesh-based graph neural network (GNN) models have been proposed for modeling complex high-dimensional physical systems. Remarkable achievements have been made in significantly reducing the solving time compared to traditional numerical solvers. These methods are typically designed to i) reduce the computational cost in solving physical dynamics and/or ii) propose techniques to enhance the solution accuracy in fluid and rigid body dynamics. However, it remains under-explored whether they are effective in addressing the challenges of flexible body dynamics, where instantaneous collisions occur within a very short timeframe.In this paper, we present Hierarchical Contact Mesh Transformer (HCMT), which uses hierarchical mesh structures and can learn long-range dependencies (occurred by collisions) among spatially distant positions of a body --- two close positions in a higher-level mesh corresponds to two distant positions in a lower-level mesh. HCMT enables long-range interactions, and the hierarchical mesh structure quickly propagates collision effects to faraway positions. To this end, it consists of a contact mesh Transformer and a hierarchical mesh Transformer (CMT and HMT, respectively). Lastly, we propose a unique flexible body dynamics dataset, which is commonly used for product designs. We also compare the performance of several baselines using well-known benchmark datasets. Our results show that HCMT provides significant performance improvements over existing methods.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=90yw2uM6J5", "arxiv_id": "2312.12467", "GitHub": [ "https://github.com/yuyudeep/hcmt" ], "Space": [], "Model": [], "Dataset": [] }, { "id": 17918, "title": "Submodular Reinforcement Learning", "authors": [ "Manish Prajapat", "Mojmir Mutny", "Melanie Zeilinger", "Andreas Krause" ], "abstract": "In reinforcement learning (RL), rewards of states are typically considered additive, and following the Markov assumption, they are independent of states visited previously. In many important applications, such as coverage control, experiment design and informative path planning, rewards naturally have diminishing returns, i.e., their value decreases in light of similar states visited previously. To tackle this, we propose Submodular RL (subRL), a paradigm which seeks to optimize more general, non-additive (and history-dependent) rewards modelled via submodular set functions, which capture diminishing returns. Unfortunately, in general, even in tabular settings, we show that the resulting optimization problem is hard to approximate. On the other hand, motivated by the success of greedy algorithms in classical submodular optimization, we propose subPO, a simple policy gradient-based algorithm for subRL that handles non-additive rewards by greedily maximizing marginal gains. Indeed, under some assumptions on the underlying Markov Decision Process (MDP), subPO recovers optimal constant factor approximations of submodular bandits. Moreover, we derive a natural policy gradient approach for locally optimizing subRL instances even in large state- and action- spaces. We showcase the versatility of our approach by applying subPO to several applications, such as biodiversity monitoring, Bayesian experiment design, informative path planning, and coverage maximization. Our results demonstrate sample efficiency, as well as scalability to high-dimensional state-action spaces.", "type": "Spotlight Poster", "OpenReview": "https://openreview.net/forum?id=loYSzjSaAK", "arxiv_id": "2307.13372", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19797, "title": "One-shot Empirical Privacy Estimation for Federated Learning", "authors": [ "Galen Andrew", "Peter Kairouz", "Sewoong Oh", "Alina Oprea", "Hugh Brendan McMahan", "Vinith Menon Suriyakumar" ], "abstract": "Privacy estimation techniques for differentially private (DP) algorithms are useful for comparing against analytical bounds, or to empirically measure privacy loss insettings where known analytical bounds are not tight. However, existing privacy auditing techniques usually make strong assumptions on the adversary (e.g., knowl-edge of intermediate model iterates or the training data distribution), are tailored to specific tasks, model architectures, or DP algorithm, and/or require retraining the model many times (typically on the order of thousands). These shortcomings make deploying such techniques at scale difficult in practice, especially in federatedsettings where model training can take days or weeks. In this work, we present a novel \u201cone-shot\u201d approach that can systematically address these challenges, al-lowing efficient auditing or estimation of the privacy loss of a model during the same, single training run used to fit model parameters, and without requiring anyaprioriknowledge about the model architecture, task, or DP algorithm. We show that our method provides provably correct estimates for the privacy loss under the Gaussian mechanism, and we demonstrate its performance on a well-established FL benchmark dataset under several adversarial threat models.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=0BqyZSWfzo", "arxiv_id": "2302.03098", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19194, "title": "Inducing High Energy-Latency of Large Vision-Language Models with Verbose Images", "authors": [ "Kuofeng Gao", "Yang Bai", "Jindong Gu", "Shu-Tao Xia", "Philip Torr", "Zhifeng Li", "Wei Liu" ], "abstract": "Large vision-language models (VLMs) such as GPT-4 have achieved exceptional performance across various multi-modal tasks. However, the deployment of VLMs necessitates substantial energy consumption and computational resources. Once attackers maliciously induce high energy consumption and latency time (energy-latency cost) during inference of VLMs, it will exhaust computational resources. In this paper, we explore this attack surface about availability of VLMs and aim to induce high energy-latency cost during inference of VLMs. We find that high energy-latency cost during inference of VLMs can be manipulated by maximizing the length of generated sequences. To this end, we propose verbose images, with the goal of crafting an imperceptible perturbation to induce VLMs to generate long sentences during inference. Concretely, we design three loss objectives. First, a loss is proposed to delay the occurrence of end-of-sequence (EOS) token, where EOS token is a signal for VLMs to stop generating further tokens. Moreover, an uncertainty loss and a token diversity loss are proposed to increase the uncertainty over each generated token and the diversity among all tokens of the whole generated sequence, respectively, which can break output dependency at token-level and sequence-level. Furthermore, a temporal weight adjustment algorithm is proposed, which can effectively balance these losses. Extensive experiments demonstrate that our verbose images can increase the length of generated sequences by 7.87\u00d7 and 8.56\u00d7 compared to original images on MS-COCO and ImageNet datasets, which presents potential challenges for various applications.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=BteuUysuXX", "arxiv_id": "2401.11170", "GitHub": [ "https://github.com/KuofengGao/Verbose_Images" ], "Space": [], "Model": [], "Dataset": [] }, { "id": 19441, "title": "Be Aware of the Neighborhood Effect: Modeling Selection Bias under Interference for Recommendation", "authors": [ "Haoxuan Li", "Chunyuan Zheng", "Sihao Ding", "Peng Wu", "Zhi Geng", "Fuli Feng", "Xiangnan He" ], "abstract": "The interaction between users and recommender systems is not only affected by selection bias but also the neighborhood effect, i.e., the interaction between a user and an item is affected by the interactions between other users and other items, or between the same user and other items, or between other users and the same item. Many previous studies have focused on addressing selection bias to achieve unbiased learning of the prediction model, but the lack of consideration of neighborhood effects can lead to biased estimates and suboptimal performance of the prediction model. In this paper, we formally formulate the neighborhood effect as an interference problem from the perspective of causal inference and introduce a treatment representation to capture the neighborhood effect. On this basis, we propose a novel ideal loss that can be used to deal with selection bias in the presence of neighborhood effects. In addition, we further develop two novel estimators for the ideal loss. We theoretically establish the connection between the proposed methods and previous methods ignoring the neighborhood effect and show that the proposed methods achieve unbiased learning when both selection bias and neighborhood effects are present, while the existing methods are biased. Extensive semi-synthetic and real-world experiments are conducted to demonstrate the effectiveness of the proposed methods.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=52fz5sUAy2", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19055, "title": "Debiased Collaborative Filtering with Kernel-based Causal Balancing", "authors": [ "Haoxuan Li", "Yanghao Xiao", "Chunyuan Zheng", "Peng Wu", "Zhi Geng", "Xu Chen", "Peng Cui" ], "abstract": "Collaborative filtering builds personalized models from the collected user feedback. However, the collected data is observational rather than experimental, leading to various biases in the data, which can significantly affect the learned model. To address this issue, many studies have focused on propensity-based methods to combat the selection bias by reweighting the sample loss, and demonstrate thatbalancing is important for debiasing both theoretically and empirically. However, there are two questions that still need to be addressed: which function class should be balanced and how to effectively balance that function class? In this paper, we first perform theoretical analysis to show the effect of balancing finite-dimensional function classes on the bias of IPS and DR methods, and based on this, we propose a universal kernel-based balancing method to balance functions on the reproducing kernel Hilbert space. In addition, we propose a novel adaptive causal balancing method during the alternating update between unbiased evaluation and training of the prediction model. Specifically, the prediction loss of the model is projected in the kernel-based covariate function space, and the projection coefficients are used to determine which functions should be prioritized for balancing to reduce the estimation bias. We conduct extensive experiments on three real-world datasets to demonstrate the effectiveness of the proposed approach.", "type": "Spotlight Poster", "OpenReview": "https://openreview.net/forum?id=Ffjc8ApSbt", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19133, "title": "MetaCoCo: A New Few-Shot Classification Benchmark with Spurious Correlation", "authors": [ "Min Zhang", "Haoxuan Li", "Fei Wu", "Kun Kuang" ], "abstract": "Out-of-distribution (OOD) problems in few-shot classification (FSC) occur when novel classes sampled from testing distributions differ from base classes drawn from training distributions, which considerably degrades the performance of deep learning models deployed in real-world applications. Recent studies suggest that the OOD problems in FSC mainly including: (a) cross-domain few-shot classification (CD-FSC) and (b) spurious-correlation few-shot classification (SC-FSC). Specifically, CD-FSC occurs when a classifier learns transferring knowledge from base classes drawn from \\underline{seen} training distributions but recognizes novel classes sampled from unseen testing distributions. In contrast, SC-FSC arises when a classifier relies on non-causal features (or contexts) that happen to be correlated with the labels (or concepts) in base classes but such relationships no longer hold during the model deployment. Despite CD-FSC has been extensively studied, SC-FSC remains understudied due to lack of the corresponding evaluation benchmarks. To this end, we present Meta Concept Context (MetaCoCo), a benchmark with spurious-correlation shifts collected from real-world scenarios. Moreover, to quantify the extent of spurious-correlation shifts of the presented MetaCoCo, we further propose a metric by using CLIP as a pre-trained vision-language model. Extensive experiments on the proposed benchmark are performed to evaluate the state-of-the-art methods in FSC, cross-domain shifts, and self-supervised learning. The experimental results show that the performance of the existing methods degrades significantly in the presence of spurious-correlation shifts. We open-source all codes of our benchmark and hope that the proposed MetaCoCo can facilitate future research on spurious-correlation shifts problems in FSC.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=DiWRG9JTWZ", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19416, "title": "Jointly Training Large Autoregressive Multimodal Models", "authors": [ "Emanuele Aiello", "LILI YU", "Yixin Nie", "Armen Aghajanyan", "Barlas Oguz" ], "abstract": "In recent years, advances in the large-scale pretraining of language and text-to-image models have revolutionized the field of machine learning. Yet, integrating these two modalities into a single, robust model capable of generating seamless multimodal outputs remains a significant challenge. To address this gap, we present the Joint Autoregressive Mixture (JAM) framework, a modular approach that systematically fuses existing text and image generation models. We also introduce a specialized, data-efficient instruction-tuning strategy, tailored for mixed-modal generation tasks. Our final instruct-tuned model demonstrates unparalleled performance in generating high-quality multimodal outputs and represents the first model explicitly designed for this purpose.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=5jcav5RcKw", "arxiv_id": "2309.15564", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 18835, "title": "Learning interpretable control inputs and dynamics underlying animal locomotion", "authors": [ "Thomas Soares Mullen", "Marine Schimel", "Guillaume Hennequin", "Christian K. Machens", "Michael Orger", "Adrien Jouary" ], "abstract": "A central objective in neuroscience is to understand how the brain orchestrates movement. Recent advances in automated tracking technologies have made it possible to document behavior with unprecedented temporal resolution and scale, generating rich datasets which can be exploited to gain insights into the neural control of movement. One common approach is to identify stereotypical motor primitives using cluster analysis. However, this categorical description can limit our ability to model the effect of more continuous control schemes. Here we take a control theoretic approach to behavioral modeling and argue that movements can be understood as the output of a controlled dynamical system. Previously, models of movement dynamics, trained solely on behavioral data, have been effective in reproducing observed features of neural activity. These models addressed specific scenarios where animals were trained to execute particular movements upon receiving a prompt. In this study, we extend this approach to analyze the full natural locomotor repertoire of an animal: the zebrafish larva. Our findings demonstrate that this repertoire can be effectively generated through a sparse control signal driving a latent Recurrent Neural Network (RNN). Our model's learned latent space preserves key kinematic features and disentangles different categories of movements. To further interpret the latent dynamics, we used balanced model reduction to yield a simplified model. Collectively, our methods serve as a case study for interpretable system identification, and offer a novel framework for understanding neural activity in relation to movement.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=MFCjgEOLJT", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 18059, "title": "CausalTime: Realistically Generated Time-series for Benchmarking of Causal Discovery", "authors": [ "Yuxiao Cheng", "Ziqian Wang", "Tingxiong Xiao", "Qin Zhong", "Jinli Suo", "Kunlun He" ], "abstract": "Time-series causal discovery (TSCD) is a fundamental problem of machine learning. However, existing synthetic datasets cannot properly evaluate or predict the algorithms' performance on real data. This study introduces the CausalTime pipeline to generate time-series that highly resemble the real data and with ground truth causal graphs for quantitative performance evaluation. The pipeline starts from real observations in a specific scenario and produces a matching benchmark dataset. Firstly, we harness deep neural networks along with normalizing flow to accurately capture realistic dynamics. Secondly, we extract hypothesized causal graphs by performing importance analysis on the neural network or leveraging prior knowledge. Thirdly, we derive the ground truth causal graphs by splitting the causal model into causal term, residual term, and noise term. Lastly, using the fitted network and the derived causal graph, we generate corresponding versatile time-series proper for algorithm assessment. In the experiments, we validate the fidelity of the generated data through qualitative and quantitative experiments, followed by a benchmarking of existing TSCD algorithms using these generated datasets. CausalTime offers a feasible solution to evaluating TSCD algorithms in real applications and can be generalized to a wide range of fields. For easy use of the proposed approach, we also provide a user-friendly website, hosted on www.causaltime.cc.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=iad1yyyGme", "arxiv_id": "2310.01753", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 18615, "title": "A Study of Bayesian Neural Network Surrogates for Bayesian Optimization", "authors": [ "Yucen Lily Li", "Tim G. J. Rudner", "Andrew Gordon Wilson" ], "abstract": "Bayesian optimization is a highly efficient approach to optimizing objective functions which are expensive to query. These objectives are typically represented by Gaussian process (GP) surrogate models which are easy to optimize and support exact inference. While standard GP surrogates have been well-established in Bayesian optimization, Bayesian neural networks (BNNs) have recently become practical function approximators, with many benefits over standard GPs such as the ability to naturally handle non-stationarity and learn representations for high-dimensional data. In this paper, we study BNNs as alternatives to standard GP surrogates for optimization. We consider a variety of approximate inference procedures for finite-width BNNs, including high-quality Hamiltonian Monte Carlo, low-cost stochastic MCMC, and heuristics such as deep ensembles. We also consider infinite-width BNNs, linearized Laplace approximations, and partially stochastic models such as deep kernel learning. We evaluate this collection of surrogate models on diverse problems with varying dimensionality, number of objectives, non-stationarity, and discrete and continuous inputs. We find: (i) the ranking of methods is highly problem dependent, suggesting the need for tailored inductive biases; (ii) HMC is the most successful approximate inference procedure for fully stochastic BNNs; (iii) full stochasticity may be unnecessary as deep kernel learning is relatively competitive; (iv) deep ensembles perform relatively poorly; (v) infinite-width BNNs are particularly promising, especially in high dimensions.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=SA19ijj44B", "arxiv_id": "2305.20028", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 17662, "title": "SocioDojo: Building Lifelong Analytical Agents with Real-world Text and Time Series", "authors": [ "Junyan Cheng", "Peter Chin" ], "abstract": "We introduce SocioDojo, an open-ended lifelong learning environment for developing ready-to-deploy autonomous agents capable of performing human-like analysis and decision-making on societal topics such as economics, finance, politics, and culture. It consists of (1) information sources from news, social media, reports, etc., (2) a knowledge base built from books, journals, and encyclopedias, plus a toolbox of Internet and knowledge graph search interfaces, (3) 30K high-quality time series in finance, economy, society, and polls, which support a novel task called \"hyperportfolio\", that can reliably and scalably evaluate societal analysis and decision-making power of agents, inspired by portfolio optimization with time series as assets to \"invest\". We also propose a novel Analyst-Assistant-Actuator architecture for the hyperportfolio task, and a Hypothesis & Proof prompting for producing in-depth analyses on input news, articles, etc. to assist decision-making. We perform experiments and ablation studies to explore the factors that impact performance. The results show that our proposed method achieves improvements of 32.4% and 30.4% compared to the state-of-the-art method in the two experimental settings.", "type": "Spotlight Poster", "OpenReview": "https://openreview.net/forum?id=s9z0HzWJJp", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 17562, "title": "Bridging Neural and Symbolic Representations with Transitional Dictionary Learning", "authors": [ "Junyan Cheng", "Peter Chin" ], "abstract": "This paper introduces a novel Transitional Dictionary Learning (TDL) framework that can implicitly learn symbolic knowledge, such as visual parts and relations, by reconstructing the input as a combination of parts with implicit relations. We propose a game-theoretic diffusion model to decompose the input into visual parts using the dictionaries learned by the Expectation Maximization (EM) algorithm, implemented as the online prototype clustering, based on the decomposition results. Additionally, two metrics, clustering information gain, and heuristic shape score are proposed to evaluate the model. Experiments are conducted on three abstract compositional visual object datasets, which require the model to utilize the compositionality of data instead of simply exploiting visual features. Then, three tasks on symbol grounding to predefined classes of parts and relations, as well as transfer learning to unseen classes, followed by a human evaluation, were carried out on these datasets. The results show that the proposed method discovers compositional patterns, which significantly outperforms the state-of-the-art unsupervised part segmentation methods that rely on visual features from pre-trained backbones. Furthermore, the proposed metrics are consistent with human evaluations.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=uqxBTcWRnj", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19763, "title": "Amortizing intractable inference in large language models", "authors": [ "Edward J Hu", "Moksh Jain", "Eric Elmoznino", "Younesse Kaddar", "Guillaume Lajoie", "Yoshua Bengio", "Nikolay Malkin" ], "abstract": "Autoregressive large language models (LLMs) compress knowledge from their training data through next-token conditional distributions. This limits tractable querying of this knowledge to start-to-end autoregressive sampling. However, many tasks of interest---including sequence continuation, infilling, and other forms of constrained generation---involve sampling from intractable posterior distributions. We address this limitation by using amortized Bayesian inference to sample from these intractable posteriors. Such amortization is algorithmically achieved by fine-tuning LLMs via diversity-seeking reinforcement learning algorithms: generative flow networks (GFlowNets). We empirically demonstrate that this distribution-matching paradigm of LLM fine-tuning can serve as an effective alternative to maximum-likelihood training and reward-maximizing policy optimization. As an important application, we interpret chain-of-thought reasoning as a latent variable modeling problem and demonstrate that our approach enables data-efficient adaptation of LLMs to tasks that require multi-step rationalization and tool use.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=Ouj6p4ca60", "arxiv_id": "2310.04363", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 17723, "title": "Closing the Gap between TD Learning and Supervised Learning - A Generalisation Point of View.", "authors": [ "Raj Ghugare", "Matthieu Geist", "Glen Berseth", "Benjamin Eysenbach" ], "abstract": "Some reinforcement learning (RL) algorithms have the capability of recombining together pieces of previously seen experience to solve a task never seen before during training. This oft-sought property is one of the few ways in which dynamic programming based RL algorithms are considered different from supervised learning (SL) based RL algorithms. Yet, recent RL methods based on off-the-shelf SL algorithms achieve excellent results without an explicit mechanism for stitching; it remains unclear whether those methods forgo this important stitching property. This paper studies this question in the setting of goal-reaching problems. We show that the desirable stitching property corresponds to a form of generalization: after training on a distribution of (state, goal) pairs, one would like to evaluate on (state, goal) pairs not seen together in the training data. Our analysis shows that this sort of generalization is different from i.i.d. generalization. This connection between stitching and generalization reveals why we should not expect existing RL methods based on SL to perform stitching, even in the limit of large datasets and models. We experimentally validate this result on carefully constructed datasets.This connection suggests a simple remedy, the same remedy for improving generalization in supervised learning: data augmentation. We propose a naive temporal data augmentation approach and demonstrate that adding it to RL methods based on SL enables them to successfully stitch together experience, so that they succeed in navigating between states and goals unseen together during training.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=qg5JENs0N4", "arxiv_id": "2401.11237", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19739, "title": "Gene Regulatory Network Inference in the Presence of Dropouts: a Causal View", "authors": [ "Haoyue Dai", "Ignavier Ng", "Gongxu Luo", "Peter Spirtes", "Petar Stojanov", "Kun Zhang" ], "abstract": "Gene regulatory network inference (GRNI) is a challenging problem, particularly owing to the presence of zeros in single-cell RNA sequencing data: some are biological zeros representing no gene expression, while some others are technical zeros arising from the sequencing procedure (aka dropouts), which may bias GRNI by distorting the joint distribution of the measured gene expressions. Existing approaches typically handle dropout error via imputation, which may introduce spurious relations as the true joint distribution is generally unidentifiable. To tackle this issue, we introduce a causal graphical model to characterize the dropout mechanism, namely, Causal Dropout Model. We provide a simple yet effective theoretical result: interestingly, the conditional independence (CI) relations in the data with dropouts, after deleting the samples with zero values (regardless if technical or not) for the conditioned variables, are asymptotically identical to the CI relations in the original data without dropouts. This particular test-wise deletion procedure, in which we perform CI tests on the samples without zeros for the conditioned variables, can be seamlessly integrated with existing structure learning approaches including constraint-based and greedy score-based methods, thus giving rise to a principled framework for GRNI in the presence of dropouts. We further show that the causal dropout model can be validated from data, and many existing statistical models to handle dropouts fit into our model as specific parametric instances. Empirical evaluation on synthetic, curated, and real-world experimental transcriptomic data comprehensively demonstrate the efficacy of our method.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=gFR4QwK53h", "arxiv_id": "2403.15500", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 17454, "title": "SALMON: Self-Alignment with Principle-Following Reward Models", "authors": [ "Zhiqing Sun", "Yikang Shen", "Hongxin Zhang", "Qinhong Zhou", "Zhenfang Chen", "David Daniel Cox", "Yiming Yang", "Chuang Gan" ], "abstract": "Supervised Fine-Tuning (SFT) on human demonstrations combined with Reinforcement Learning from Human Feedback (RLHF) constitutes a powerful alignment paradigm for Large Language Model (LLM) AI-assistant agents. However, a significant limitation of this approach is its substantial dependency on high-quality human annotations, making its broader application to intricate tasks challenging due to difficulties in obtaining consistent response demonstrations and task-specific response preferences. To address this issue, we present a novel alignment paradigm in this paper, termed SALMON (Self-ALignMent with principle-fOllowiNg reward models). This paradigm offers the ability to align base language models with minimal human supervision, using only a select set of human-defined principles, yet achieves superior performance. Central to our approach is a principle-following reward model. Trained on synthetic preference data, this reward model can generate reward scores based on arbitrary human-defined principles. Therefore, during the RL training phase, by merely adjusting these principles, we gain full control over the preferences of the reward model, subsequently influencing the behavior of the RL-trained policy model, and eliminating the traditional reliance on exhaustive online human preference collection. Applying our method to the LLaMA-2-70b base language model, we developed an AI assistant named Dromedary-2. With only 6 exemplars for in-context learning and 31 human-defined principles, Dromedary-2 significantly surpasses the performance of several state-of-the-art AI systems, including LLaMA-2-Chat-70b, on various benchmark datasets. We have open-sourced the code and model weights to encourage further research into aligning LLM-based AI agents with enhanced supervision efficiency, improved controllability, and scalable oversight.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=xJbsmB8UMx", "arxiv_id": "2310.05910", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 17693, "title": "Functional Interpolation for Relative Positions improves Long Context Transformers", "authors": [ "Shanda Li", "Chong You", "Guru Guruganesh", "Joshua Ainslie", "Santiago Ontanon", "Manzil Zaheer", "Sumit Sanghai", "Yiming Yang", "Sanjiv Kumar", "Srinadh Bhojanapalli" ], "abstract": "Preventing the performance decay of Transformers on inputs longer than those used for training has been an important challenge in extending the context length of these models. Though the Transformer architecture has fundamentally no limits on the input sequence lengths it can process, the choice of position encoding used during training can limit the performance of these models on longer inputs. We propose a novel functional relative position encoding with progressive interpolation, FIRE, to improve Transformer generalization to longer contexts. We theoretically prove that this can represent some of the popular relative position encodings, such as T5's RPE, Alibi, and Kerple. We next empirically show that FIRE models have better generalization to longer contexts on both zero-shot language modeling and long text benchmarks.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=rR03qFesqk", "arxiv_id": "2310.04418", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 17841, "title": "Searching for High-Value Molecules Using Reinforcement Learning and Transformers", "authors": [ "Raj Ghugare", "Santiago Miret", "Adriana Hugessen", "Mariano Phielipp", "Glen Berseth" ], "abstract": "Reinforcement learning (RL) over text representations can be effective for finding high-value policies that can search over graphs. However, RL requires careful structuring of the search space and algorithm design to be effective in this challenge. Through extensive experiments, we explore how different design choices for text grammar and algorithmic choices for training can affect an RL policy's ability to generate molecules with desired properties. We arrive at a new RL-based molecular design algorithm (ChemRLformer) and perform a thorough analysis using 25 molecule design tasks, including computationally complex protein docking simulations. From this analysis, we discover unique insights in this problem space and show that ChemRLformer achieves state-of-the-art performance while being more straightforward than prior work by demystifying which design choices are actually helpful for text-based molecule design.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=nqlymMx42E", "arxiv_id": "2310.02902", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 18045, "title": "Learning Performance-Improving Code Edits", "authors": [ "Alexander G Shypula", "Aman Madaan", "Yimeng Zeng", "Uri Alon", "Jacob R. Gardner", "Yiming Yang", "Milad Hashemi", "Graham Neubig", "Parthasarathy Ranganathan", "Osbert Bastani", "Amir Yazdanbakhsh" ], "abstract": "With the waning of Moore's law, optimizing program performance has become a major focus of software research. However, high-level optimizations such as API and algorithm changes remain elusive due to the difficulty of understanding the semantics of code.Simultaneously, pretrained large language models (LLMs) have demonstrated strong capabilities at solving a wide range of programming tasks.To that end, we introduce a framework for adapting LLMs to high-level program optimization.First, we curate a dataset of performance-improving edits made by human programmers of over 77,000 competitive C++ programming submission pairs, accompanied by extensive unit tests.A major challenge is the significant variability of measuring performance on commodity hardware, which can lead to spurious \"improvements\".To isolate and reliably evaluate the impact of program optimizations, we design an environment based on the gem5 full system simulator, the de facto simulator used in academia and industry.Next, we propose a broad range of adaptation strategies for code optimization; for prompting, these include retrieval-based few-shot prompting and chain-of-thought, and for finetuning, these include performance-conditioned generation and synthetic data augmentation based on self-play.A combination of these techniques achieves an average speedup of 5.65 times on CodeLlama-13B and 6.86 times on GPT-3.5, surpassing the best human performance (4.06 times).We find our proposed performance-conditioned generation is particularly effective at improving performance as well as increasing the fraction of optimized programs.", "type": "Spotlight Poster", "OpenReview": "https://openreview.net/forum?id=ix7rLVHXyY", "arxiv_id": "2302.07867", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19730, "title": "An Analytical Solution to Gauss-Newton Loss for Direct Image Alignment", "authors": [ "Sergei Solonets", "Daniil Sinitsyn", "Lukas Von Stumberg", "Nikita Araslanov", "Daniel Cremers" ], "abstract": "Direct image alignment is a widely used technique for relative 6DoF pose estimation between two images, but its accuracy strongly depends on pose initialization.Therefore, recent end-to-end frameworks focused on training objectives, such as the Gauss-Newton loss, which increase the convergence basin of the learned feature descriptors.However, the training data may be biased toward a specific type of motion and pose initialization,thus limiting the generalization of these methods.In this work, we derive a closed-form solution to the expected optimum of the Gauss-Newton loss. The solution is agnostic to the underlying feature representation and allows us to dynamically adjust the basin of convergence according to our assumptions about the uncertainty in the current estimates. This offers effective control over the convergence properties of the algorithm.Despite using self-supervised feature embeddings, our solution achieves compelling accuracy w.r.t. the state-of-the-art direct image alignment methods trained end-to-end with pose supervision, and exhibits improved robustness to pose initialization.Our analytical solution provides insight into the inherent limitations of end-to-end learning with the Gauss-Newton loss and establishes an intriguing connection between direct image alignment and feature-matching approaches.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=mE52zURNGc", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19780, "title": "Accelerating Distributed Stochastic Optimization via Self-Repellent Random Walks", "authors": [ "Jie Hu", "Vishwaraj Doshi", "Do Young Eun" ], "abstract": "We study a family of distributed stochastic optimization algorithms where gradients are sampled by a token traversing a network of agents in random-walk fashion. Typically, these random-walks are chosen to be Markov chains that asymptotically sample from a desired target distribution, and play a critical role in the convergence of the optimization iterates. In this paper, we take a novel approach by replacing the standard *linear* Markovian token by one which follows a *non-linear* Markov chain - namely the Self-Repellent Radom Walk (SRRW). Defined for any given 'base' Markov chain, the SRRW, parameterized by a positive scalar $\\\\alpha$, is less likely to transition to states that were highly visited in the past, thus the name. In the context of MCMC sampling on a graph, a recent breakthrough in Doshi et al. (2023) shows that the SRRW achieves $O(1/\\\\alpha)$ decrease in the asymptotic variance for sampling. We propose the use of a `generalized' version of the SRRW to drive token algorithms for distributed stochastic optimization in the form of stochastic approximation, termed SA-SRRW. We prove that the optimization iterate errors of the resulting SA-SRRW converge to zero almost surely and prove a central limit theorem, deriving the explicit form of the resulting asymptotic covariance matrix corresponding to iterate errors. This asymptotic covariance is always smaller than that of an algorithm driven by the base Markov chain and decreases at rate $O(1/\\\\alpha^2)$ - the performance benefit of using SRRW thereby *amplified* in the stochastic optimization context. Empirical results support our theoretical findings.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=BV1PHbTJzd", "arxiv_id": "2401.09665", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19770, "title": "Improved Active Learning via Dependent Leverage Score Sampling", "authors": [ "Atsushi Shimizu", "Xiaoou Cheng", "Christopher Musco", "Jonathan Weare" ], "abstract": "We show how to obtain improved active learning methods in the agnostic (adversarial noise) setting by combining marginal leverage score sampling with non-independent sampling strategies that promote spatial coverage. In particular, we propose an easily implemented method based on the \\emph{pivotal sampling algorithm}, which we test on problems motivated by learning-based methods for parametric PDEs and uncertainty quantification. In comparison to independent sampling, our method reduces the number of samples needed to reach a given target accuracy by up to $50\\%$.We support our findings with two theoretical results. First, we show that any non-independent leverage score sampling method that obeys a weak \\emph{one-sided $\\ell_{\\infty}$ independence condition} (which includes pivotal sampling) can actively learn $d$ dimensional linear functions with $O(d\\log d)$ samples, matching independent sampling. This result extends recent work on matrix Chernoff bounds under $\\ell_{\\infty}$ independence, and may be of interest for analyzing other sampling strategies beyond pivotal sampling. Second, we show that, for the important case of polynomial regression, our pivotal method obtains an improved bound of $O(d)$ samples.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=IYxDy2jDFL", "arxiv_id": "2310.04966", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 17641, "title": "NeuroBack: Improving CDCL SAT Solving using Graph Neural Networks", "authors": [ "Wenxi Wang", "Yang Hu", "Mohit Tiwari", "Sarfraz Khurshid", "Kenneth McMillan", "Risto Miikkulainen" ], "abstract": "Propositional satisfiability (SAT) is an NP-complete problem that impacts manyresearch fields, such as planning, verification, and security. Mainstream modernSAT solvers are based on the Conflict-Driven Clause Learning (CDCL) algorithm.Recent work aimed to enhance CDCL SAT solvers using Graph Neural Networks(GNNs). However, so far this approach either has not made solving more effective,or required substantial GPU resources for frequent online model inferences. Aimingto make GNN improvements practical, this paper proposes an approach calledNeuroBack, which builds on two insights: (1) predicting phases (i.e., values) ofvariables appearing in the majority (or even all) of the satisfying assignments areessential for CDCL SAT solving, and (2) it is sufficient to query the neural modelonly once for the predictions before the SAT solving starts. Once trained, theoffline model inference allows NeuroBack to execute exclusively on the CPU,removing its reliance on GPU resources. To train NeuroBack, a new dataset calledDataBack containing 120,286 data samples is created. Finally, NeuroBack is implementedas an enhancement to a state-of-the-art SAT solver called Kissat. As a result,it allowed Kissat to solve 5.2% more problems on the recent SAT competitionproblem set, SATCOMP-2022. NeuroBack therefore shows how machine learningcan be harnessed to improve SAT solving in an effective and practical manner.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=samyfu6G93", "arxiv_id": "2110.14053", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19731, "title": "Unprocessing Seven Years of Algorithmic Fairness", "authors": [ "Andr\u00e9 Cruz", "Moritz Hardt" ], "abstract": "Seven years ago, researchers proposed a postprocessing method to equalize the error rates of a model across different demographic groups. The work launched hundreds of papers purporting to improve over the postprocessing baseline. We empirically evaluate these claims through thousands of model evaluations on several tabular datasets. We find that the fairness-accuracy Pareto frontier achieved by postprocessing contains all other methods we were feasibly able to evaluate. In doing so, we address two common methodological errors that have confounded previous observations. One relates to the comparison of methods with different unconstrained base models. The other concerns methods achieving different levels of constraint relaxation. At the heart of our study is a simple idea we call unprocessing that roughly corresponds to the inverse of postprocessing. Unprocessing allows for a direct comparison of methods using different underlying models and levels of relaxation.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=jr03SfWsBS", "arxiv_id": "2306.07261", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19400, "title": "Two-timescale Extragradient for Finding Local Minimax Points", "authors": [ "Jiseok Chae", "Kyuwon Kim", "Donghwan Kim" ], "abstract": "Minimax problems are notoriously challenging to optimize. However, we present that two-timescale extragradient can be a viable solution. By utilizing dynamical systems theory, we show that it converges to points that satisfy the second-order necessary condition of local minimax points, under mild conditions. This work provably improves upon all previous results as we eliminate a crucial assumption that the Hessian, with respect to the maximization variable, is nondegenerate.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=6CIGhcJYJH", "arxiv_id": "2305.16242", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19399, "title": "DrS: Learning Reusable Dense Rewards for Multi-Stage Tasks", "authors": [ "Tongzhou Mu", "Minghua Liu", "Hao Su" ], "abstract": "The success of many RL techniques heavily relies on human-engineered dense rewards, which typically demands substantial domain expertise and extensive trial and error. In our work, we propose **DrS** (**D**ense **r**eward learning from **S**tages), a novel approach for learning *reusable* dense rewards for multi-stage tasks in a data-driven manner. By leveraging the stage structures of the task, DrS learns a high-quality dense reward from sparse rewards and demonstrations if given. The learned rewards can be *reused* in unseen tasks, thus reducing the human effort for reward engineering. Extensive experiments on three physical robot manipulation task families with 1000+ task variants demonstrate that our learned rewards can be reused in unseen tasks, resulting in improved performance and sample efficiency of RL algorithms. The learned rewards even achieve comparable performance to human-engineered rewards on some tasks. See our [project page](https://sites.google.com/view/iclr24drs) for videos.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=6CZ50WgfCG", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19027, "title": "Duolando: Follower GPT with Off-Policy Reinforcement Learning for Dance Accompaniment", "authors": [ "Li Siyao", "Tianpei Gu", "Zhitao Yang", "Zhengyu Lin", "Ziwei Liu", "Henghui Ding", "Lei Yang", "Chen Change Loy" ], "abstract": "We introduce a novel task within the field of human motion generation, termed dance accompaniment, which necessitates the generation of responsive movements from a dance partner, the \"follower\", synchronized with the lead dancer\u2019s movements and the underlying musical rhythm. Unlike existing solo or group dance generation tasks, a duet dance scenario entails a heightened degree of interaction between the two participants, requiring delicate coordination in both pose and position. To support this task, we first build a large-scale and diverse duet interactive dance dataset, DD100, by recording about 115.4 minutes of professional dancers\u2019 performances. To address the challenges inherent in this task, we propose a GPT based model, Duolando, which autoregressively predicts the subsequent tokenized motion conditioned on the coordinated information of the music, the leader\u2019s and the follower\u2019s movements. To further enhance the GPT\u2019s capabilities of generating stable results on unseen conditions (music and leader motions), we devise an off-policy reinforcement learning strategy that allows the model to explore viable trajectories from out-of-distribution samplings, guided by human-defined rewards. Based on the collected dataset and proposed method, we establish a benchmark with several carefully designed metrics.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=GW4j4n2cjH", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19398, "title": "Personalize Segment Anything Model with One Shot", "authors": [ "Renrui Zhang", "Zhengkai Jiang", "Ziyu Guo", "Shilin Yan", "Junting Pan", "Hao Dong", "Yu Qiao", "Peng Gao", "Hongsheng Li" ], "abstract": "Driven by large-data pre-training, Segment Anything Model (SAM) has been demonstrated as a powerful promptable framework, revolutionizing the segmentation field. Despite the generality, customizing SAM for specific visual concepts without man-powered prompting is under-explored, e.g., automatically segmenting your pet dog in numerous images. In this paper, we introduce a training-free Personalization approach for SAM, termed PerSAM. Given only one-shot data, i.e., a single image with a reference mask, we first obtain a positive-negative location prior for the target concept in new images. Then, aided by target visual semantics, we empower SAM for personalized object segmentation via two proposed techniques: target-guided attention and target-semantic prompting. In this way, we can effectively customize the general-purpose SAM for private use without any training. To further alleviate the ambiguity of segmentation scales, we present an efficient one-shot fine-tuning variant, PerSAM-F. Freezing the entire SAM, we introduce a scale-aware fine-tuning to aggregate multi-scale masks, which only tunes 2 parameters within 10 seconds for improved performance. To demonstrate our efficacy, we construct a new dataset, PerSeg, for the evaluation of personalized object segmentation, and also test our methods on various one-shot image and video segmentation benchmarks. Besides, we propose to leverage PerSAM to improve DreamBooth for personalized text-to-image synthesis. By mitigating the disturbance of training-set backgrounds, our approach showcases better target appearance generation and higher fidelity to the input text prompt.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=6Gzkhoc6YS", "arxiv_id": "2305.03048", "GitHub": [ "https://github.com/ZrrSkywalker/Personalize-SAM" ], "Space": [], "Model": [], "Dataset": [] }, { "id": 19397, "title": "Conserve-Update-Revise to Cure Generalization and Robustness Trade-off in Adversarial Training", "authors": [ "Shruthi Gowda", "Bahram Zonooz", "Elahe Arani" ], "abstract": "Adversarial training improves the robustness of neural networks against adversarial attacks, albeit at the expense of the trade-off between standard and robust generalization.To unveil the underlying factors driving this phenomenon, we examine the layer-wise learning capabilities of neural networks during the transition from a standard to an adversarial setting. Our empirical findings demonstrate that selectively updating specific layers while preserving others can substantially enhance the network's learning capacity. We, therefore, propose CURE, a novel training framework that leverages a gradient prominence criterion to perform selective conservation, updating, and revision of weights. Importantly, CURE is designed to be dataset- and architecture-agnostic, ensuring its applicability across various scenarios. It effectively tackles both memorization and overfitting issues, thus enhancing the trade-off between robustness and generalization and additionally, this training approach also aids in mitigating \"robust overfitting\". Furthermore, our study provides valuable insights into the mechanisms of selective adversarial training and offers a promising avenue for future research.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=6IjN7oxjXt", "arxiv_id": "2401.14948", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19024, "title": "MMD Graph Kernel: Effective Metric Learning for Graphs via Maximum Mean Discrepancy", "authors": [ "Yan Sun", "Jicong Fan" ], "abstract": "This paper focuses on graph metric learning. First, we present a class of maximum mean discrepancy (MMD) based graph kernels, called MMD-GK. These kernels are computed by applying MMD to the node representations of two graphs with message-passing propagation. Compared to classical graph kernels such as the Weisfeiler-Lehman kernel, our MMD-GKs have much lower computational costs and are able to exploit nodes' features of graphs effectively. Secondly, we provide a class of deep MMD-GKs that are able to learn graph metrics and implicit graph features adaptively in an unsupervised manner. Thirdly, we propose a class of supervised deep MMD-GKs that are able to utilize label information of graphs and hence yield more discriminative metrics. Besides the algorithms, we provide theoretical analysis for the proposed methods. The proposed methods are evaluated in comparison to many baselines such as graph kernels and graph neural networks in the tasks of graph clustering and graph classification. The numerical results demonstrate the effectiveness and superiority of our methods.", "type": "Spotlight Poster", "OpenReview": "https://openreview.net/forum?id=GZ6AcZwA8r", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19396, "title": "Principled Federated Domain Adaptation: Gradient Projection and Auto-Weighting", "authors": [ "Enyi Jiang", "Yibo Jacky Zhang", "Sanmi Koyejo" ], "abstract": "Federated Domain Adaptation (FDA) describes the federated learning (FL) setting where source clients and a server work collaboratively to improve the performance of a target client where limited data is available. The domain shift between the source and target domains, coupled with limited data of the target client, makes FDA a challenging problem, e.g., common techniques such as federated averaging and fine-tuning fail due to domain shift and data scarcity. To theoretically understand the problem, we introduce new metrics that characterize the FDA setting and a theoretical framework with novel theorems for analyzing the performance of server aggregation rules. Further, we propose a novel lightweight aggregation rule, Federated Gradient Projection ($\\texttt{FedGP}$), which significantly improves the target performance with domain shift and data scarcity. Moreover, our theory suggests an $\\textit{auto-weighting scheme}$ that finds the optimal combinations of the source and target gradients. This scheme improves both $\\texttt{FedGP}$ and a simpler heuristic aggregation rule. Extensive experiments verify the theoretical insights and illustrate the effectiveness of the proposed methods in practice.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=6J3ehSUrMU", "arxiv_id": "2302.05049", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19762, "title": "Learning Energy Decompositions for Partial Inference in GFlowNets", "authors": [ "Hyosoon Jang", "Minsu Kim", "Sungsoo Ahn" ], "abstract": "This paper studies generative flow networks (GFlowNets) to sample objects from the Boltzmann energy distribution via a sequence of actions. In particular, we focus on improving GFlowNet with partial inference: training flow functions with the evaluation of the intermediate states or transitions. To this end, the recently developed forward-looking GFlowNet reparameterizes the flow functions based on evaluating the energy of intermediate states. However, such an evaluation of intermediate energies may (i) be too expensive or impossible to evaluate and (ii) even provide misleading training signals under large energy fluctuations along the sequence of actions. To resolve this issue, we propose learning energy decompositions for GFlowNets (LED-GFN). Our main idea is to (i) decompose the energy of an object into learnable potential functions defined on state transitions and (ii) reparameterize the flow functions using the potential functions. In particular, to produce informative local credits, we propose to regularize the potential to change smoothly over the sequence of actions. It is also noteworthy that training GFlowNet with our learned potential can preserve the optimal policy. We empirically verify the superiority of LED-GFN in five problems including the generation of unstructured and maximum independent sets, molecular graphs, and RNA sequences.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=P15CHILQlg", "arxiv_id": "2310.03301", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19395, "title": "UniTabE: A Universal Pretraining Protocol for Tabular Foundation Model in Data Science", "authors": [ "Yazheng Yang", "Yuqi Wang", "Guang Liu", "Ledell Wu", "Qi Liu" ], "abstract": "Recent advancements in Natural Language Processing (NLP) have witnessed the groundbreaking impact of pretrained models, yielding impressive outcomes across various tasks. This study seeks to extend the power of pretraining methodologies to facilitating the prediction over tables in data science, a domain traditionally overlooked, yet inherently challenging due to the plethora of table schemas intrinsic to different tasks. The primary research questions underpinning this work revolve around the establishment of a universal pretraining protocol for tables with varied structures, the generalizability and transferability of learned knowledge across tasks, the adaptation to diverse downstream applications, and the incorporation of incremental columns over time. In response to these challenges, we introduce UniTabE, a straightforward yet effective method designed to process tables in a uniform manner, devoid of constraints imposed by specific table structures. UniTabE's core concept relies on representing each basic table element with a module, termed TabUnit. This is subsequently followed by a Transformer encoder to refine the representation. Moreover, our model is designed to facilitate pretraining and finetuning through the utilization of free-form prompts. In order to implement the pretraining phase, we curated an expansive tabular dataset comprising approximately 13 billion samples, meticulously gathered from the Kaggle platform. This research primarily centers on classification and regression tasks involving tabular data, and conducts rigorous experimental testing and analyses to validate the effectiveness of our methodology. The experimental results demonstrate UniTabE's superior performance against several baseline models across a multitude of benchmark datasets. This, therefore, underscores UniTabE's potential to significantly enhance the semantic representation of tabular data, thereby marking a significant stride for tabular data analysis.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=6LLho5X6xV", "arxiv_id": "2307.09249", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19759, "title": "Meta Continual Learning Revisited: Implicitly Enhancing Online Hessian Approximation via Variance Reduction", "authors": [ "Yichen Wu", "Long-Kai Huang", "Renzhen Wang", "Deyu Meng", "Ying Wei" ], "abstract": "Regularization-based methods have so far been among the *de facto* choices for continual learning. Recent theoretical studies have revealed that these methods all boil down to relying on the Hessian matrix approximation of model weights. However, these methods suffer from suboptimal trade-offs between knowledge transfer and forgetting due to fixed and unchanging Hessian estimations during training.Another seemingly parallel strand of Meta-Continual Learning (Meta-CL) algorithms enforces alignment between gradients of previous tasks and that of the current task. In this work we revisit Meta-CL and for the first time bridge it with regularization-based methods. Concretely, Meta-CL implicitly approximates Hessian in an online manner, which enjoys the benefits of timely adaptation but meantime suffers from high variance induced by random memory buffer sampling. We are thus highly motivated to combine the best of both worlds, through the proposal of Variance Reduced Meta-CL (VR-MCL) to achieve both timely and accurate Hessian approximation.Through comprehensive experiments across three datasets and various settings, we consistently observe that VR-MCL outperforms other SOTA methods, which further validates the effectiveness of VR-MCL.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=TpD2aG1h0D", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19746, "title": "Latent Trajectory Learning for Limited Timestamps under Distribution Shift over Time", "authors": [ "QIUHAO Zeng", "Changjian Shui", "Long-Kai Huang", "Peng Liu", "Xi Chen", "Charles Ling", "Boyu Wang" ], "abstract": "Distribution shifts over time are common in real-world machine-learning applications. This scenario is formulated as Evolving Domain Generalization (EDG), where models aim to generalize well to unseen target domains in a time-varying system by learning and leveraging the underlying evolving pattern of the distribution shifts across domains. However, existing methods encounter challenges due to the limited number of timestamps (every domain corresponds to a timestamp) in EDG datasets, leading to difficulties in capturing evolving dynamics and risking overfitting to the sparse timestamps, which hampers their generalization and adaptability to new tasks. To address this limitation, we propose a novel approach SDE-EDG that collects the Infinitely Fined-Grid Evolving Trajectory (IFGET) of the data distribution with continuous-interpolated samples to bridge temporal gaps (intervals between two successive timestamps). Furthermore, by leveraging the inherent capacity of Stochastic Differential Equations (SDEs) to capture continuous trajectories, we propose their use to align SDE-modeled trajectories with IFGET across domains, thus enabling the capture of evolving distribution trends. We evaluate our approach on several benchmark datasets and demonstrate that it can achieve superior performance compared to existing state-of-the-art methods.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=bTMMNT7IdW", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19393, "title": "Graphical Multioutput Gaussian Process with Attention", "authors": [ "Yijue Dai", "Wenzhong Yan", "Feng Yin" ], "abstract": "Integrating information while recognizing dependence from multiple data sources and enhancing the predictive performance of the multi-output regression are challenging tasks. Multioutput Gaussian Process (MOGP) methods offer outstanding solutions with tractable predictions and uncertainty quantification.However, their practical applications are hindered by high computational complexity and storage demand. Additionally, there exist model mismatches in existing MOGP models when dealing with non-Gaussian data. To improve the model representation ability in terms of flexibility, optimality, and scalability,this paper introduces a novel multi-output regression framework, termed Graphical MOGP (GMOGP), which is empowered by:(i) generating flexible Gaussian process priors consolidated from identified parents, (ii) providing dependent processes with attention-based graphical representations, and (iii) achieving Pareto optimal solutions via a distributed learning framework. Numerical results confirm that the proposed GMOGP significantly outperforms state-of-the-art MOGP alternatives in predictive performance, as well as in time and memory efficiency, across various synthetic and real datasets.Our code and datasets are available at https://anonymous.4open.science/r/GMOGP-5ED3/.", "type": "Spotlight Poster", "OpenReview": "https://openreview.net/forum?id=6N8TW504aa", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19726, "title": "ExeDec: Execution Decomposition for Compositional Generalization in Neural Program Synthesis", "authors": [ "Kensen Shi", "Joey Hong", "Yinlin Deng", "Pengcheng Yin", "Manzil Zaheer", "Charles Sutton" ], "abstract": "When writing programs, people have the ability to tackle a new complex task by decomposing it into smaller and more familiar subtasks. While it is difficult to measure whether neural program synthesis methods have similar capabilities, we can measure whether they compositionally generalize, that is, whether a model that has been trained on the simpler subtasks is subsequently able to solve more complex tasks. In this paper, we characterize several different forms of compositional generalization that are desirable in program synthesis, forming a meta-benchmark which we use to create generalization tasks for two popular datasets, RobustFill and DeepCoder. We then propose ExeDec, a novel decomposition-based synthesis strategy that predicts execution subgoals to solve problems step-by-step informed by program execution at each step. When used with Transformer models trained from scratch, ExeDec has better synthesis performance and greatly improved compositional generalization ability compared to baselines. Finally, we use our benchmarks to demonstrate that LLMs struggle to compositionally generalize when asked to do programming-by-example in a few-shot setting, but an ExeDec-style prompting approach can improve the generalization ability and overall performance.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=oTRwljRgiv", "arxiv_id": "2307.13883", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19392, "title": "NoiseDiffusion: Correcting Noise for Image Interpolation with Diffusion Models beyond Spherical Linear Interpolation", "authors": [ "PengFei Zheng", "Yonggang Zhang", "Zhen Fang", "Tongliang Liu", "Defu Lian", "Bo Han" ], "abstract": "Image interpolation based on diffusion models is promising in creating fresh and interesting images. Advanced interpolation methods mainly focus on linear spherical interpolation, delivering remarkable success for images generated by diffusion models.However, existing methods struggle with natural images (not generated by diffusion models), limiting practical applications.Our investigation into the interpolation process has unveiled that its shortcomings are rooted in the introduction of inappropriate noise, which may either exceed or fall below the denoising threshold, leading to issues such as image artifacts and information loss in the interpolated images. To address this issue, we initially investigated a direct noise addition method, which improved image quality but introduced unwanted information. Drawing from these findings, we subsequently developed a novel interpolation approach that harnesses the advantages of both techniques. This approach retains the valuable noise with information from the original images while introducing a subtle Gaussian noise to enhance interpolation quality. Moreover, we introduced an innovative constraint on the noise component responsible for generating artifacts and incorporated original image to supplement missing information.These enhancements not only improved the interpolation results for images within the training domain but also extended the capability to interpolate with natural images beyond the training domain, achieving in the best interpolation results to date.", "type": "Spotlight Poster", "OpenReview": "https://openreview.net/forum?id=6O3Q6AFUTu", "arxiv_id": "2403.08840", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19391, "title": "Dynamic Discounted Counterfactual Regret Minimization", "authors": [ "Hang Xu", "Kai Li", "Haobo Fu", "QIANG FU", "Junliang Xing", "Jian Cheng" ], "abstract": "Counterfactual regret minimization (CFR) is a family of iterative algorithms showing promising results in solving imperfect-information games. Recent novel CFR variants (e.g., CFR+, DCFR) have significantly improved the convergence rate of the vanilla CFR. The key to these CFR variants\u2019 performance is weighting each iteration non-uniformly, i.e., discounting earlier iterations. However, these algorithms use a fixed, manually-specified scheme to weight each iteration, which enormously limits their potential. In this work, we propose Dynamic Discounted CFR (DDCFR), the first equilibrium-finding framework that discounts prior iterations using a dynamic, automatically-learned scheme. We formalize CFR\u2019s iteration process as a carefully designed Markov decision process and transform the discounting scheme learning problem into a policy optimization problem within it. The learned discounting scheme dynamically weights each iteration on the fly using information available at runtime. Experimental results across multiple games demonstrate that DDCFR\u2019s dynamic discounting scheme has a strong generalization ability and leads to faster convergence with improved performance.", "type": "Spotlight Poster", "OpenReview": "https://openreview.net/forum?id=6PbvbLyqT6", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19023, "title": "Robust Model Based Reinforcement Learning Using $\\mathcal{L}_1$ Adaptive Control", "authors": [ "Minjun Sung", "Sambhu Harimanas Karumanchi", "Aditya Gahlawat", "Naira Hovakimyan" ], "abstract": "We introduce $\\mathcal{L}_1$-MBRL, a control-theoretic augmentation scheme for Model-Based Reinforcement Learning (MBRL) algorithms. Unlike model-free approaches, MBRL algorithms learn a model of the transition function using data and use it to design a control input. Our approach generates an approximate control-affine model of the learned transition function according to the switching law. Using the approximate model, control input produced by the underlying MBRL is perturbed by the $\\mathcal{L}_1$ adaptive control, which is designed to enhance the robustness of the system against uncertainties. Importantly, this approach is agnostic to the choice of MBRL algorithm, which enables the utilization of the scheme in various MBRL algorithms. Our method exhibits superior performance and sample efficiency on multiple MuJoCo environments, both with and without system noise, as demonstrated through numerical simulations.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=GaLCLvJaoF", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19790, "title": "LongLoRA: Efficient Fine-tuning of Long-Context Large Language Models", "authors": [ "Yukang Chen", "Shengju Qian", "Haotian Tang", "Xin Lai", "Zhijian Liu", "Song Han", "Jiaya Jia" ], "abstract": "We present LongLoRA, an efficient fine-tuning approach that extends the context sizes of pre-trained large language models (LLMs), with limited computation cost. Typically, training LLMs with long context sizes is computationally expensive, requiring extensive training hours and GPU resources. For example, training on the context length of 8192 needs 16x computational costs in self-attention layers as that of 2048. In this paper, we speed up the context extension of LLMs in two aspects. On the one hand, although dense global attention is needed during inference, fine-tuning the model can be effectively and efficiently done by sparse local attention. The proposed shift short attention effectively enables context extension, leading to non-trivial computation saving with similar performance to fine-tuning with vanilla attention. Particularly, it can be implemented with only two lines of code in training, while being optional in inference. On the other hand, we revisit the parameter-efficient fine-tuning regime for context expansion. Notably, we find that LoRA for context extension works well under the premise of trainable embedding and normalization. LongLoRA demonstrates strong empirical results on various tasks on Llama2 models from 7B/13B to 70B. LongLoRA adopts Llama2 7B from 4k context to 100k, or Llama2 70B to 32k on a single 8$\\times$ A100 machine. LongLoRA extends models' context while retaining their original architectures, and is compatible with most existing techniques, like Flash-Attention2. In addition, we further conduct supervised fine-tuning on our LongLoRA models, with long instruction-following data. Our code and models will be publicly available.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=6PmJoRfdaK", "arxiv_id": "2309.12307", "GitHub": [ "https://github.com/dvlab-research/LongLoRA" ], "Space": [], "Model": [], "Dataset": [] }, { "id": 19021, "title": "Dictionary Contrastive Learning for Efficient Local Supervision without Auxiliary Networks", "authors": [ "Suhwan Choi", "Myeongho Jeon", "Yeonjung Hwang", "Jeonglyul Oh", "Sungjun Lim", "Joonseok Lee", "Myungjoo Kang" ], "abstract": "While backpropagation (BP) has achieved widespread success in deep learning, it faces two prominent challenges; that is, computational inefficiency and biological implausibility. These issues arise from the requirements of feedback weight symmetry and the forward/backward pass locking. \"Forward learning\" (FL), an emerging alternative, updates each layer's weights during the forward pass, eliminating the need for backward error signal propagation to address these concerns. Recent approaches have leveraged contrastive learning as a specialized tool for this scenario. However, it still exhibits suboptimal performance in comparison to BP. Our investigation suggests that existing contrastive FL methods, which assess similarities among local features, are susceptible to the inclusion of task-irrelevant information. In response to this, we propose a straightforward FL objective within a contrastive learning framework, with the goal of enhancing the similarity between local features and label embeddings, i.e., Dictionary Contrastive Forward Learning (DC-FL). Consequently, our objective yields substantial performance improvements, outperforming other state-of-the-art forward learning techniques. Notably, our method closely approaches the performance achieved by BP while concurrently preserving superior memory efficiency.", "type": "Spotlight Poster", "OpenReview": "https://openreview.net/forum?id=Gg7cXo3S8l", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19020, "title": "Investigating the Benefits of Projection Head for Representation Learning", "authors": [ "Yihao Xue", "Eric Gan", "Jiayi Ni", "Siddharth Joshi", "Baharan Mirzasoleiman" ], "abstract": "Recently, multimodal contrastive learning (MMCL) approaches, such as CLIP \\citep{radford2021learning}, have achieved a remarkable success in learning representations that are robust against distribution shift and generalize to new domains. Despite the empirical success, the mechanism behind learning such generalizable representations is not understood. In this work, we rigorously analyze this problem and uncover two mechanisms behind MMCL's robustness: \\emph{intra-class contrasting}, which allows the model to learn features with a high variance, and \\emph{inter-class feature sharing}, where annotated details in one class help learning other classes better. Both mechanisms prevent spurious features that are over-represented in the training data to overshadow the generalizable core features. This yields superior zero-shot classification accuracy under distribution shift. Furthermore, we theoretically demonstrate the benefits of using rich captions on robustness and explore the effect of annotating different types of details in the captions. We validate our theoretical findings through experiments, including a well-designed synthetic experiment and an experiment involving training CLIP on MS COCO \\citep{lin2014microsoft} and evaluating the model on variations of shifted ImageNet.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=GgEAdqYPNA", "arxiv_id": "2403.11391", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19777, "title": "Multisize Dataset Condensation", "authors": [ "Yang He", "Lingao Xiao", "Joey Tianyi Zhou", "Ivor Tsang" ], "abstract": "While dataset condensation effectively enhances training efficiency, its application in on-device scenarios brings unique challenges. 1) Due to the fluctuating computational resources of these devices, there's a demand for a flexible dataset size that diverges from a predefined size. 2) The limited computational power on devices often prevents additional condensation operations. These two challenges connect to the \"subset degradation problem\" in traditional dataset condensation: a subset from a larger condensed dataset is often unrepresentative compared to directly condensing the whole dataset to that smaller size. In this paper, we propose Multisize Dataset Condensation (MDC) by **compressing N condensation processes into a single condensation process to obtain datasets with multiple sizes**. Specifically, we introduce an \"adaptive subset loss\" on top of the basic condensation loss to mitigate the \"subset degradation problem\". Our MDC method offers several benefits: 1) No additional condensation process is required; 2) reduced storage requirement by reusing condensed images. Experiments validate our findings on networks including ConvNet, ResNet and DenseNet, and datasets including SVHN, CIFAR-10, CIFAR-100 and ImageNet. For example, we achieved 6.40% average accuracy gains on condensing CIFAR-10 to ten images per class.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=FVhmnvqnsI", "arxiv_id": "2403.06075", "GitHub": [ "https://github.com/he-y/Multisize-Dataset-Condensation" ], "Space": [], "Model": [], "Dataset": [] }, { "id": 19019, "title": "Orbit-Equivariant Graph Neural Networks", "authors": [ "Matthew Morris", "Bernardo Cuenca Grau", "Ian Horrocks" ], "abstract": "Equivariance is an important structural property that is captured by architectures such as graph neural networks (GNNs). However, equivariant graph functions cannot produce different outputs for similar nodes, which may be undesirable when the function is trying to optimize some global graph property. In this paper, we define orbit-equivariance, a relaxation of equivariance which allows for such functions whilst retaining important structural inductive biases. We situate the property in the hierarchy of graph functions, define a taxonomy of orbit-equivariant functions, and provide four different ways to achieve non-equivariant GNNs. For each, we analyze their expressivity with respect to orbit-equivariance and evaluate them on two novel datasets, one of which stems from a real-world use-case of designing optimal bioisosteres.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=GkJOCga62u", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19779, "title": "InfoBatch: Lossless Training Speed Up by Unbiased Dynamic Data Pruning", "authors": [ "Ziheng Qin", "Kai Wang", "Zangwei Zheng", "Jianyang Gu", "Xiangyu Peng", "xu Zhao Pan", "Daquan Zhou", "Lei Shang", "Baigui Sun", "Xuansong Xie", "Yang You" ], "abstract": "Data pruning aims to obtain lossless performances with less overall cost. A common approach is to filter out samples that make less contribution to the training. This could lead to gradient expectation bias compared to the original data. To solve this problem, we propose InfoBatch, a novel framework aiming to achieve lossless training acceleration by unbiased dynamic data pruning. Specifically, InfoBatchrandomly prunes a portion of less informative samples based on the loss distribution and rescales the gradients of the remaining samples to approximate the original gradient. As a plug-and-play and architecture-agnostic framework, InfoBatch consistently obtains lossless training results on classification, semantic segmentation, vision pertaining, and instruction fine-tuning tasks. On CIFAR10/100, ImageNet-1K, and ADE20K, InfoBatch losslessly saves 40% overall cost. For pertaining MAE and diffusion model, InfoBatch can respectively save 24.8% and 27% cost. For LLaMA instruction fine-tuning, InfoBatch is also able to save 20% cost and is compatible with coreset selection methods. The code will be made public.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=C61sk5LsK6", "arxiv_id": "2303.04947", "GitHub": [ "https://github.com/henryqin1997/InfoBatch" ], "Space": [], "Model": [], "Dataset": [] }, { "id": 19715, "title": "ClimODE: Climate Forecasting With Physics-informed Neural ODEs", "authors": [ "Yogesh Verma", "Markus Heinonen", "Vikas Garg" ], "abstract": "Climate prediction traditionally relies on complex numerical simulations of atmospheric physics. Deep learning approaches, such as transformers, have recently challenged the simulation paradigm with complex network forecasts. However, they often act as data-driven black-box models that neglect the underlying physics and lack uncertainty quantification. We address these limitations with ClimODE, a spatiotemporal continuous-time process that implements a key principle of advection from statistical mechanics, namely, weather changes due to a spatial movement of quantities over time. ClimODE models precise weather evolution with value-conserving dynamics, learning global weather transport as a neural flow, which also enables estimating the uncertainty in predictions. Our approach outperforms existing data-driven methods in global and regional forecasting with an order of magnitude smaller parameterization, establishing a new state of the art.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=xuY33XhEGR", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19792, "title": "Mixed-Type Tabular Data Synthesis with Score-based Diffusion in Latent Space", "authors": [ "Hengrui Zhang", "Jiani Zhang", "Zhengyuan Shen", "Balasubramaniam Srinivasan", "Xiao Qin", "Christos Faloutsos", "Huzefa Rangwala", "George Karypis" ], "abstract": "Recent advances in tabular data generation have greatly enhanced synthetic data quality. However, extending diffusion models to tabular data is challenging due to the intricately varied distributions and a blend of data types of tabular data. This paper introduces TABSYN, a methodology that synthesizes tabular data by leveraging a diffusion model within a variational autoencoder (VAE) crafted latent space.The key advantages of the proposed TabSyn include (1) Generality: the ability to handle a broad spectrum of data types by converting them into a single unified space and explicitly capture inter-column relations, (2) Quality: optimizing the distribution of latent embeddings to enhance the subsequent training of diffusion models, which helps generate high-quality synthetic data, (3) Speed: much fewer number of reverse steps and faster synthesis speed than existing diffusion-based methods. Extensive experiments on six datasets with five metrics demonstrate that TabSyn outperforms existing methods. Specifically, it reduces the error rates by 86% and 67% for column-wise distribution and pair-wise column correlation estimations compared with the most competitive baselines, its superiority in accuratelylearning the data distributions of tabular data.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=4Ay23yeuz0", "arxiv_id": "2310.09656", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19775, "title": "On the Humanity of Conversational AI: Evaluating the Psychological Portrayal of LLMs", "authors": [ "Jen-tse Huang", "Wenxuan Wang", "Eric John Li", "Man Ho LAM", "Shujie Ren", "Youliang Yuan", "Wenxiang Jiao", "Zhaopeng Tu", "Michael Lyu" ], "abstract": "Large Language Models (LLMs) have recently showcased their remarkable capacities, not only in natural language processing tasks but also across diverse domains such as clinical medicine, legal consultation, and education. LLMs become more than mere applications, evolving into assistants capable of addressing diverse user requests. This narrows the distinction between human beings and artificial intelligence agents, raising intriguing questions regarding the potential manifestation of personalities, temperaments, and emotions within LLMs. In this paper, we propose a framework, PPBench, for evaluating diverse psychological aspects of LLMs. Comprising thirteen scales commonly used in clinical psychology, PPBench further classifies these scales into four distinct categories: personality traits, interpersonal relationships, motivational tests, and emotional abilities. Our study examines five popular models, namely \\texttt{text-davinci-003}, ChatGPT, GPT-4, LLaMA-2-7b, and LLaMA-2-13b. Additionally, we employ a jailbreak approach to bypass the safety alignment protocols and test the intrinsic natures of LLMs. We have made PPBench openly accessible via *\\footnote{The link is hidden due to anonymity. For reviewers, please refer to the supplementary materials.}.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=H3UayAQWoE", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19733, "title": "Less is More: Fewer Interpretable Region via Submodular Subset Selection", "authors": [ "Ruoyu Chen", "Hua Zhang", "Siyuan Liang", "Jingzhi Li", "Xiaochun Cao" ], "abstract": "Image attribution algorithms aim to identify important regions that are highly relevant to model decisions. Although existing attribution solutions can effectively assign importance to target elements, they still face the following challenges: 1) existing attribution methods generate inaccurate small regions thus misleading the direction of correct attribution, and 2) the model cannot produce good attribution results for samples with wrong predictions. To address the above challenges, this paper re-models the above image attribution problem as a submodular subset selection problem, aiming to enhance model interpretability using fewer regions. To address the lack of attention to local regions, we construct a novel submodular function to discover more accurate fine-grained interpretation regions. To enhance the attribution effect for all samples, we also impose four different constraints on the selection of sub-regions, i.e., confidence, effectiveness, consistency, and collaboration scores, to assess the importance of various subsets. Moreover, we also analyze the link between the validity of the submodular function and four constraints at the level of theoretical aspects. Extensive experiments show that the proposed method outperforms SOTA methods on two face datasets (Celeb-A and VGG-Face2) and one fine-grained dataset (CUB-200-2011). For correctly predicted samples, the proposed method improves the Deletion and Insertion scores with an average of 4.9% and 2.5% gain relative to HSIC-Attribution. For incorrectly predicted samples, our method achieves gains of 81.0% and 18.4% compared to the HSIC-Attribution algorithm in the average highest confidence and Insertion score respectively.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=jKTUlxo5zy", "arxiv_id": "2402.09164", "GitHub": [ "https://github.com/RuoyuChen10/SMDL-Attribution" ], "Space": [], "Model": [], "Dataset": [] }, { "id": 19712, "title": "On the Joint Interaction of Models, Data, and Features", "authors": [ "Yiding Jiang", "Christina Baek", "J Zico Kolter" ], "abstract": "Learning features from data is one of the defining characteristics of deep learning,but our theoretical understanding of the role features play in deep learning is stillrudimentary. To address this gap, we introduce a new tool, the interaction tensor,for empirically analyzing the interaction between data and model through features.With the interaction tensor, we make several key observations about how featuresare distributed in data and how models with different random seeds learn differentfeatures. Based on these observations, we propose a conceptual framework for fea-ture learning. Under this framework, the expected accuracy for a single hypothesisand agreement for a pair of hypotheses can both be derived in closed-form. Wedemonstrate that the proposed framework can explain empirically observed phenomena, including the recently discovered Generalization Disagreement Equality(GDE) that allows for estimating the generalization error with only unlabeled data.Further, our theory also provides explicit construction of natural data distributionsthat break the GDE. Thus, we believe this work provides valuable new insight intoour understanding of feature learning.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=ze7DOLi394", "arxiv_id": "2306.04793", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19753, "title": "Knowledge Card: Filling LLMs' Knowledge Gaps with Plug-in Specialized Language Models", "authors": [ "Shangbin Feng", "Weijia Shi", "Yuyang Bai", "Vidhisha Balachandran", "Tianxing He", "Yulia Tsvetkov" ], "abstract": "By design, large language models (LLMs) are static general-purpose models, expensive to retrain or update frequently. As they are increasingly adopted for knowledge-intensive tasks, it becomes evident that these design choices lead to failures to generate factual, relevant, and up-to-date knowledge. To this end, we propose Knowledge Card, a modular framework to plug in new factual and relevant knowledge into general-purpose LLMs. We first introduce knowledge cards---specialized language models trained on corpora from specific domains and sources. Knowledge cards serve as parametric repositories that are selected at inference time to generate background knowledge for the base LLM. We then propose three content selectors to dynamically select and retain information in documents generated by knowledge cards, specifically controlling for relevance, brevity, and factuality of outputs. Finally, we propose two complementary integration approaches to augment the base LLM with the (relevant, factual) knowledge curated from the specialized LMs. Through extensive experiments, we demonstrate that Knowledge Card achieves state-of-the-art performance on six benchmark datasets. Ultimately, Knowledge Card framework enables dynamic synthesis and updates of knowledge from diverse domains. Its modularity will ensure that relevant knowledge can be continuously updated through the collective efforts of the research community.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=WbWtOYIzIK", "arxiv_id": "2305.09955", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19743, "title": "Small-scale proxies for large-scale Transformer training instabilities", "authors": [ "Mitchell Wortsman", "Peter J Liu", "Lechao Xiao", "Katie E Everett", "Alexander A Alemi", "Ben Adlam", "John D Co-Reyes", "Izzeddin Gur", "Abhishek Kumar", "Roman Novak", "Jeffrey Pennington", "Jascha Sohl-Dickstein", "Kelvin Xu", "Jaehoon Lee", "Justin Gilmer", "Simon Kornblith" ], "abstract": "Teams that have trained large Transformer-based models have reported training instabilities at large scale that did not appear when training with the same hyperparameters at smaller scales. Although the causes of such instabilities are of scientific interest, the amount of resources required to reproduce them has made investigation difficult. In this work, we seek ways to reproduce and study training instability at smaller scales. First, we focus on two sources of training instability described in previous work: the growth of logits in attention layers (Dehghani et al., 2023) and divergence of the output logits from the log probabilities (Chowdhery et al., 2022). By measuring the relationship between learning rate and loss across scales, we show that these instabilities also appear in small models when training at high learning rates, and that mitigations previously employed at large scales are equally effective in this regime. This prompts us to investigate the extent to which other known optimizer and model interventions influence the sensitivity of the final loss to changes in the learning rate. To this end, we study methods such as warm-up, weight decay, and the MuParam (Yang et al., 2022), and combine techniques to train small models that achieve similar losses across orders of magnitude of learning rate variation. Finally, to conclude our exploration we study two cases where instabilities can be predicted before they emerge by examining the scaling behavior of model characteristics such as activation and gradient norms.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=d8w0pmvXbZ", "arxiv_id": "2309.14322", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19784, "title": "Zipformer: A faster and better encoder for automatic speech recognition", "authors": [ "Zengwei Yao", "Liyong Guo", "Xiaoyu Yang", "Wei Kang", "Fangjun Kuang", "Yifan Yang", "Zengrui Jin", "Long Lin", "Daniel Povey" ], "abstract": "The Conformer has become the most popular encoder model for automatic speech recognition (ASR). It adds convolution modules to a transformer to learn both local and global dependencies. In this work we describe a faster, more memory-efficient, and better-performing transformer, called Zipformer. Modeling changes include: 1) a U-Net-like encoder structure where middle stacks operate at lower frame rates; 2) reorganized block structure with more modules, within which we re-use attention weights for efficiency; 3) a modified form of LayerNorm called BiasNorm allows us to retain some length information; 4) new activation functions SwooshR and SwooshL work better than Swish. We also propose a new optimizer, called ScaledAdam, which scales the update by each tensor's current scale to keep the relative change about the same, and also explictly learns the parameter scale. It achieves faster converge and better performance than Adam. Extensive experiments on LibriSpeech, Aishell-1, and WenetSpeech datasets demonstrate the effectiveness of our proposed Zipformer over other state-of-the-art ASR models.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=9WD9KwssyT", "arxiv_id": "2310.11230", "GitHub": [ "https://github.com/k2-fsa/icefall" ], "Space": [], "Model": [], "Dataset": [] }, { "id": 19776, "title": "Candidate Label Set Pruning: A Data-centric Perspective for Deep Partial-label Learning", "authors": [ "Shuo He", "Chaojie Wang", "Guowu Yang", "Lei Feng" ], "abstract": "Partial-label learning (PLL) allows each training example to be equipped with a set of candidate labels. Existing deep PLL research focuses on a \\emph{learning-centric} perspective to design various training strategies for label disambiguation i.e., identifying the concealed true label from the candidate label set, for model training. However, when the size of the candidate label set becomes excessively large, these learning-centric strategies would be unable to find the true label for model training, thereby causing performance degradation. This motivates us to think from a \\emph{data-centric} perspective and pioneer a new PLL-related task called candidate label set pruning (CLSP) that aims to filter out certain potential false candidate labels in a training-free manner. To this end, we propose the first CLSP method based on the inconsistency between the representation space and the candidate label space. Specifically, for each candidate label of a training instance, if it is not a candidate label of the instance's nearest neighbors in the representation space, then it has a high probability of being a false label. Based on this intuition, we employ a per-example pruning scheme that filters out a specific proportion of high-probability false candidate labels. Theoretically, we prove an upper bound of the pruning error rate and analyze how the quality of representations affects our proposed method. Empirically, extensive experiments on both benchmark-simulated and real-world PLL datasets validate the great value of CLSP to significantly improve many state-of-the-art deep PLL methods.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=Fk5IzauJ7F", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19761, "title": "Never Train from Scratch: Fair Comparison of Long-Sequence Models Requires Data-Driven Priors", "authors": [ "Ido Amos", "Jonathan Berant", "Ankit Gupta" ], "abstract": "Modeling long-range dependencies across sequences is a longstanding goal in machine learning and has led to architectures, such as state space models, that dramatically outperform Transformers on long sequences. However, these impressive empirical gains have been by and large demonstrated on benchmarks (e.g. Long Range Arena), where models are randomly initialized and trained to predict a target label from an input sequence. In this work, we show that random initialization leads to gross overestimation of the differences between architectures and that pretraining with standard denoising objectives, *using only the downstream task data*, leads to dramatic gains across multiple architectures and to very small gaps between Transformers and state space models (SSMs). In stark contrast to prior works, we find vanilla Transformers to match the performance of S4 on Long Range Arena when properly pretrained, and we improve the best reported results of SSMs on the PathX-256 task by 20 absolute points. Subsequently, we analyze the utility of previously-proposed structured parameterizations for SSMs and show they become mostly redundant in the presence of data-driven initialization obtained through pretraining. Our work shows that, when evaluating different architectures on supervised tasks, incorporation of data-driven priors via pretraining is essential for reliable performance estimation, and can be done efficiently.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=PdaPky8MUn", "arxiv_id": "2310.02980", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19771, "title": "GNNCert: Deterministic Certification of Graph Neural Networks against Adversarial Perturbations", "authors": [ "zaishuo xia", "Han Yang", "Binghui Wang", "Jinyuan Jia" ], "abstract": "Graph classification, which aims to predict a label for a graph, has many real-world applications such as malware detection, fraud detection, and healthcare. However, many studies show an attacker could carefully perturb the structure and/or node features in a graph such that a graph classifier misclassifies the perturbed graph. Such vulnerability impedes the deployment of graph classification in security/safety-critical applications. Existing empirical defenses lack formal robustness guarantees and could be broken by adaptive or unknown attacks. Existing provable defenses have the following limitations: 1) they achieve sub-optimal robustness guarantees for graph structure perturbation, 2) they cannot provide robustness guarantees for arbitrarily node feature perturbations, 3) their robustness guarantees are probabilistic, meaning they could be incorrect with a non-zero probability, and 4) they incur large computation costs. We aim to address those limitations in this work. We propose GraphGuard, a certified defense against both graph structure and node feature perturbations for graph classification. Our GraphGuard provably predicts the same label for a graph when the number of perturbed edges and the number of nodes with perturbed features are bounded. Our results on 8 benchmark datasets show GraphGuard outperforms three state-of-the-art methods.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=IGzaH538fz", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19769, "title": "Proving Test Set Contamination in Black-Box Language Models", "authors": [ "Yonatan Oren", "Nicole Meister", "Niladri S. Chatterji", "Faisal Ladhak", "Tatsunori Hashimoto" ], "abstract": "Large language models are trained on vast amounts of internet data, prompting concerns that they have memorized public benchmarks. Detecting this type of contamination is challenging because the pretraining data used by proprietary models are often not publicly accessible.We propose a procedure for detecting test set contamination of language models with exact false positive guarantees and without access to pretraining data or model weights. Our approach leverages the fact that when there is no data contamination, all orderings of an exchangeable benchmark should be equally likely. In contrast, the tendency for language models to memorize example order means that a contaminated language model will find certain canonical orderings to be much more likely than others. Our test flags potential contamination whenever the likelihood of a canonically ordered benchmark dataset is significantly higher than the likelihood after shuffling the examples.We demonstrate that our procedure is sensitive enough to reliably detect contamination in challenging situations, including models as small as 1.4 billion parameters, on small test sets only 1000 examples, and datasets that appear only a few times in the pretraining corpus. Finally, we evaluate LLaMA-2 to apply our test in a realistic setting and find our results to be consistent with existing contamination evaluations.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=KS8mIvetg2", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19750, "title": "LLMCarbon: Modeling the End-to-End Carbon Footprint of Large Language Models", "authors": [ "Ahmad Faiz", "Sotaro Kaneda", "Ruhan Wang", "Rita Chukwunyere Osi", "Prateek Sharma", "Fan Chen", "Lei Jiang" ], "abstract": "The carbon footprint of large language models (LLMs) is substantial, stemming from their training, inference, experimentation, and storage processes, encompassing both operational and embodied carbon emissions. Precisely assessing the carbon impact of emerging LLMs before their actual training, which involves substantial GPU usage, is crucial. Although many previous studies have reported the carbon footprint of LLM training, only one prior tool, mlco2, can predict the carbon footprint of new neural networks before their physical training. However, mlco2 exhibits several limitations. Firstly, it cannot extend its carbon footprint estimation to include dense or mixture-of-experts (MoE) LLMs. Secondly, mlco2 disregards essential architectural parameters of networks, such as parameter counts, leading to inflated projections. Thirdly, mlco2 focuses solely on GPUs, excluding TPUs and assuming uniform peak computing throughput across GPUs, resulting in imprecise carbon footprint estimations. Lastly, mlco2 cannot model the embodied carbon footprint of an LLM. To address these gaps, we present an end-to-end carbon footprint projection model, LLMCarbon, designed for both dense and MoE LLMs. Compared to mlco2, LLMCarbon greatly improves the estimation accuracy of the carbon footprint of various LLMs.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=aIok3ZD9to", "arxiv_id": "2309.14393", "GitHub": [ "https://github.com/SotaroKaneda/MLCarbon" ], "Space": [], "Model": [], "Dataset": [] }, { "id": 18598, "title": "Efficient local linearity regularization to overcome catastrophic overfitting", "authors": [ "Elias Abad Rocamora", "Fanghui Liu", "Grigorios Chrysos", "Pablo M. Olmos", "Volkan Cevher" ], "abstract": "Catastrophic overfitting (CO) in single-step adversarial training (AT) results in abrupt drops in the adversarial test accuracy (even down to $0$%). For models trained with multi-step AT, it has been observed that the loss function behaves locally linearly with respect to the input, this is however lost in single-step AT. To address CO in single-step AT, several methods have been proposed to enforce local linearity of the loss via regularization. However, these regularization terms considerably slow down training due to *Double Backpropagation*. Instead, in this work, we introduce a regularization term, called ELLE, to mitigate CO *effectively* and *efficiently* in classical AT evaluations, as well as some more difficult regimes, e.g., large adversarial perturbations and long training schedules. Our regularization term can be theoretically linked to curvature of the loss function and is computationally cheaper than previous methods by avoiding *Double Backpropagation*. Our thorough experimental validation demonstrates that our work does not suffer from CO, even in challenging settings where previous works suffer from it. We also notice that adapting our regularization parameter during training (ELLE-A) greatly improves the performance, specially in large $\\epsilon$ setups.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=SZzQz8ikwg", "arxiv_id": "2401.11618", "GitHub": [ "https://github.com/LIONS-EPFL/ELLE" ], "Space": [], "Model": [], "Dataset": [] }, { "id": 19791, "title": "Interpreting CLIP's Image Representation via Text-Based Decomposition", "authors": [ "Yossi Gandelsman", "Alexei A Efros", "Jacob Steinhardt" ], "abstract": "We investigate the CLIP image encoder by analyzing how individual model components affect the final representation. We decompose the image representation as a sum across individual image patches, model layers, and attention heads, and use CLIP's text representation to interpret the summands. Interpreting the attention heads, we characterize each head's role by automatically finding text representations that span its output space, which reveals property-specific roles for many heads (e.g.~location or shape). Next, interpreting the image patches, we uncover an emergent spatial localization within CLIP. Finally, we use this understanding to remove spurious features from CLIP and to create a strong zero-shot image segmenter. Our results indicate that scalable understanding of transformer models is attainable and can be used to repair and improve models.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=5Ca9sSzuDp", "arxiv_id": "2310.05916", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 18597, "title": "Horizon-Free Regret for Linear Markov Decision Processes", "authors": [ "Zhang Zihan", "Jason D. Lee", "Yuxin Chen", "Simon Shaolei Du" ], "abstract": "A recent line of works showed regret bounds in reinforcement learning (RL) can be (nearly) independent of planning horizon, a.k.a. the horizon-free bounds. However, these regret bounds only apply to settings where a polynomial dependency on the size of transition model is allowed, such as tabular Markov Decision Process (MDP) and linear mixture MDP. We give the first horizon-free bound for the popular linear MDP setting where the size of the transition model can be exponentially large or even uncountable. In contrast to prior works which explicitly estimate the transition model and compute the inhomogeneous value functions at different time steps, we directly estimate the value functions and confidence sets. We obtain the horizon-free bound by: (1) maintaining multiple weighted least square estimators for the value functions; and (2) a structural lemma which shows the maximal total variation of the inhomogeneous value functions is bounded by a polynomial factor of the feature dimension.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=SdBApv9iT4", "arxiv_id": "2403.10738", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19018, "title": "FeatUp: A Model-Agnostic Framework for Features at Any Resolution", "authors": [ "Stephanie Fu", "Mark Hamilton", "Laura E. Brandt", "Axel Feldmann", "Zhoutong Zhang", "William T. Freeman" ], "abstract": "Deep features are a cornerstone of computer vision research, capturing image semantics and enabling the community to solve downstream tasks even in the zero- or few-shot regime. However, these features often lack the spatial resolution to directly perform dense prediction tasks like segmentation and depth prediction because models aggressively pool information over large areas. In this work, we introduce FeatUp, a task- and model-agnostic framework to restore lost spatial information in deep features. We introduce two variants of FeatUp: one that guides features with high-resolution signal in a single forward pass, and one that fits an implicit model to a single image to reconstruct features at any resolution. Both approaches use a multi-view consistency loss with deep analogies to NeRFs. Our features retain their original semantics and can be swapped into existing applications to yield resolution and performance gains even without re-training. We show that FeatUp significantly outperforms other feature upsampling and image super-resolution approaches in class activation map generation, transfer learning for segmentation and depth prediction, and end-to-end training for semantic segmentation.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=GkJiNn2QDF", "arxiv_id": "2403.10516", "GitHub": [ "https://github.com/mhamilton723/FeatUp" ], "Space": [ "mhamilton723/FeatUp" ], "Model": [], "Dataset": [] }, { "id": 19719, "title": "Quick-Tune: Quickly Learning Which Pretrained Model to Finetune and How", "authors": [ "Sebastian Pineda Arango", "Fabio Ferreira", "Arlind Kadra", "Frank Hutter", "Josif Grabocka" ], "abstract": "With the ever-increasing number of pretrained models, machine learning practitioners are continuously faced with which pretrained model to use, and how to finetune it for a new dataset. In this paper, we propose a methodology that jointly searches for the optimal pretrained model and the hyperparameters for finetuning it. Our method transfers knowledge about the performance of many pretrained models with multiple hyperparameter configurations on a series of datasets. To this aim, we evaluated over 20k hyperparameter configurations for finetuning 24 pretrained image classification models on 87 datasets to generate a large-scale meta-dataset. We meta-learn a gray-box performance predictor on the learning curves of this meta-dataset and use it for fast hyperparameter optimization on new datasets. We empirically demonstrate that our resulting approach can quickly select an accurate pretrained model for a new dataset together with its optimal hyperparameters.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=tqh1zdXIra", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19735, "title": "Fine-tuning Aligned Language Models Compromises Safety, Even When Users Do Not Intend To!", "authors": [ "Xiangyu Qi", "Yi Zeng", "Tinghao Xie", "Pin-Yu Chen", "Ruoxi Jia", "Prateek Mittal", "Peter Henderson" ], "abstract": "Optimizing large language models (LLMs) for downstream use cases often involves the customization of pre-trained LLMs through further fine-tuning. Meta's open-source release of Llama models and OpenAI's APIs for fine-tuning GPT-3.5 Turbo on customized datasets accelerate this trend. But, what are the safety costs associated with such customized fine-tuning? While existing safety alignment techniques restrict harmful behaviors of LLMs at inference time, they do not cover safety risks when fine-tuning privileges are extended to end-users. Our red teaming studies find that the safety alignment of LLMs can be compromised by fine-tuning with only a few adversarially designed training examples. For instance, we jailbreak GPT-3.5 Turbo's safety guardrails by fine-tuning it on only 10 such examples at a cost of less than $0.20 via OpenAI's APIs, making the model responsive to nearly any harmful instructions. Disconcertingly, our research also reveals that, even without malicious intent, simply fine-tuning with benign and commonly used datasets can also inadvertently degrade the safety alignment of LLMs, though to a lesser extent. These findings suggest that fine-tuning aligned LLMs introduces new safety risks that current safety infrastructures fall short of addressing --- even if a model's initial safety alignment is impeccable, how can it be maintained after customized fine-tuning? We outline and critically analyze potential mitigations and advocate for further research efforts toward reinforcing safety protocols for the customized fine-tuning of aligned LLMs. (This paper contains red-teaming data and model-generated content that can be offensive in nature.)", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=hTEGyKf0dZ", "arxiv_id": "2310.03693", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19729, "title": "Monte Carlo guided Denoising Diffusion models for Bayesian linear inverse problems.", "authors": [ "Gabriel Cardoso", "Yazid Janati el idrissi", "Sylvain Le Corff", "Eric Moulines" ], "abstract": "Ill-posed linear inverse problems arise frequently in various applications, from computational photography to medical imaging.A recent line of research exploits Bayesian inference with informative priors to handle the ill-posedness of such problems.Amongst such priors, score-based generative models (SGM) have recently been successfully applied to several different inverse problems.In this study, we exploit the particular structure of the prior defined by the SGM to define a sequence of intermediate linear inverse problems. As the noise level decreases, the posteriors of these inverse problems get closer to the target posterior of the original inverse problem. To sample from this sequence of posteriors, we propose the use of Sequential Monte Carlo (SMC) methods.The proposed algorithm, \\algo, is shown to be theoretically grounded and we provide numerical simulations showing that it outperforms competing baselines when dealing with ill-posed inverse problems in a Bayesian setting.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=nHESwXvxWK", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19745, "title": "METRA: Scalable Unsupervised RL with Metric-Aware Abstraction", "authors": [ "Seohong Park", "Oleh Rybkin", "Sergey Levine" ], "abstract": "Unsupervised pre-training strategies have proven to be highly effective in natural language processing and computer vision. Likewise, unsupervised reinforcement learning (RL) holds the promise of discovering a variety of potentially useful behaviors that can accelerate the learning of a wide array of downstream tasks. Previous unsupervised RL approaches have mainly focused on pure exploration and mutual information skill learning. However, despite the previous attempts, making unsupervised RL truly scalable still remains a major open challenge: pure exploration approaches might struggle in complex environments with large state spaces, where covering every possible transition is infeasible, and mutual information skill learning approaches might completely fail to explore the environment due to the lack of incentives. To make unsupervised RL scalable to complex, high-dimensional environments, we propose a novel unsupervised RL objective, which we call **Metric-Aware Abstraction** (**METRA**). Our main idea is, instead of directly covering the state space, to only cover a compact latent space $\\mathcal{Z}$ that is *metrically* connected to the state space $\\mathcal{S}$ by temporal distances. By learning to move in every direction in the latent space, METRA obtains a tractable set of diverse behaviors that approximately cover the state space, being scalable to high-dimensional environments. Through our experiments in five locomotion and manipulation environments, we demonstrate that METRA can discover a variety of useful behaviors even in complex, pixel-based environments, being the *first* unsupervised RL method that discovers diverse locomotion behaviors in pixel-based Quadruped and Humanoid. Our code and video are available at https://sites.google.com/view/metra0", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=c5pwL0Soay", "arxiv_id": "2310.08887", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19728, "title": "Pre-Training Goal-based Models for Sample-Efficient Reinforcement Learning", "authors": [ "Haoqi Yuan", "Zhancun Mu", "Feiyang Xie", "Zongqing Lu" ], "abstract": "Pre-training on task-agnostic large datasets is a promising approach for enhancing the sample efficiency of reinforcement learning (RL) in solving complex tasks. We present PTGM, a novel method that pre-trains goal-based models to augment RL by providing temporal abstractions and behavior regularization. PTGM involves pre-training a low-level, goal-conditioned policy and training a high-level policy to generate goals for subsequent RL tasks. To address the challenges posed by the high-dimensional goal space, while simultaneously maintaining the agent's capability to accomplish various skills, we propose clustering goals in the dataset to form a discrete high-level action space. Additionally, we introduce a pre-trained goal prior model to regularize the behavior of the high-level policy in RL, enhancing sample efficiency and learning stability. Experimental results in a robotic simulation environment and the challenging open-world environment of Minecraft demonstrate PTGM\u2019s superiority in sample efficiency and task performance compared to baselines. Moreover, PTGM exemplifies enhanced interpretability and generalization of the acquired low-level skills.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=o2IEmeLL9r", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19740, "title": "Flow Matching on General Geometries", "authors": [ "Ricky T. Q. Chen", "Yaron Lipman" ], "abstract": "We propose Riemannian Flow Matching (RFM), a simple yet powerful framework for training continuous normalizing flows on manifolds. Existing methods for generative modeling on manifolds either require expensive simulation, are inherently unable to scale to high dimensions, or use approximations for limiting quantities that result in biased training objectives. Riemannian Flow Matching bypasses these limitations and offers several advantages over previous approaches: it is simulation-free on simple geometries, does not require divergence computation, and computes its target vector field in closed-form. The key ingredient behind RFM is the construction of a relatively simple premetric for defining target vector fields, which encompasses the existing Euclidean case. To extend to general geometries, we rely on the use of spectral decompositions to efficiently compute premetrics on the fly. Our method achieves state-of-the-art performance on real-world non-Euclidean datasets, and we demonstrate tractable training on general geometries, including triangular meshes with highly non-trivial curvature and boundaries.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=g7ohDlTITL", "arxiv_id": "2302.03660", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19727, "title": "Graph Neural Networks for Learning Equivariant Representations of Neural Networks", "authors": [ "Miltiadis Kofinas", "Boris Knyazev", "Yan Zhang", "Yunlu Chen", "Gertjan J. Burghouts", "Efstratios Gavves", "Cees G. M. Snoek", "David W. Zhang" ], "abstract": "Neural networks that process the parameters of other neural networks find applications in domains as diverse as classifying implicit neural representations, generating neural network weights, and predicting generalization errors.However, existing approaches either overlook the inherent permutation symmetry in the neural network or rely on intricate weight-sharing patterns to achieve equivariance, while ignoring the impact of the network architecture itself.In this work, we propose to represent neural networks as computational graphs of parameters, which allows us to harness powerful graph neural networks and transformers that preserve permutation symmetry.Consequently, our approach enables a single model to encode neural computational graphs with diverse architectures.We showcase the effectiveness of our method on a wide range of tasks, including classification and editing of implicit neural representations, predicting generalization performance, and learning to optimize, while consistently outperforming state-of-the-art methods.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=oO6FsMyDBt", "arxiv_id": "2403.12143", "GitHub": [ "https://github.com/mkofinas/neural-graphs" ], "Space": [], "Model": [], "Dataset": [] }, { "id": 19789, "title": "BooookScore: A systematic exploration of book-length summarization in the era of LLMs", "authors": [ "Yapei Chang", "Kyle Lo", "Tanya Goyal", "Mohit Iyyer" ], "abstract": "Summarizing book-length documents ($>$100K tokens) that exceed the context window size of large language models (LLMs) requires first breaking the input document into smaller chunks and then prompting an LLM to merge, update, and compress chunk-level summaries. Despite the complexity and importance of this task, it has yet to be meaningfully studied due to the challenges of evaluation: existing book-length summarization datasets (e.g., BookSum) are in the pretraining data of most public LLMs, and existing evaluation methods struggle to capture errors made by modern LLM summarizers. In this paper, we present the first study of the coherence of LLM-based book-length summarizers implemented via two prompting workflows: (1) hierarchically merging chunk-level summaries, and (2) incrementally updating a running summary. We obtain 1193 fine-grained human annotations on GPT-4 generated summaries of 100 recently-published books and identify eight common types of coherence errors made by LLMs. Because human evaluation is expensive and time-consuming, we develop an automatic metric, BooookScore, that measures the proportion of sentences in a summary that do not contain any of the identified error types. BooookScore has high agreement with human annotations and allows us to systematically evaluate the impact of many other critical parameters (e.g., chunk size, base LLM) while saving \\$15K and 500 hours in human evaluation costs. We find that closed-source LLMs such as GPT-4 and Claude 2 produce summaries with higher BooookScore than the oft-repetitive ones generated by LLaMA 2. Incremental updating yields lower BooookScore but higher level of detail than hierarchical merging, a trade-off sometimes preferred by human annotators. We release code and annotations after blind review to spur more principled research on book-length summarization.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=7Ttk3RzDeu", "arxiv_id": "2310.00785", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19757, "title": "SWE-bench: Can Language Models Resolve Real-world Github Issues?", "authors": [ "Carlos E Jimenez", "John Yang", "Alexander Wettig", "Shunyu Yao", "Kexin Pei", "Ofir Press", "Karthik R Narasimhan" ], "abstract": "Language models (LMs) have been improving rapidly, and today we lack benchmarks that are hard to solve but easy to evaluate. Coding is such a desired task, but existing coding benchmarks only feature self-contained problems solvable within tens of lines. Inspired by how real-world programmers code to fix bugs or ship new features, we introduce SWE-bench, a benchmark with 2,294 GitHub issues sourced from 12 popular Python repositories. Given a codebase and an issue description, an LM is tasked with editing the codebase to resolve the issue and pass all related tests. Our experiments show that both state-of-the-art proprietary LMs and our fine-tuned LM, SWE-Llama, can resolve only the simplest issues. For example, Claude 2 and GPT-4 solve a mere 3.6% and 1.3% of tasks respectively, even when provided with an oracle retriever. Through systematic analysis, we identify various factors underlying LM performances, such as the retrieval setup, codebase size, and issue complexity. We also identify key challenges for LMs to solve real-world software engineering problems, including understanding cross-file dependencies, localizing edit locations, and generating long and well-formatted patch files. SWE-bench shows that real-world software engineering is a diverse, challenging and sustainable testbed for evaluating a wide range of language model abilities.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=VTF8yNQM66", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19714, "title": "ValUES: A Framework for Systematic Validation of Uncertainty Estimation in Semantic Segmentation", "authors": [ "Kim-Celine Kahl", "Carsten T. L\u00fcth", "Maximilian Zenk", "Klaus Maier-Hein", "Paul F Jaeger" ], "abstract": "Uncertainty estimation is an essential and heavily-studied component for the reliable application of semantic segmentation methods. While various studies exist claiming methodological advances on the one hand, and successful application on the other hand, the field is currently hampered by a gap between theory and practice leaving fundamental questions unanswered: Can data-related and model-related uncertainty really be separated in practice? Which components of an uncertainty method are essential for real-world performance? Which uncertainty method works well for which application? In this work, we link this research gap to a lack of systematic and comprehensive evaluation of uncertainty methods. Specifically, we identify three key pitfalls in current literature and present an evaluation framework that bridges the research gap by providing 1) a controlled environment for studying data ambiguities as well as distribution shifts, 2) systematic ablations of relevant method components, and 3) test-beds for the five predominant uncertainty applications: OoD-detection, active learning, failure detection, calibration, and ambiguity modeling. Empirical results on simulated as well as real-world data demonstrate how the proposed framework is able to answer the predominant questions in the field revealing for instance that 1) separation of uncertainty types works on simulated data but does not necessarily translate to real-world data, 2) aggregation of scores is a crucial but currently neglected component of uncertainty methods, 3) While ensembles are performing most robustly across the different downstream tasks and settings, test-time augmentation often constitutes a light-weight alternative. (Code will be released upon acceptance)", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=yV6fD7LYkF", "arxiv_id": "2401.08501", "GitHub": [ "https://github.com/IML-DKFZ/values" ], "Space": [], "Model": [], "Dataset": [] }, { "id": 19788, "title": "Provable Compositional Generalization for Object-Centric Learning", "authors": [ "Thadd\u00e4us Wiedemer", "Jack Brady", "Alexander Panfilov", "Attila Juhos", "Matthias Bethge", "Wieland Brendel" ], "abstract": "Learning representations that generalize to novel compositions of known concepts is crucial for bridging the gap between human and machine perception. One prominent effort is learning object-centric representations, which are widely conjectured to enable compositional generalization. Yet, it remains unclear when this conjecture will be true, as a principled theoretical or empirical understanding of compositional generalization is lacking. In this work, we investigate when compositional generalization is guaranteed for object-centric representations through the lens of identifiability theory. We show that autoencoders that satisfy structural assumptions on the decoder and enforce encoder-decoder consistency will learn object-centric representations that provably generalize compositionally. We validate our theoretical result and highlight the practical relevance of our assumptions through experiments on synthetic image data.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=7VPTUWkiDQ", "arxiv_id": "2310.05327", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19744, "title": "Approximating Nash Equilibria in Normal-Form Games via Stochastic Optimization", "authors": [ "Ian Gemp", "Luke Marris", "Georgios Piliouras" ], "abstract": "We propose the first loss function for approximate Nash equilibria of normal-form games that is amenable to unbiased Monte Carlo estimation. This construction allows us to deploy standard non-convex stochastic optimization techniques for approximating Nash equilibria, resulting in novel algorithms with provable guarantees. We complement our theoretical analysis with experiments demonstrating that stochastic gradient descent can outperform previous state-of-the-art approaches.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=cc8h3I3V4E", "arxiv_id": "2310.06689", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19747, "title": "Phenomenal Yet Puzzling: Testing Inductive Reasoning Capabilities of Language Models with Hypothesis Refinement", "authors": [ "Linlu Qiu", "Liwei Jiang", "Ximing Lu", "Melanie Sclar", "Valentina Pyatkin", "Chandra Bhagavatula", "Bailin Wang", "Yoon Kim", "Yejin Choi", "Nouha Dziri", "Xiang Ren" ], "abstract": "The ability to derive the underlying principles from a handful of observations and then generalize to novel situations---known as inductive reasoning---is central to human intelligence. Prior work suggests that language models (LMs) often fall short on inductive reasoning, despite achieving impressive success on research benchmarks. In this work, we conduct a systematic study of the inductive reasoning capabilities of LMs through $\\textit{iterative hypothesis refinement}$, a technique that more closely mirrors the human inductive process than standard input-output prompting. Iterative hypothesis refinement employs a three-step process: proposing, selecting, and refining hypotheses in the form of textual rules. By examining the intermediate rules, we observe that LMs are phenomenal $\\textit{hypothesis proposers}$ (i.e., generating candidate rules), and when coupled with a (task-specific) symbolic interpreter that is able to systematically filter the proposed set of rules, this hybrid approach achieves strong results across inductive reasoning benchmarks that require inducing causal relations, language-like instructions, and symbolic concepts. However, they also behave as puzzling $\\textit{inductive reasoners}$, showing notable performance gaps in rule induction (i.e., identifying plausible rules) and rule application (i.e., applying proposed rules to instances), suggesting that LMs are proposing hypotheses without being able to actually apply the rules. Through extensive empirical and human analyses, we further reveal several discrepancies between the inductive reasoning processes of LMs and humans, shedding light on both the potentials and limitations of using LMs in inductive reasoning tasks.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=bNt7oajl2a", "arxiv_id": "2310.08559", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19749, "title": "The mechanistic basis of data dependence and abrupt learning in an in-context classification task", "authors": [ "Gautam Reddy" ], "abstract": "Transformer models exhibit in-context learning: the ability to accurately predict the response to a novel query based on illustrative examples in the input sequence, which contrasts with traditional in-weights learning of query-output relationships. What aspects of the training data distribution and architecture favor in-context vs in-weights learning? Recent work has shown that specific distributional properties inherent in language, such as burstiness, large dictionaries and skewed rank-frequency distributions, control the trade-off or simultaneous appearance of these two forms of learning. We first show that these results are recapitulated in a minimal attention-only network trained on a simplified dataset. In-context learning (ICL) is driven by the abrupt emergence of an induction head, which subsequently competes with in-weights learning. By identifying progress measures that precede in-context learning and targeted experiments, we construct a two-parameter model of an induction head which emulates the full data distributional dependencies displayed by the attention-based network. A phenomenological model of induction head formation traces its abrupt emergence to the sequential learning of three nested logits enabled by an intrinsic curriculum. We propose that the sharp transitions in attention-based networks arise due to a specific chain of multi-layer operations necessary to achieve ICL, which is implemented by nested nonlinearities sequentially learned during training.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=aN4Jf6Cx69", "arxiv_id": "2312.03002", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19755, "title": "Lipschitz Singularities in Diffusion Models", "authors": [ "Zhantao Yang", "Ruili Feng", "Han Zhang", "Yujun Shen", "Kai Zhu", "Lianghua Huang", "Yifei Zhang", "Yu Liu", "Deli Zhao", "Jingren Zhou", "Fan Cheng" ], "abstract": "Diffusion models, which employ stochastic differential equations to sample images through integrals, have emerged as a dominant class of generative models. However, the rationality of the diffusion process itself receives limited attention, leaving the question of whether the problem is well-posed and well-conditioned. In this paper, we uncover a vexing propensity of diffusion models: they frequently exhibit the infinite Lipschitz near the zero point of timesteps. We provide theoretical proofs to illustrate the presence of infinite Lipschitz constants and empirical results to confirm it. The Lipschitz singularities pose a threat to the stability and accuracy during both the training and inference processes of diffusion models. Therefore, the mitigation of Lipschitz singularities holds great potential for enhancing the performance of diffusion models. To address this challenge, we propose a novel approach, dubbed E-TSDM, which alleviates the Lipschitz singularities of the diffusion model near the zero point. Remarkably, our technique yields a substantial improvement in performance. Moreover, as a byproduct of our method, we achieve a dramatic reduction in the Fr\u00e9chet Inception Distance of acceleration methods relying on network Lipschitz, including DDIM and DPM-Solver, by over 33\\%. Extensive experiments on diverse datasets validate our theory and method. Our work may advance the understanding of the general diffusion process, and also provide insights for the design of diffusion models.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=WNkW0cOwiz", "arxiv_id": "2306.11251", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19754, "title": "Improved Techniques for Training Consistency Models", "authors": [ "Yang Song", "Prafulla Dhariwal" ], "abstract": "Consistency models are a nascent family of generative models that can sample high quality data in one step without the need for adversarial training. Current consistency models achieve optimal sample quality by distilling from pre-trained diffusion models, and employing learned metrics such as LPIPS. However, distillation limits the quality of consistency models to that of the pre-trained diffusion model, and LPIPS causes undesirable bias in evaluation. To tackle these challenges, we present improved techniques for consistency training, where consistency models learn directly from data without distillation. We delve into the theory behind consistency training and identify a previously overlooked flaw, which we address by eliminating Exponential Moving Average from the teacher consistency model. To replace learned metrics like LPIPS, we borrow Pseudo-Huber losses from robust statistics. Additionally, we introduce a new noise schedule for the consistency training objective, and propose a new curriculum for total discretization steps. Collectively, these modifications enable consistency models to achieve FID scores of 2.62 and 3.91 on CIFAR-10 and ImageNet $64\\times 64$ respectively in a single sampling step. These scores mark a 3.3$\\times$ improvement compared to prior consistency training approaches. Through two-step sampling, we further reduce FID scores to 2.28 and 3.64, surpassing those obtained via distillation in both one-step and two-step settings, while narrowing the gap between consistency models and state-of-the-art generative models on both datasets.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=WNzy9bRDvG", "arxiv_id": "2310.14189", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19782, "title": "Ghost on the Shell: An Expressive Representation of General 3D Shapes", "authors": [ "Zhen Liu", "Yao Feng", "Yuliang Xiu", "Weiyang Liu", "Liam Paull", "Michael J. Black", "Bernhard Sch\u00f6lkopf" ], "abstract": "The creation of photorealistic virtual worlds requires the accurate modeling of 3D surface geometry for a wide range of objects. For this, meshes are appealing since they enable 1) fast physics-based rendering with realistic material and lighting, 2) physical simulation, and 3) are memory-efficient for modern graphics pipelines. Recent work on reconstructing and statistically modeling 3D shape, however, has critiqued meshes as being topologically inflexible. To capture a wide range of object shapes, any 3D representation must be able to model solid, watertight, shapes as well as thin, open, surfaces. Recent work has focused on the former, and methods for reconstructing open surfaces do not support fast reconstruction with material and lighting or unconditional generative modelling. Inspired by the observation that open surfaces can be seen as islands floating on watertight surfaces, we parametrize open surfaces by defining a manifold signed distance field on watertight templates. With this parametrization, we further develop a grid-based and differentiable representation that parametrizes both watertight and non-watertight meshes of arbitrary topology. Our new representation, called Ghost-on-the-Shell (G-Shell), enables two important applications: differentiable rasterization-based reconstruction from multiview images and generative modelling of non-watertight meshes. We empirically demonstrate that G-Shell achieves state-of-the-art performance on non-watertight mesh reconstruction and generation tasks, while also performing effectively for watertight meshes.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=Ad87VjRqUw", "arxiv_id": "2310.15168", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19736, "title": "Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection", "authors": [ "Akari Asai", "Zeqiu Wu", "Yizhong Wang", "Avirup Sil", "Hannaneh Hajishirzi" ], "abstract": "Retrieval-Augmented Generation (RAG), an ad hoc approach that augments Language Models (LMs) with retrieval, decreases hallucination issues of large LMs. However, indiscriminately retrieving and incorporating a fixed number of retrieved passages, regardless of whether retrieval is necessary, or passages are relevant, diminishes LM versatility or can lead to unhelpful response generation.In this work, we introduce a new framework called **Self-Reflective Retrieval-Augmented Generation (Self-RAG)** that enhances an LM's quality and factuality through retrieval and self-reflection. Our framework trains a single arbitrary LM that adaptively retrieves passages on-demand, and generates and reflects on retrieved passages and its own generations using special tokens, called *reflection* tokens. Generating reflection tokens makes the LM controllable during the inference phase, enabling it to tailor its behavior to diverse task requirements. Experiments show that Self-RAG (7B and 13B parameters) significantly outperforms state-of-the-art LLMs and retrieval-augmented models on a diverse set of tasks. Specifically, Self-RAG outperforms ChatGPT and retrieval-augmented Llama2-chat on multiple tasks including Open-domain QA and fact verification, and it shows significant gains in factuality scores and citation accuracy for long-form generations relative to these models.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=hSyW5go0v8", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19724, "title": "Robust agents learn causal world models", "authors": [ "Jonathan Richens", "Tom Everitt" ], "abstract": "It has long been hypothesised that causal reasoning plays a fundamental role in robust and general intelligence. However, it is not known if agents must learn causal models in order to generalise to new domains, or if other inductive biases are sufficient. We answer this question, showing that any agent capable of satisfying a regret bound under a large set of distributional shifts must have learned an approximate causal model of the data generating process, which converges to the true causal model for optimal agents. We discuss the implications of this result for several research areas including transfer learning and causal inference.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=pOoKI3ouv1", "arxiv_id": "2402.10877", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19768, "title": "MathVista: Evaluating Mathematical Reasoning of Foundation Models in Visual Contexts", "authors": [ "Pan Lu", "Hritik Bansal", "Tony Xia", "Jiacheng Liu", "Chunyuan Li", "Hannaneh Hajishirzi", "Hao Cheng", "Kai-Wei Chang", "Michel Galley", "Jianfeng Gao" ], "abstract": "Although Large Language Models (LLMs) and Large Multimodal Models (LMMs) exhibit impressive skills in various domains, their ability for mathematical reasoning within visual contexts has not been formally examined. Equipping LLMs and LMMs with this capability is vital for general-purpose AI assistants and showcases promising potential in education, data analysis, and scientific discovery. To bridge this gap, we present MathVista, a benchmark designed to amalgamate challenges from diverse mathematical and visual tasks. We first taxonomize the key task types, reasoning skills, and visual contexts from the literature to guide our selection from 28 existing math-focused and visual question answering datasets. Then, we construct three new datasets, IQTest, FunctionQA, and PaperQA, to accommodate for missing types of visual contexts. The problems featured often require deep visual understanding beyond OCR or image captioning, and compositional reasoning with rich domain-specific tools, thus posing a notable challenge to existing models. We conduct a comprehensive evaluation of 11 prominent open-source and proprietary foundation models (LLMs, LLMs augmented with tools, and LMMs). The best-performing model, Multimodal Bard, achieves only 58\\% of human performance (34.8\\% vs 60.3\\%), indicating ample room for further improvement. Given this significant gap, MathVista fuels future research in the development of general-purpose AI agents capable of tackling mathematically intensive and visually rich real-world tasks.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=KUNzEQMWU7", "arxiv_id": "2310.02255", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19785, "title": "A Real-World WebAgent with Planning, Long Context Understanding, and Program Synthesis", "authors": [ "Izzeddin Gur", "Hiroki Furuta", "Austin V Huang", "Mustafa Safdari", "Yutaka Matsuo", "Douglas Eck", "Aleksandra Faust" ], "abstract": "Pre-trained large language models (LLMs) have recently achieved better generalization and sample efficiency in autonomous web automation.However, the performance on real-world websites has still suffered from (1) open domainness, (2) limited context length, and (3) lack of inductive bias on HTML.We introduce WebAgent, an LLM-driven agent that learns from self-experience to complete tasks on real websites following natural language instructions.WebAgent plans ahead by decomposing instructions into canonical sub-instructions, summarizes long HTML documents into task-relevant snippets, and acts on websites via Python programs generated from those.We design WebAgent with Flan-U-PaLM, for grounded code generation, and HTML-T5, new pre-trained LLMs for long HTML documents using local and global attention mechanisms and a mixture of long-span denoising objectives, for planning and summarization.We empirically demonstrate that our modular recipe improves the success on real websites by over 50%, and that HTML-T5 is the best model to solve various HTML understanding tasks; achieving 18.7% higher success rate than the prior method on MiniWoB web automation benchmark, and SoTA performance on Mind2Web, an offline task planning evaluation.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=9JQtrumvg8", "arxiv_id": "2307.12856", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19732, "title": "ASID: Active Exploration for System Identification in Robotic Manipulation", "authors": [ "Marius Memmel", "Andrew Wagenmaker", "Chuning Zhu", "Dieter Fox", "Abhishek Gupta" ], "abstract": "Model-free control strategies such as reinforcement learning have shown the ability to learn control strategies without requiring an accurate model or simulator of the world. While this is appealing due to the lack of modeling requirements, real-world RL can be unsafe and sample inefficient, making it impractical in many safety-critical domains. On the other hand, model-based control techniques leveraging accurate simulators can circumvent these challenges and use a large amount of cheap simulation data to learn controllers that can effectively transfer to the real world. The challenge with such model-based techniques is the requirement for an extremely accurate simulation, requiring both the specification of appropriate simulation assets and physical parameters. This requires considerable human effort to design for every environment being considered. In this work, we propose a learning system that can leverage a small amount of real-world data to autonomously refine a simulation model, and then plan an accurate control strategy that can be deployed in the real world. Our approach critically relies on utilizing an initial (possibly inaccurate) simulator to design effective exploration policies that, when deployed in the real world, collect high-quality data. We demonstrate the efficacy of this paradigm in identifying articulation, mass, and other physical parameters in several challenging robotic manipulation tasks, and illustrate that only a small amount of real-world data can allow for effective sim-to-real transfer.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=jNR6s6OSBT", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19748, "title": "Predictive auxiliary objectives in deep RL mimic learning in the brain", "authors": [ "Ching Fang", "Kim Stachenfeld" ], "abstract": "The ability to predict upcoming events has been hypothesized to comprise a key aspect of natural and machine cognition. This is supported by trends in deep reinforcement learning (RL), where self-supervised auxiliary objectives such as prediction are widely used to support representation learning and improve task performance. Here, we study the effects predictive auxiliary objectives have on representation learning across different modules of an RL system and how these mimic representational changes observed in the brain. We find that predictive objectives improve and stabilize learning particularly in resource-limited architectures, and we identify settings where longer predictive horizons better support representational transfer. Furthermore, we find that representational changes in this RL system bear a striking resemblance to changes in neural activity observed in the brain across various experiments. Specifically, we draw a connection between the auxiliary predictive model of the RL system and hippocampus, an area thought to learn a predictive model to support memory-guided behavior. We also connect the encoder network and the value learning network of the RL system to visual cortex and striatum in the brain, respectively. This work demonstrates how representation learning in deep RL systems can provide an interpretable framework for modeling multi-region interactions in the brain. The deep RL perspective taken here also suggests an additional role of the hippocampus in the brain-- that of an auxiliary learning system that benefits representation learning in other regions.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=agPpmEgf8C", "arxiv_id": "2310.06089", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19720, "title": "Generative Modeling with Phase Stochastic Bridge", "authors": [ "Tianrong Chen", "Jiatao Gu", "Laurent Dinh", "Evangelos Theodorou", "Joshua M. Susskind", "Shuangfei Zhai" ], "abstract": "Diffusion models (DMs) represent state-of-the-art generative models for continuous inputs. DMs work by constructing a Stochastic Differential Equation (SDE) in the input space (ie, position space), and using a neural network to reverse it. In this work, we introduce a novel generative modeling framework grounded in \\textbf{phase space dynamics}, where a phase space is defined as {an augmented space encompassing both position and velocity.} Leveraging insights from Stochastic Optimal Control, we construct a path measure in the phase space that enables efficient sampling. {In contrast to DMs, our framework demonstrates the capability to generate realistic data points at an early stage of dynamics propagation.} This early prediction sets the stage for efficient data generation by leveraging additional velocity information along the trajectory. On standard image generation benchmarks, our model yields favorable performance over baselines in the regime of small Number of Function Evaluations (NFEs). Furthermore, our approach rivals the performance of diffusion models equipped with efficient sampling techniques, underscoring its potential as a new tool generative modeling.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=tUtGjQEDd4", "arxiv_id": "2310.07805", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19767, "title": "Improving Convergence and Generalization Using Parameter Symmetries", "authors": [ "Bo Zhao", "Robert M. Gower", "Robin Walters", "Rose Yu" ], "abstract": "In overparametrized models, different values of the parameters may result in the same loss value. Parameter space symmetries are loss-invariant transformations that change the model parameters. Teleportation applies such transformations to accelerate optimization. However, the exact mechanism behind this algorithm's success is not well understood. In this paper, we show that teleportation not only speeds up optimization in the short-term, but gives overall faster time to convergence. Additionally, teleporting to minima with different curvatures improves generalization, which suggests a connection between the curvature of the minima and generalization ability. Finally, we show that integrating teleportation into a wide range of optimization algorithms and optimization-based meta-learning improves convergence. Our results showcase the versatility of teleportation and demonstrate the potential of incorporating symmetry in optimization.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=L0r0GphlIL", "arxiv_id": "2305.13404", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 18596, "title": "Incremental Randomized Smoothing Certification", "authors": [ "Shubham Ugare", "Tarun Suresh", "Debangshu Banerjee", "Gagandeep Singh", "Sasa Misailovic" ], "abstract": "Randomized smoothing-based certification is an effective approach for obtaining robustness certificates of deep neural networks (DNNs) against adversarial attacks. This method constructs a smoothed DNN model and certifies its robustness through statistical sampling, but it is computationally expensive, especially when certifying with a large number of samples. Furthermore, when the smoothed model is modified (e.g., quantized or pruned), certification guarantees may not hold for the modified DNN, and recertifying from scratch can be prohibitively expensive.We present the first approach for incremental robustness certification for randomized smoothing, IRS. We show how to reuse the certification guarantees for the original smoothed model to certify an approximated model with very few samples. IRS significantly reduces the computational cost of certifying modified DNNs while maintaining strong robustness guarantees. We experimentally demonstrate the effectiveness of our approach, showing up to 4.1x certification speedup over the certification that applies randomized smoothing of the approximate model from scratch.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=SdeAPV1irk", "arxiv_id": "2305.19521", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19741, "title": "Understanding In-Context Learning in Transformers and LLMs by Learning to Learn Discrete Functions", "authors": [ "Satwik Bhattamishra", "Arkil Patel", "Phil Blunsom", "Varun Kanade" ], "abstract": "In order to understand the in-context learning phenomenon, recent works have adopted a stylized experimental framework and demonstrated that Transformers can learn gradient-based learning algorithms for various classes of real-valued functions. However, the limitations of Transformers in implementing learning algorithms, and their ability to learn other forms of algorithms are not well understood. Additionally, the degree to which these capabilities are confined to attention-based models is unclear. Furthermore, it remains to be seen whether the insights derived from these stylized settings can be extrapolated to pretrained Large Language Models (LLMs). In this work, we take a step towards answering these questions by demonstrating the following: (a) On a test-bed with a variety of Boolean function classes, we find that Transformers can nearly match the optimal learning algorithm for 'simpler' tasks, while their performance deteriorates on more 'complex' tasks. Additionally, we find that certain attention-free models perform (almost) identically to Transformers on a range of tasks. (b) When provided a *teaching sequence*, i.e. a set of examples that uniquely identifies a function in a class, we show that Transformers learn more sample-efficiently. Interestingly, our results show that Transformers can learn to implement *two distinct* algorithms to solve a *single* task, and can adaptively select the more sample-efficient algorithm depending on the sequence of in-context examples. (c) Lastly, we show that extant LLMs, e.g. LLaMA-2, GPT-4, can compete with nearest-neighbor baselines on prediction tasks that are guaranteed to not be in their training set.", "type": "Oral", "OpenReview": "https://openreview.net/forum?id=ekeyCgeRfC", "arxiv_id": "2310.03016", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 18595, "title": "Dropout-Based Rashomon Set Exploration for Efficient Predictive Multiplicity Estimation", "authors": [ "Hsiang Hsu", "Guihong Li", "Shaohan Hu", "Chun-Fu Chen" ], "abstract": "Predictive multiplicity refers to the phenomenon in which classification tasks may admit multiple competing models that achieve almost-equally-optimal performance, yet generate conflicting outputs for individual samples.This presents significant concerns, as it can potentially result in systemic exclusion, inexplicable discrimination, and unfairness in practical applications.Measuring and mitigating predictive multiplicity, however, is computationally challenging due to the need to explore all such almost-equally-optimal models, known as the Rashomon set, in potentially huge hypothesis spaces. To address this challenge, we propose a novel framework that utilizes dropout techniques for exploring models in the Rashomon set.We provide rigorous theoretical derivations to connect the dropout parameters to properties of the Rashomon set, and empirically evaluate our framework through extensive experimentation.Numerical results show that our technique consistently outperforms baselines in terms of the effectiveness of predictive multiplicity metric estimation, with runtime speedup up to $20\\times \\sim 5000\\times$.With efficient Rashomon set exploration and metric estimation, mitigation of predictive multiplicity is then achieved through dropout ensemble and model selection.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=Sf2A2PUXO3", "arxiv_id": "2402.00728", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 18680, "title": "Guaranteed Approximation Bounds for Mixed-Precision Neural Operators", "authors": [ "Renbo Tu", "Colin White", "Jean Kossaifi", "Boris Bonev", "Gennady Pekhimenko", "Kamyar Azizzadenesheli", "Anima Anandkumar" ], "abstract": "Neural operators, such as Fourier Neural Operators (FNO), form a principled approach for learning solution operators for partial differential equations (PDE) and other mappings between function spaces. However, many real-world problems require high-resolution training data, and the training time and limited GPU memory pose big barriers. One solution is to train neural operators in mixed precision to reduce the memory requirement and increase training speed. However, existing mixed-precision training techniques are designed for standard neural networks, and we find that their direct application to FNO leads to numerical overflow and poor memory efficiency. Further, at first glance, it may appear that mixed precision in FNO will lead to drastic accuracy degradation since reducing the precision of the Fourier transform yields poor results in classical numerical solvers. We show that this is not the case; in fact, we prove that reducing the precision in FNO still guarantees a good approximation bound, when done in a targeted manner. Specifically, we build on the intuition that neural operator learning inherently induces an approximation error, arising from discretizing the infinite-dimensional ground-truth input function, implying that training in full precision is not needed. We formalize this intuition by rigorously characterizing the approximation and precision errors of FNO and bounding these errors for general input functions. We prove that the precision error is asymptotically comparable to the approximation error. Based on this, we design a simple method to optimize the memory-intensive half-precision tensor contractions by greedily finding the optimal contraction order. Through extensive experiments on different state-of-the-art neural operators, datasets, and GPUs, we demonstrate that our approach reduces GPU memory usage by up to 50% and improves throughput by 58% with little or no reduction in accuracy.", "type": "Poster", "OpenReview": "https://openreview.net/forum?id=QJGj07PD9C", "arxiv_id": "", "GitHub": [], "Space": [], "Model": [], "Dataset": [] }, { "id": 19772, "title": "Towards a statistical theory of data selection under weak supervision", "authors": [ "Germain Kolossov", "Andrea Montanari", "Pulkit Tandon" ], "abstract": "Given a sample of size $N$, it is often useful to select a subsample of smaller size $n