Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- 20241030/2003.08162v2.json +0 -0
- 20241030/2203.08615v3.json +487 -0
- 20241030/2206.03695v3.json +0 -0
- 20241030/2206.14254v4.json +148 -0
- 20241030/2208.02439v2.json +288 -0
- 20241030/2212.08841v4.json +0 -0
- 20241030/2212.10388v2.json +0 -0
- 20241030/2301.11486v3.json +0 -0
- 20241030/2301.13603v3.json +265 -0
- 20241030/2303.10465v2.json +0 -0
- 20241030/2304.00910v4.json +0 -0
- 20241030/2304.00977v2.json +548 -0
- 20241030/2305.12715v4.json +0 -0
- 20241030/2305.14434v2.json +0 -0
- 20241030/2307.08235v2.json +0 -0
- 20241030/2307.08925v3.json +0 -0
- 20241030/2307.10349v2.json +0 -0
- 20241030/2309.04459v2.json +0 -0
- 20241030/2309.12927v3.json +0 -0
- 20241030/2310.05185v3.json +0 -0
- 20241030/2310.07355v5.json +0 -0
- 20241030/2310.08975v3.json +0 -0
- 20241030/2310.14692v3.json +176 -0
- 20241030/2310.19453v4.json +0 -0
- 20241030/2311.00277v3.json +0 -0
- 20241030/2311.03857v2.json +0 -0
- 20241030/2311.08110v3.json +0 -0
- 20241030/2311.08593v2.json +290 -0
- 20241030/2312.01847v2.json +0 -0
- 20241030/2312.05439v3.json +0 -0
- 20241030/2312.10336v2.json +667 -0
- 20241030/2401.00003v6.json +0 -0
- 20241030/2401.02349v2.json +0 -0
- 20241030/2401.10225v5.json +0 -0
- 20241030/2401.15866v2.json +0 -0
- 20241030/2401.16727v4.json +815 -0
- 20241030/2402.00123v2.json +0 -0
- 20241030/2402.00793v3.json +197 -0
- 20241030/2402.01607v3.json +0 -0
- 20241030/2402.02042v3.json +389 -0
- 20241030/2402.02518v2.json +0 -0
- 20241030/2402.03492v3.json +0 -0
- 20241030/2402.04646v2.json +0 -0
- 20241030/2402.05369v3.json +0 -0
- 20241030/2402.05379v3.json +645 -0
- 20241030/2402.06353v3.json +0 -0
- 20241030/2402.09299v4.json +0 -0
- 20241030/2402.10360v3.json +304 -0
- 20241030/2402.14180v2.json +521 -0
- 20241030/2402.14576v3.json +230 -0
20241030/2003.08162v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2203.08615v3.json
ADDED
|
@@ -0,0 +1,487 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"title": "Scientific and Technological Information Oriented Semantics-adversarial and Media-adversarial Cross-media Retrieval",
|
| 3 |
+
"abstract": "Cross-media retrieval of scientific and technological information is one of the important tasks in the cross-media study. Cross-media scientific and technological information retrieval obtain target information from massive multi-source and heterogeneous scientific and technological resources, which helps to design applications that meet users\u2019 needs, including scientific and technological information recommendation, personalized scientific and technological information retrieval, etc. The core of cross-media retrieval is to learn a common subspace, so that data from different media can be directly compared with each other after being mapped into this subspace. In subspace learning, existing methods often focus on modeling the discrimination of intra-media data and the invariance of inter-media data after mapping; however, they ignore the semantic consistency of inter-media data before and after mapping and media discrimination of intra-semantics data, which limit the result of cross-media retrieval. In light of this, we propose a scientific and technological information oriented Semantics-adversarial and Media-adversarial Cross-media Retrieval method (SMCR) to find an effective common subspace. Specifically, SMCR minimizes the loss of inter-media semantic consistency in addition to modeling intra-media semantic discrimination, to preserve semantic similarity before and after mapping. Furthermore, SMCR constructs a basic feature mapping network and a refined feature mapping network to jointly minimize the media discriminative loss within semantics, so as to enhance the feature mapping network\u2019s ability to confuse the media discriminant network. Experimental results on two datasets demonstrate that the proposed SMCR outperforms state-of-the-art methods in cross-media retrieval.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "1. Introduction",
|
| 9 |
+
"text": "Science and Technology Information focuses on the cutting-edge trends of high-tech at home and abroad. Real-time follow-up of the latest scientific and technological information helps to promote the development of national strategic scientific and technological forces, drive scientific and technological innovation, and thus ensure high-quality national development. Scientific and technological information contains a large amount of multimedia information (such as images, texts, etc.), and has the characteristics of large volume, rich sources, and diverse types (Peng\net al., 2018 ###reference_b25###; Shi\net al., 2021 ###reference_b28###; Li et al., 2017b ###reference_b17###). As a current research hotspot, cross-media scientific and technological information retrieval is still faced with the problem that the heterogeneous gap and semantic gap between multimedia data need to be broken urgently (Wei\net al., 2016 ###reference_b33###). Through cross-media scientific and technological information retrieval, target scientific and technological information can be obtained from massive multi-source and heterogeneous scientific and technological resources (Li et al., 2017a ###reference_b16###), so as to design applications that meet user needs, including scientific and technological information recommendation (Li\net al., 2018c ###reference_b18###), personalized scientific and technological information retrieval (Salehi\net al., 2015 ###reference_b27###; Yang\net al., 2015 ###reference_b38###), etc.. 
We aim to solve the problem that the existing cross-media scientific and technological information retrieval methods only consider the discriminative loss of intra-media data and the invariance loss of inter-media data after mapping, while ignoring the semantic consistency loss of inter-media data before and after mapping, and the discriminative loss of intra-semantic data, which limit the effect of cross-media retrieval.\nThere are various types of cross-media scientific and technological information retrieval. Previous work (Wang\net al., 2013 ###reference_b31###; Hu\net al., 2020 ###reference_b11###; Hardoon\net al., 2004 ###reference_b8###; Wang\net al., 2016 ###reference_b30###; Zhai\net al., 2014 ###reference_b40###) focused on traditional statistical correlation analysis methods, learning a linear projection matrix in a common space by optimizing statistical values (Gong\net al., 2014 ###reference_b6###) to build a shared subspace, in which data belonging to different media can be directly compared with each other using common distances in this subspace. All of the above methods rely on a linear representation of the data; however, it is difficult to fully simulate the complex correlations of real-world cross-media data only by linear projection. Therefore, some studies (Feng\net al., 2014 ###reference_b4###; Yan and\nMikolajczyk, 2015 ###reference_b37###; Peng\net al., 2016 ###reference_b24###; Kou\net al., 2016 ###reference_b13###; Xu et al., 2013 ###reference_b34###) solve the above problems through deep learning methods, using its powerful abstraction ability to deal with multi-layer nonlinear transformations of multimedia data for cross-media correlation learning. Existing deep learning-based cross-media retrieval models usually focus on preserving the pairwise similarity of coupled cross-media samples (e.g. images and texts) (Ngiam\net al., 2011 ###reference_b23###). 
However, for one sample of one media, there may exist\nmore than one semantically different samples of the same media so\nthat this focus on pairwise coupled samples only is far from sufficient (Wang\net al., 2017 ###reference_b29###). Recent works (He\net al., 2017 ###reference_b9###; Li\net al., 2018a ###reference_b14###; Wang\net al., 2017 ###reference_b29###; Zhen\net al., 2019 ###reference_b41###; Liu et al., 2021 ###reference_b22###) introduce the idea of adversarial learning to generate media-invariant representations for samples of different media in a common subspace by jointly performing label prediction and preserving the underlying cross-media semantic structure in the data. However, the above methods only focus on modeling the semantic discrimination of intra-media data and the semantic invariance of inter-media data after subspace mapping, while ignoring the semantic consistency of inter-media data before and after mapping, and the media discriminative within semantics. This limits the effect of cross-media retrieval.\nTo solve the above problems, we strengthens the ability to map different types of media data into a shared high-level semantic space by introducing inter-media semantic consistency and intra-semantics media constraints, and propose a scientific and technological information oriented Semantics-adversarial and Media-adversarial Cross-media Retrieval method (SMCR). SMCR adopts the idea of adversarial learning (Goodfellow et al., 2014 ###reference_b7###) to construct a feature mapper and media discriminator for mini-max game. SMCR follows previous work (Li\net al., 2018b ###reference_b15###; Yu\net al., 2019 ###reference_b39###) by utilizing label prediction to ensure that the data still retains intra-media distinctions after feature mapping. 
Different from previous work, SMCR simultaneously minimizes the distances between the data of different media in the same semantic text-image pair before and after feature mapping, respectively, to ensure the semantic consistency of the data between different media during the mapping process. To ensure the mapped data is semantically close to itself and far away from itself in media, we construct the basic mapping network and the refined mapping network to assist in modeling the intra-semantics media constraints. It helps to enhance the ability of the feature mapping network to confuse the media discrimination network. Meanwhile, the media discrimination network is responsible for distinguishing the original media of the mapped data, and once it is deceived, the entire game process converges.\nWe have three main contributions which are as follows:\nWe propose a scientific and technological information oriented Semantics-adversarial and Media-adversarial Cross-media Retrieval (SMCR) to break the heterogeneous gap and semantic gap between multimedia data. SMCR effectively learns the public representation of heterogeneous data by maintaining the intra-media semantic discrimination, inter-media semantic consistency, and intra-semantics media discrimination in an end-to-end way;\nWe model intra-semantics media constraints by constructing a basic feature mapping network and a refined feature mapping network to jointly perform feature mapping of multimedia data, to enhance the ability of the feature mapping network in confusing the media discrimination network;\nExtensive experiments on two datasets demonstrate that SMCR outperforms the current state-of-the-art cross-media retrieval methods, including traditional methods and deep learning-based methods."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "2. related work",
|
| 15 |
+
"text": "Cross-media retrieval of scientific and technological information is a research hotspot in recent years, aiming to learn a common subspace (Wang\net al., 2016 ###reference_b30###, 2017 ###reference_b29###; Xue\net al., 2019 ###reference_b35###, 2021 ###reference_b36###), in which data of different media can be directly compared with each other, so as to bridge the semantic gap between different media.\nClassic methods of cross-media retrieval focus on statistical correlation analysis (Wang\net al., 2013 ###reference_b31###; Hu\net al., 2020 ###reference_b11###; Hardoon\net al., 2004 ###reference_b8###; Wang\net al., 2016 ###reference_b30###; Zhai\net al., 2014 ###reference_b40###), which mainly learns the linear projection matrix of the common space by optimizing the statistical value. For instance, Hardoon et al. proposed Canonical Correlation Analysis (CCA) (Hardoon\net al., 2004 ###reference_b8###) by correlating the linear relationship between two multidimensional variables, which can be viewed as using complex labels as a way to guide feature selection towards underlying semantics. This method exploits two perspectives of the same semantic object to extract semantic representations. Wang et al. proposed a Joint Feature Selection and Subspace Learning (JFSSL) (Wang\net al., 2016 ###reference_b30###). Inspired by the underlying relationship between CCA and linear least squares, JFSSL utilizes coupled linear regression to learn the projection matrix such that the data for different media is mapped into a common subspace. At the same time, this method uses regularization to simultaneously select relevant and distinct features from different feature spaces, and uses multimedia graph regularization when mapping to preserve inter-media and intra-media similarity relationships. Zhai et al. (Zhai\net al., 2014 ###reference_b40###) proposed a novel feature learning algorithm for cross-media data called Joint Representation Learning (JRL). 
This method is able to jointly explore relevance and semantic information in a unified optimization framework, and integrate sparse and semi-supervised regularization for all media types into a unified optimization problem. It aims to simultaneously learn sparse projection matrices for different media and directly project raw heterogeneous features into the joint space. However, it is difficult to fully simulate the complex correlations of cross-media data in the real world through linear projection alone.\nWith the rise of deep learning, many studies have focused on applying deep neural networks capable of multi-layer nonlinear transformations to cross-media retrieval (Feng\net al., 2014 ###reference_b4###; Yan and\nMikolajczyk, 2015 ###reference_b37###; Peng\net al., 2016 ###reference_b24###; Kou\net al., 2016 ###reference_b13###; Xu et al., 2013 ###reference_b34###). For example, Yan et al. (Yan and\nMikolajczyk, 2015 ###reference_b37###) proposed a cross-media image caption matching method based on Deep Canonical Correlation Analysis (DCCA). By addressing non-trivial complexity and overfitting problems, this method is made suitable for high-dimensional image and text representations and large datasets. Peng et al. proposed a Cross-media Multiple Deep Network (CMDN) (Peng\net al., 2016 ###reference_b24###) to exploit complex and rich cross-media correlations through hierarchical learning. In the first stage, the CMDN does not only utilize separate representations within the media as previous works, however jointly learns two complementary separate representations for each media type; in the second stage, since each media type has two complementary separate representations, the method combines separate representations hierarchically in a deeper two-level network in order to jointly model inter-media and intra-media information to generate shared representations. 
However, existing deep neural network-based cross-media retrieval models usually only focus on preserving the pairwise similarity of coupled cross-media samples (e.g. an image and a piece of text), while ignoring one sample of one medium, which may exist multiple semantically different samples of the same media. Therefore, they cannot preserve cross-media semantic structure.\nIn recent years, state-of-the-art research on cross-media retrieval has turned to adversarial learning (Fang\net al., 2020 ###reference_b3###). Although adversarial learning is widely used in image generation (Radford\net al., 2016 ###reference_b26###), researchers also use it as a regularizer (Ganin and\nLempitsky, 2015 ###reference_b5###). Some studies have adopted the adversarial idea in cross-media retrieval and achieved remarkable results (He\net al., 2017 ###reference_b9###; Li\net al., 2018a ###reference_b14###; Wang\net al., 2017 ###reference_b29###; Zhen\net al., 2019 ###reference_b41###; Liu et al., 2021 ###reference_b22###). For example, Wang et al. proposed Adversarial Cross-Modal Retriviel (ACMR) (Wang\net al., 2017 ###reference_b29###) to address the difficulty of preserving cross-media semantic structure. The method uses feature projectors to generate media-invariant representations for samples of different media in a common subspace by jointly performing label prediction and preserving the underlying cross-media semantic structure in the data. Its purpose is to confuse media classifier acting as an adversary. Media classifier tries to distinguish samples based on their media and in this way guide the learning of feature projectors. Through the convergence of this process, i.e. when the media classifier fails, the representation subspace is optimal for cross-media retrieval. Zhen et al. 
proposed a Deep Supervised Cross-modal Retrieval method (DSCMR) (Zhen\net al., 2019 ###reference_b41###), which aims to find a common representation space in which samples from different media can be directly compared. This method minimizes the discriminative loss in the label space and common representation space to supervise the model in learning discriminative features. At the same time, the media-invariant loss is minimized, and a weight-sharing strategy is used to remove cross-media differences of multimedia data in a common representation space to learn media-invariant features. Liu et al. proposed a Semantic Similarity based Adversarial Cross-media Retrieval method (SSACR) (Liu et al., 2021 ###reference_b22###), using semantic distribution and similarity as the training basis of feature mapping network, so that the distance between different media data under the same semantics is small, and the distance between the same media data under different semantics is large. Finally, SSACR uses similarity to sort and obtain the search results in the same space. However, the above methods focus on modeling the semantic loss of intra-media data and the semantic loss of inter-media data after mapping, while ignoring the semantic consistency of inter-media data before and after mapping, and the media discrimination within semantics, which limit the effect of cross-media retrieval.\n###figure_1###"
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "3. Problem Formulation",
|
| 21 |
+
"text": "There are various kinds of multimedia data. To keep generality, we focuses on the cross-media retrieval of text and images. Given a set of semantically related image-text pairs , where represents the -th image-text pair in , represents an image feature vector of dimension , and represents a text feature vector of dimension . Each image-text pair corresponds to a semantic category vector , where represents the total number of semantic categories. If belongs to the -th semantic category, then record , otherwise record . We record the feature matrices corresponding to all images, texts, and semantic categories in as , , and .\nOur goal is to utilize data from one media (such as image or text ) to retrieve data from another media (such as text or image ). To compare the semantic similarity between different media data, we design two feature mapping network: Base-Net and Refine-Net. Base-Net map image features and text features into a unified latent semantic space for semantic similarity comparison. The image feature mapped to latent embedding space is denoted as , and the text feature mapped to latent embedding space is denoted as , where and represent the mapping functions of image and text, respectively. To further improve the quality of feature mapping, we use Refine-Net to map the output features of Base-Net. The image feature is mapped as , and the text feature is mapped as , where and represent the mapping function of image features and text features, respectively."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "4",
|
| 25 |
+
"parent_section_id": null,
|
| 26 |
+
"section_name": "4. Method",
|
| 27 |
+
"text": "We propose a scientific and technological information oriented Semantics-adversarial and Media-adversarial Cross-media Retrieval (SMCR). The framework of SMCR is shown in Fig.1 ###reference_###. The motivation of SMCR is to use the idea of adversarial learning to continuously confront between semantics and media, and learn a common subspace, in which the data of different media can be directly compared with each other."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "4.1",
|
| 31 |
+
"parent_section_id": "4",
|
| 32 |
+
"section_name": "4.1. Feature Mapping Network",
|
| 33 |
+
"text": "Feature mapping network is used to map the features of different media into a unified latent embedding space for the comparison of semantic similarity. At the same time, it also plays the role of the \u201dgenerator\u201d in GAN (Goodfellow et al., 2014 ###reference_b7###), which aims to confuse the media discriminant network (introduced in Section 4.2). In order to make the mapped feature representation fully consider the semantic similarity and media similarity of two types of media data, the feature mapping network consists of three parts: label prediction, semantics preservation, and media constraints. The label prediction within the media enables the features mapped in the latent embedding space to be semantically classified with the original semantic label as the true value; the semantics preservation between media enables the data of same semantics and different media to retain the semantic similarity; The media distinction within semantics makes the mapped data closer to the original semantics."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "4.1.1",
|
| 37 |
+
"parent_section_id": "4.1",
|
| 38 |
+
"section_name": "4.1.1. Label Prediction",
|
| 39 |
+
"text": "To ensure the features mapped into the latent embedding space can still retain the original semantics, label prediction is performed with the original semantic labels as the ground truth. A softmax layer that maintains linear activations is added at the end of each feature map network. The image-text pairs are used as samples for training, and the output of the model is probability distribution of the semantic category corresponding to each data. We calculate intra-media discrimination loss by utilizing the loss function introduced in (Wang\net al., 2017 ###reference_b29###):\nwhere represents the cross-entropy loss for semantic label prediction of all image-text pairs, represents the parameters of the classifier, is the ground-truth of each sample , and denote the result of probability distributions for each data (image or text) in the sample."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "4.1.2",
|
| 43 |
+
"parent_section_id": "4.1",
|
| 44 |
+
"section_name": "4.1.2. Semantics Preservation",
|
| 45 |
+
"text": "The semantics preservation module is dedicated to ensuring that data with the same semantics and different media can retain the semantic similarity before and after mapping, i.e., the data with the same semantics in different media are closer, and the data with different semantics in different media are farther away. Before mapping to the latent embedding space , the semantic distributions of image data and text data in each sample are and , respectively. Then the loss of semantic consistency between two different media data is represented by the norm:\nAfter mapping to the latent embedding space , the semantic consistency loss between the image data feature and the text data feature in each sample are also calculated by norm:\nTherefore, the overall inter-media consistency loss can be modeled as the combination of and :\nwhere represents the loss of semantic consistency between media before and after mapping."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "4.1.3",
|
| 49 |
+
"parent_section_id": "4.1",
|
| 50 |
+
"section_name": "4.1.3. Media Constraint",
|
| 51 |
+
"text": "In addition to facilitating the measurement of semantic similarity between different media data, another advantage of the feature mapping network is to generate mapped features to fool the media discrimination network, making it impossible to distinguish the original media of the feature. Therefore, a media constraint module within semantics is introduced. To map the features of indistinguishable media more realistically, in addition to the basic feature mapping network, i.e. Base-Net , another feature mapping network with the same structure is constructed, which is called Refine-Net. The input of the Refine-Net is the output ( or ) of , and the output of is or , where and represent the mapped result of and which are mapped by Refine-Net , and represent mapping functions of two features , , respectively.\nFor each image-text pair , our goal is to make the feature ( or ) mapped by the Refine-Net away from the features ( or ) mapped by Base-Net , while closing to the features ( or ) of same semantics. Inspired by (Hoffer and Ailon, 2015 ###reference_b10###; Liang\net al., 2018 ###reference_b21###; Wei\net al., 2018 ###reference_b32###), intra-semantics discrimination loss is computed using the following constraint loss:\nTherefore, the overall intra-semantics media constraint loss can be modeled as combination of the constraint loss of image media data and the constraint loss of text media data:"
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "4.1.4",
|
| 55 |
+
"parent_section_id": "4.1",
|
| 56 |
+
"section_name": "4.1.4. Feature Mapper",
|
| 57 |
+
"text": "In summary, the mapping loss of the entire feature mapping network is composed of intra-media discrimination loss, inter-media consistency loss, and intra-semantics discrimination loss, denoted as :\nwhere and are adjustable parameters to control the participation of the two types of losses in the entire feature mapping network\u2019s loss."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "4.2",
|
| 61 |
+
"parent_section_id": "4",
|
| 62 |
+
"section_name": "4.2. Media Discriminator",
|
| 63 |
+
"text": "The media discriminant network plays the role of the \u201cdiscriminator\u201d in GAN (Goodfellow et al., 2014 ###reference_b7###) to determine the original media of the data mapped to the latent embedding space. Let the data label passing through the image mapping function be 0, and the data label passing through the text mapping function be 1. Wer use a three-layer fully connected network with a parameter of as the discriminant network, acting as the adversary of the feature mapping network. Its goal is to minimize the media classification loss, also known as the adversarial loss , defined as:\nwhere represents the cross entropy loss of each sample in the media discriminant network, represents the resulting media probability distribution of each data (image or text)."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "4.3",
|
| 67 |
+
"parent_section_id": "4",
|
| 68 |
+
"section_name": "4.3. Adversarial Learning",
|
| 69 |
+
"text": "The purpose of adversarial learning is to learn the optimal feature representation network parameters by simultaneously minimizing the mapping loss of Eq. (8) and the adversarial loss of Eq. (9). Adversarial learning consists of two sub-processes:\nThe specific adversarial learning training process of SMCR is shown in Algorithm 1 ###reference_thm1###."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "5",
|
| 73 |
+
"parent_section_id": null,
|
| 74 |
+
"section_name": "5. Experimental Setup",
|
| 75 |
+
"text": ""
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "5.1",
|
| 79 |
+
"parent_section_id": "5",
|
| 80 |
+
"section_name": "5.1. Research Questions",
|
| 81 |
+
"text": "Does our Semantics-adversarial and Media-adversarial Cross-media Retrieval method SMCR outperform the state-of-the-art baseline algorithms?\nWhat is the contribution of the key ingredient of SMCR for cross-media retrieval?\nIs the performance of SMCR sensitive to parameters?"
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "5.2",
|
| 85 |
+
"parent_section_id": "5",
|
| 86 |
+
"section_name": "5.2. Dataset",
|
| 87 |
+
"text": "To answer the above research questions, experiments were conducted using a dataset crawled from the science and technology information website SciTechDaily111https://scitechdaily.com/news/technology. The dataset consists of 5,217 image-text pairs, 4,173 of which are used as training set and 1,044 as test set. To verify the generality of the SMCR model, experiments are also conducted using the Wikipedia (Costa Pereira et al., 2014 ###reference_b2###) dataset. The Wikipedia dataset consists of 2866 image-text pairs, 2,292 of which are used as the training set and 574 as the test set. The details of the two datasets are shown in Table 1 ###reference_###.\n###table_1###"
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "5.3",
|
| 91 |
+
"parent_section_id": "5",
|
| 92 |
+
"section_name": "5.3. Baselines",
|
| 93 |
+
"text": "We compare SMCR with the following baselines and state-of-the-art algorithms:\nCanonical Correlation Analysis (CCA): This model (Hardoon\net al., 2004 ###reference_b8###) learns a common subspace for data of different media types to maximize the pairwise correlation between two sets of heterogeneous data.\nJoint Feature Selection and Subspace Learning (JFSSL): This model (Wang\net al., 2016 ###reference_b30###) learns a projection matrix to map multimedia data into a common subspace, and simultaneously selects related features and distinguishing features from different feature spaces.\nCross-media Multiple Deep Network (CMDN): This model (Peng\net al., 2016 ###reference_b24###) exploits complex cross-media correlations through hierarchical learning. In the first stage, intra-media and media information are jointly modeled; in the second stage, inter-media representations and intra-media representations are hierarchically combined to further learn rich cross-media correlations.\nAdversarial Cross-Modal Retrieval (ACMR): This model (Wang\net al., 2017 ###reference_b29###) seeks efficient common subspaces based on adversarial learning. A triple constraint is imposed on the feature projector to minimize the gap between the representations of all samples from different media with the same semantic label, while maximizing the distance between semantically different images and texts.\nDeep Supervised Cross-modal Retrieval (DSCMR): This model (Zhen\net al., 2019 ###reference_b41###) is also based on the idea of adversarial learning. 
The discriminative loss in the label space and the common representation space is minimized, while the media invariance loss is minimized, and a weight sharing strategy is used to eliminate cross-media differences of multimedia data in the common representation space.\nSemantic Similarity based Adversarial Cross-media Retrieval (SSACR): This model (Liu et al., 2021 ###reference_b22###) is also based on the idea of adversarial learning. The similarity of the feature vectors of different media data mapped to the same semantic space is calculated and compared with the similarity between the original semantic feature vectors to eliminate the differences between different media data under the same semantics.\nTo further analyze the contribution of the key ingredients of our SMCR to cross-media retrieval, we use two versions of SMCR for baselines:\nSMCR(without L): This model is the SMCR model which removes the inter-media semantic loss .\nSMCR(without L): This model is the SMCR model which removes the intra-semantics media loss .\n###table_2###"
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "5.4",
|
| 97 |
+
"parent_section_id": "5",
|
| 98 |
+
"section_name": "5.4. Evaluation Metrics",
|
| 99 |
+
"text": "We use the classic evaluation metric in cross-media retrieval (Kou\net al., 2019 ###reference_b12###; Liang\net al., 2020 ###reference_b20###, 2019 ###reference_b19###), mean Average Precision (mAP), to evaluate the performance of algorithms in the two tasks: using text to retrieve image (txt2img) and using image to retrieve text (img2txt). To calculate mAP, we need to calculate the average precision of retrieved documents first, where is the number of related documents in the retrieved documents, represents the precision of the first retrieved documents, if the th retrieved document is relevant, then , otherwise . Then mAP is calculated by averaging the AP values of all queries in the query set. The larger the mAP value, the more accurate the results of cross-media retrieval."
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"section_id": "6",
|
| 103 |
+
"parent_section_id": null,
|
| 104 |
+
"section_name": "6. Results and Analysis",
|
| 105 |
+
"text": "In this section, all experimental results are analyzed to answer the research questions raised in Section 5.1."
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"section_id": "6.1",
|
| 109 |
+
"parent_section_id": "6",
|
| 110 |
+
"section_name": "6.1. Effectiveness of SMCR",
|
| 111 |
+
"text": "To answer RQ1, we compare SMCR with six state-of-the-art algorithms on two datasets, SciTechDaily and Wikipedia, respectively. The baselines are: 1) methods based on statistical correlation analysis: CCA(Hardoon\net al., 2004 ###reference_b8###), JFSSL(Wang\net al., 2016 ###reference_b30###); 2) methods based on deep learning: CMDN(Peng\net al., 2016 ###reference_b24###), ACMR(Wang\net al., 2017 ###reference_b29###), DSCMR(Zhen\net al., 2019 ###reference_b41###), SSACR(Liu et al., 2021 ###reference_b22###).\nTable 2 ###reference_### shows the mAP values (mAP@5, mAP@25, mAP@50) calculated for the first 5, 25, and 50 retrieval results on the two tasks of using text to retrieve image (txt2img) and using image to retrieve text (img2txt). Besides, it also demonstrates the mean results of mAP (Average) on the two retrieval tasks. From Table 2, we have the following findings:\n1) SMCR outperforms all state-of-the-art algorithms, including methods based on statistical correlation analysis and methods based on deep learning. It is worth mentioning that the mean mAP of the SMCR algorithm on the first 5, 25, and 50 retrieval results is superior to the current state-of-the-art SSACR algorithm on both datasets. This demonstrates that although SSACR also models intra-media semantic loss and inter-media semantic loss, SMCR introduces inter-media semantic consistency loss and intra-semantics discrimination loss, which help to further improve cross-media retrieval performance by more realistically mapping the feature representation of indistinguishable media;\n2) SMCR, JFSSL, CMDN, ACMR, DSCMR, SSACR, which simultaneously model intra-media similarity and inter-media similarity, are better than CCA modeling inter-media similarity based on image-text pair. 
This finding indicates that considering both intra-media similarity and inter-media similarity can improve the performance of cross-media retrieval;\n3) The cross-media retrieval performance of SMCR, ACMR, DSCMR, and SSACR is better than that of CMDN, which also models inter-media invariance and intra-media discrimination in the multi-task learning framework, indicating that adversarial learning helps to further improve the modeling effect of inter-media invariance and intra-media discrimination;\n4) SMCR, which separately models the semantic similarity of different media data with the same semantics before and after mapping, outperforms ACMR and DSCMR, which only model the semantic similarity of different media data with the same semantics after mapping. This finding demonstrates that modeling the semantic invariance of data from different media before and after mapping helps to improve the performance of cross-media retrieval;\n5) There is consistent performance of SMCR and all state-of-the-art algorithms on both SciTechDaily and Wikipedia datasets, which indicates that the SMCR algorithm is not only limited to the retrieval of cross-media scientific and technological information, but also has good performance in general cross-media retrieval tasks.\n###figure_2###"
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"section_id": "6.2",
|
| 115 |
+
"parent_section_id": "6",
|
| 116 |
+
"section_name": "6.2. Contribution of Key Ingredients of SMCR",
|
| 117 |
+
"text": "Next, to answer RQ2, we compare SMCR with two versions of SMCR: SMCR without inter-media consistency loss , and SMCR without intra-semantics discrimination loss . Since the intra-media discrimination loss modeled by label prediction is not an innovation in our SMCR, the SMCR without is not compared.\n###table_3### Tables 3 ###reference_### and 4 ###reference_### show the comparison results of SMCR and its two variants on the SciTechDaily and Wikipedia datasets, respectively. We have the following findings:\n1) SMCR without inter-media consistency loss and SMCR without intra-semantics discrimination loss , underperform SMCR in cross-media retrieval. This finding demonstrates that optimizing the inter-media consistency loss and the intra-semantics discrimination loss simultaneously in the feature mapping network is more helpful to improve the cross-media retrieval performance than optimizing one of them alone;\n2) There is consistent cross-media retrieval performance of SMCR and its variants on both SciTechDaily and Wikipedia datasets. Again, this finding indicates that the SMCR algorithm is not only limited to cross-media scientific and technological information retrieval, but also effective on general cross-media retrieval tasks.\n###table_4###"
|
| 118 |
+
},
|
| 119 |
+
{
|
| 120 |
+
"section_id": "6.3",
|
| 121 |
+
"parent_section_id": "6",
|
| 122 |
+
"section_name": "6.3. Parameter Sensitivity of SMCR",
|
| 123 |
+
"text": "Finally, we answer RQ3. The mapping loss of the feature mapping network in Eq. (8) has two parameters and , which control the participation of inter-media consistency loss and intra-semantics discrimination loss in the overall mapping loss . This section changes the values of and on the Wikipedia dataset to test the parameter sensitivity of the SMCR algorithm. The parameters and are set to 0.1, 1, 10, 100 respectively. In particular, when , SMCR degenerates into SMCR without inter-media consistency loss ; when , the SMCR degenerates into an SMCR without intra-semantics discrimination loss . Therefore, the values of and are not 0. Under the premise of fixing one parameter (such as ), we change another parameter (such as ) to conduct experiments, and use mAP@50 value to evaluate the performance of using text to retrieve image, using image to retrieve text, and average retrieval effect respectively. The results are shown in Fig. 2 ###reference_###.\nAs can be seen from Fig. 2 ###reference_###, when the value of is 0.1, 1, 10, and the value of is 0.1, 1, 10, 100, SMCR performs better. This indicates that SMCR is insensitive to parameters, i.e. has better generalization ability. In particular, on the task of using text to retrieve image, SMCR performs best when and ; on the task of using image to retrieve text, when and , SMCR achieves the best retrieval effect; in terms of average retrieval effect, when and , SMCR performs best."
|
| 124 |
+
},
|
| 125 |
+
{
|
| 126 |
+
"section_id": "7",
|
| 127 |
+
"parent_section_id": null,
|
| 128 |
+
"section_name": "7. Conclusion",
|
| 129 |
+
"text": "We propose a Scientific and Technological Information Oriented Semantics-adversarial and Media-adversarial Cross-media Retrieval method (SMCR), which can simultaneously learn intra-media discriminative, inter-media consistent, and intra-semantics discriminative representations in cross-media retrieval. SMCR is based on an adversarial learning approach and involves two processes in the minimax game: feature mapping networks generate representations with intra-media discrimination, inter-media consistency, and inter-semantics discrimination, and media discrimination networks try to discriminate the original media of given data. SMCR introduces inter-media consistency loss to ensure that the data between media before and after the mapping retains semantic consistency; in addition, the intra-semantic media discriminative loss is introduced to ensure that the mapped data is semantically close to itself, and far away from itself in media, to enhance the ability of feature mapping networks to confuse media discrimination networks. Experimental results on two cross-media datasets demonstrate the effectiveness of the SMCR algorithm and the performance of SMCR outperforms state-of-the-art methods on cross-media retrieval."
|
| 130 |
+
}
|
| 131 |
+
],
|
| 132 |
+
"appendix": [],
|
| 133 |
+
"tables": {
|
| 134 |
+
"1": {
|
| 135 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S5.T1\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 1. </span>Properties of the two datasets used for the experiments</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_align_middle\" id=\"S5.T1.1\">\n<tr class=\"ltx_tr\" id=\"S5.T1.1.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T1.1.1.1\">dataset</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T1.1.1.2\">sample</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T1.1.1.3\">label</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T1.1.1.4\">image feature</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T1.1.1.5\">text feature</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.1.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T1.1.2.1\">SciTechDaily</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T1.1.2.2\">4,173/1,044</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T1.1.2.3\">8</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T1.1.2.4\">4,096d VGG</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T1.1.2.5\">6,500d BoW</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.1.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"S5.T1.1.3.1\">Wikipedia</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"S5.T1.1.3.2\">2,292/574</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"S5.T1.1.3.3\">10</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"S5.T1.1.3.4\">4,096d VGG</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"S5.T1.1.3.5\">5,000d BoW</td>\n</tr>\n</table>\n</figure>",
|
| 136 |
+
"capture": "Table 1. Properties of the two datasets used for the experiments"
|
| 137 |
+
},
|
| 138 |
+
"2": {
|
| 139 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S5.T2\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 2. </span>Comparison of cross-media retrieval performance on SciTechDaily and Wikipedia datasets</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_align_middle\" id=\"S5.T2.1\">\n<tr class=\"ltx_tr\" id=\"S5.T2.1.1\">\n<td class=\"ltx_td ltx_border_l ltx_border_r ltx_border_t\" colspan=\"2\" id=\"S5.T2.1.1.1\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" colspan=\"3\" id=\"S5.T2.1.1.2\">mAP@5</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" colspan=\"3\" id=\"S5.T2.1.1.3\">mAP@25</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" colspan=\"3\" id=\"S5.T2.1.1.4\">mAP@50</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S5.T2.1.2.1\">dataset</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T2.1.2.2\">method</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.2.3\">txt2img</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.2.4\">img2txt</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T2.1.2.5\">Average</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.2.6\">txt2img</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.2.7\">img2txt</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T2.1.2.8\">Average</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.2.9\">txt2img</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.2.10\">img2txt</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T2.1.2.11\">Average</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S5.T2.1.3.1\" 
rowspan=\"7\"><span class=\"ltx_text\" id=\"S5.T2.1.3.1.1\">\n<span class=\"ltx_inline-block ltx_transformed_outer\" id=\"S5.T2.1.3.1.1.1\" style=\"width:8.9pt;height:56.8pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"width:56.8pt;transform:translate(-23.96pt,-22.99pt) rotate(-90deg) ;\">\n<span class=\"ltx_p\" id=\"S5.T2.1.3.1.1.1.1\">SciTechDaily</span>\n</span></span></span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T2.1.3.2\">CCA</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.3.3\">0.2337</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.3.4\">0.1806</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T2.1.3.5\">0.2071</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.3.6\">0.2328</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.3.7\">0.1761</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T2.1.3.8\">0.2044</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.3.9\">0.2225</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.3.10\">0.1789</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T2.1.3.11\">0.2007</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.4.1\">JFSSL</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.4.2\">0.3984</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.4.3\">0.2852</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.4.4\">0.3418</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.4.5\">0.3817</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.4.6\">0.2777</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.4.7\">0.3297</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.4.8\">0.3699</td>\n<td class=\"ltx_td ltx_align_center\" 
id=\"S5.T2.1.4.9\">0.2647</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.4.10\">0.3173</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.5\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.5.1\">CMDN</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.5.2\">0.4483</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.5.3\">0.3514</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.5.4\">0.3998</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.5.5\">0.4299</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.5.6\">0.3443</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.5.7\">0.3871</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.5.8\">0.4206</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.5.9\">0.3229</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.5.10\">0.3717</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.6\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.6.1\">ACMR</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.6.2\">0.5131</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.6.3\">0.4382</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.6.4\">0.4756</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.6.5\">0.4943</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.6.6\">0.4471</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.6.7\">0.4707</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.6.8\">0.4966</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.6.9\">0.4259</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.6.10\">0.4612</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.7\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.7.1\">DSCMR</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.7.2\">0.5042</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.7.3\">0.4577</td>\n<td class=\"ltx_td 
ltx_align_center ltx_border_r\" id=\"S5.T2.1.7.4\">0.4809</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.7.5\">0.4812</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.7.6\">0.4646</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.7.7\">0.4729</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.7.8\">0.4810</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.7.9\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.7.9.1\">0.4467</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.7.10\">0.4638</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.8\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.8.1\">SSACR</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.8.2\">0.5091</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.8.3\">0.4572</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.8.4\">0.4831</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.8.5\">0.5049</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.8.6\">0.4487</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.8.7\">0.4768</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.8.8\">0.5072</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.8.9\">0.4355</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.8.10\">0.4713</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.9\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.9.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.9.1.1\">SMCR</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.9.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.9.2.1\">0.5270</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.9.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.9.3.1\">0.4790</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.9.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.9.4.1\">0.5030</span></td>\n<td 
class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.9.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.9.5.1\">0.5291</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.9.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.9.6.1\">0.4727</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.9.7\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.9.7.1\">0.5009</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.9.8\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.9.8.1\">0.5191</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.9.9\">0.4426</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.9.10\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.9.10.1\">0.4808</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.10\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_l ltx_border_r ltx_border_t\" id=\"S5.T2.1.10.1\" rowspan=\"7\"><span class=\"ltx_text\" id=\"S5.T2.1.10.1.1\">\n<span class=\"ltx_inline-block ltx_transformed_outer\" id=\"S5.T2.1.10.1.1.1\" style=\"width:8.9pt;height:44.7pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"width:44.7pt;transform:translate(-17.92pt,-16.94pt) rotate(-90deg) ;\">\n<span class=\"ltx_p\" id=\"S5.T2.1.10.1.1.1.1\">Wikipedia</span>\n</span></span></span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T2.1.10.2\">CCA</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.10.3\">0.2639</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.10.4\">0.2154</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T2.1.10.5\">0.2396</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.10.6\">0.2883</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.10.7\">0.2255</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T2.1.10.8\">0.2569</td>\n<td class=\"ltx_td 
ltx_align_center ltx_border_t\" id=\"S5.T2.1.10.9\">0.2575</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.10.10\">0.2152</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T2.1.10.11\">0.2363</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.11\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.11.1\">JFSSL</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.11.2\">0.4432</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.11.3\">0.3481</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.11.4\">0.3956</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.11.5\">0.4266</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.11.6\">0.3528</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.11.7\">0.3897</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.11.8\">0.4152</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.11.9\">0.3479</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.11.10\">0.3815</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.12\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.12.1\">CMDN</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.12.2\">0.5265</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.12.3\">0.4194</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.12.4\">0.4729</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.12.5\">0.5046</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.12.6\">0.4171</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.12.7\">0.4608</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.12.8\">0.4874</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.12.9\">0.3938</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.12.10\">0.4406</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.13\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" 
id=\"S5.T2.1.13.1\">ACMR</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.13.2\">0.6372</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.13.3\">0.4920</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.13.4\">0.5646</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.13.5\">0.6251</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.13.6\">0.4937</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.13.7\">0.5594</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.13.8\">0.5887</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.13.9\">0.4824</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.13.10\">0.5355</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.14\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.14.1\">DSCMR</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.14.2\">0.6413</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.14.3\">0.4963</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.14.4\">0.5688</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.14.5\">0.6514</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.14.6\">0.5082</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.14.7\">0.5798</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.14.8\">0.6452</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.14.9\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.14.9.1\">0.4973</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.14.10\">0.5712</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.15\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.15.1\">SSACR</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.15.2\">0.6642</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.15.3\">0.4927</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.15.4\">0.5784</td>\n<td class=\"ltx_td ltx_align_center\" 
id=\"S5.T2.1.15.5\">0.6608</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.15.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.15.6.1\">0.5089</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.15.7\">0.5848</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.15.8\">0.6416</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.15.9\">0.4956</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.15.10\">0.5686</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.16\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r\" id=\"S5.T2.1.16.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.16.1.1\">SMCR</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S5.T2.1.16.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.16.2.1\">0.7014</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S5.T2.1.16.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.16.3.1\">0.5059</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r\" id=\"S5.T2.1.16.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.16.4.1\">0.6036</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S5.T2.1.16.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.16.5.1\">0.6714</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S5.T2.1.16.6\">0.5003</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r\" id=\"S5.T2.1.16.7\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.16.7.1\">0.5858</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S5.T2.1.16.8\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.16.8.1\">0.6503</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S5.T2.1.16.9\">0.4959</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r\" id=\"S5.T2.1.16.10\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.16.10.1\">0.5731</span></td>\n</tr>\n</table>\n</figure>",
|
| 140 |
+
"capture": "Table 2. Comparison of cross-media retrieval performance on SciTechDaily and Wikipedia datasets"
|
| 141 |
+
},
|
| 142 |
+
"3": {
|
| 143 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S6.T3\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 3. </span>Performance of SMCR and its variants in SciTechDaily.</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_align_middle\" id=\"S6.T3.2\">\n<tr class=\"ltx_tr\" id=\"S6.T3.2.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S6.T3.2.3.1\">method</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T3.2.3.2\">mAP</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.2.3.3\">txt2img</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.2.3.4\">img2txt</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T3.2.3.5\">Average</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T3.1.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S6.T3.1.1.1\" rowspan=\"3\"><span class=\"ltx_text\" id=\"S6.T3.1.1.1.1\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S6.T3.1.1.1.1.1\">\n<span class=\"ltx_tr\" id=\"S6.T3.1.1.1.1.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S6.T3.1.1.1.1.1.2.1\">SMCR</span></span>\n<span class=\"ltx_tr\" id=\"S6.T3.1.1.1.1.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S6.T3.1.1.1.1.1.1.1\">(without )</span></span>\n</span></span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T3.1.1.2\">mAP@5</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.1.1.3\">0.5196</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.1.1.4\">0.4627</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T3.1.1.5\">0.4911</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T3.2.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T3.2.4.1\">mAP@25</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T3.2.4.2\">0.5187</td>\n<td class=\"ltx_td 
ltx_align_center\" id=\"S6.T3.2.4.3\">0.4525</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T3.2.4.4\">0.4856</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T3.2.5\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T3.2.5.1\">mAP@50</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T3.2.5.2\">0.5024</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T3.2.5.3\">0.4408</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T3.2.5.4\">0.4716</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T3.2.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S6.T3.2.2.1\" rowspan=\"3\"><span class=\"ltx_text\" id=\"S6.T3.2.2.1.1\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S6.T3.2.2.1.1.1\">\n<span class=\"ltx_tr\" id=\"S6.T3.2.2.1.1.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S6.T3.2.2.1.1.1.2.1\">SMCR</span></span>\n<span class=\"ltx_tr\" id=\"S6.T3.2.2.1.1.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S6.T3.2.2.1.1.1.1.1\">(without )</span></span>\n</span></span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T3.2.2.2\">mAP@5</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.2.2.3\">0.5155</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.2.2.4\">0.4513</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T3.2.2.5\">0.4834</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T3.2.6\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T3.2.6.1\">mAP@25</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T3.2.6.2\">0.5073</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T3.2.6.3\">0.4474</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T3.2.6.4\">0.4773</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T3.2.7\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T3.2.7.1\">mAP@50</td>\n<td class=\"ltx_td ltx_align_center\" 
id=\"S6.T3.2.7.2\">0.4972</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T3.2.7.3\">0.4386</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T3.2.7.4\">0.4679</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T3.2.8\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_l ltx_border_r ltx_border_t\" id=\"S6.T3.2.8.1\" rowspan=\"3\"><span class=\"ltx_text\" id=\"S6.T3.2.8.1.1\">SMCR</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T3.2.8.2\">mAP@5</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.2.8.3\">0.5270</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.2.8.4\">0.4790</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T3.2.8.5\">0.5030</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T3.2.9\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T3.2.9.1\">mAP@25</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T3.2.9.2\">0.5291</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T3.2.9.3\">0.4727</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T3.2.9.4\">0.5009</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T3.2.10\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r\" id=\"S6.T3.2.10.1\">mAP@50</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S6.T3.2.10.2\">0.5191</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S6.T3.2.10.3\">0.4426</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r\" id=\"S6.T3.2.10.4\">0.4808</td>\n</tr>\n</table>\n</figure>",
|
| 144 |
+
"capture": "Table 3. Performance of SMCR and its variants in SciTechDaily."
|
| 145 |
+
},
|
| 146 |
+
"4": {
|
| 147 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S6.T4\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 4. </span>Performance of SMCR and its variants in Wikipedia.</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_align_middle\" id=\"S6.T4.2\">\n<tr class=\"ltx_tr\" id=\"S6.T4.2.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S6.T4.2.3.1\">method</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T4.2.3.2\">mAP</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.2.3.3\">txt2img</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.2.3.4\">img2txt</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T4.2.3.5\">Average</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T4.1.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S6.T4.1.1.1\" rowspan=\"3\"><span class=\"ltx_text\" id=\"S6.T4.1.1.1.1\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S6.T4.1.1.1.1.1\">\n<span class=\"ltx_tr\" id=\"S6.T4.1.1.1.1.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S6.T4.1.1.1.1.1.2.1\">SMCR</span></span>\n<span class=\"ltx_tr\" id=\"S6.T4.1.1.1.1.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S6.T4.1.1.1.1.1.1.1\">(without )</span></span>\n</span></span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T4.1.1.2\">mAP@5</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.3\">0.6919</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.4\">0.4983</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T4.1.1.5\">0.5951</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T4.2.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T4.2.4.1\">mAP@25</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.2.4.2\">0.6622</td>\n<td class=\"ltx_td 
ltx_align_center\" id=\"S6.T4.2.4.3\">0.4937</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T4.2.4.4\">0.5779</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T4.2.5\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T4.2.5.1\">mAP@50</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.2.5.2\">0.6418</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.2.5.3\">0.4901</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T4.2.5.4\">0.5659</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T4.2.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S6.T4.2.2.1\" rowspan=\"3\"><span class=\"ltx_text\" id=\"S6.T4.2.2.1.1\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S6.T4.2.2.1.1.1\">\n<span class=\"ltx_tr\" id=\"S6.T4.2.2.1.1.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S6.T4.2.2.1.1.1.2.1\">SMCR</span></span>\n<span class=\"ltx_tr\" id=\"S6.T4.2.2.1.1.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S6.T4.2.2.1.1.1.1.1\">(without )</span></span>\n</span></span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T4.2.2.2\">mAP@5</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.2.2.3\">0.6806</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.2.2.4\">0.5038</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T4.2.2.5\">0.5922</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T4.2.6\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T4.2.6.1\">mAP@25</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.2.6.2\">0.6596</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.2.6.3\">0.4980</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T4.2.6.4\">0.5788</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T4.2.7\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T4.2.7.1\">mAP@50</td>\n<td class=\"ltx_td ltx_align_center\" 
id=\"S6.T4.2.7.2\">0.6416</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.2.7.3\">0.4938</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T4.2.7.4\">0.5677</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T4.2.8\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_l ltx_border_r ltx_border_t\" id=\"S6.T4.2.8.1\" rowspan=\"3\"><span class=\"ltx_text\" id=\"S6.T4.2.8.1.1\">SMCR</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T4.2.8.2\">mAP@5</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.2.8.3\">0.7014</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.2.8.4\">0.5059</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T4.2.8.5\">0.6036</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T4.2.9\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T4.2.9.1\">mAP@25</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.2.9.2\">0.6714</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.2.9.3\">0.5003</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T4.2.9.4\">0.5858</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T4.2.10\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r\" id=\"S6.T4.2.10.1\">mAP@50</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S6.T4.2.10.2\">0.6503</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S6.T4.2.10.3\">0.4959</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r\" id=\"S6.T4.2.10.4\">0.5731</td>\n</tr>\n</table>\n</figure>",
|
| 148 |
+
"capture": "Table 4. Performance of SMCR and its variants in Wikipedia."
|
| 149 |
+
}
|
| 150 |
+
},
|
| 151 |
+
"image_paths": {
|
| 152 |
+
"1": {
|
| 153 |
+
"figure_path": "2203.08615v3_figure_1.png",
|
| 154 |
+
"caption": "Figure 1. Graphical representation of our proposed SMCR. It consists of two processes that play the minimax game: a feature mapper generating intra-media semantics-discriminative, inter-media semantics-consistent, and intra-semantics media-discriminative representations, and a media discriminator distinguishing the original media of representations.",
|
| 155 |
+
"url": "http://arxiv.org/html/2203.08615v3/x1.png"
|
| 156 |
+
},
|
| 157 |
+
"2": {
|
| 158 |
+
"figure_path": "2203.08615v3_figure_2.png",
|
| 159 |
+
"caption": "Figure 2. The performance of (a) using text to retrieve image, (b) using image to retrieve text, and (c) the average retrieval performance with various parameter and in Wikipedia dataset.",
|
| 160 |
+
"url": "http://arxiv.org/html/2203.08615v3/x2.png"
|
| 161 |
+
}
|
| 162 |
+
},
|
| 163 |
+
"validation": true,
|
| 164 |
+
"references": [
|
| 165 |
+
{
|
| 166 |
+
"1": {
|
| 167 |
+
"title": "On the Role of Correlation and Abstraction in\nCross-Modal Multimedia Retrieval.",
|
| 168 |
+
"author": "Jose Costa Pereira,\nEmanuele Coviello, Gabriel Doyle,\nNikhil Rasiwasia, Gert R.G. Lanckriet,\nRoger Levy, and Nuno Vasconcelos.\n2014.",
|
| 169 |
+
"venue": "IEEE Transactions on Pattern Analysis and\nMachine Intelligence 36, 3\n(2014), 521\u2013535.",
|
| 170 |
+
"url": null
|
| 171 |
+
}
|
| 172 |
+
},
|
| 173 |
+
{
|
| 174 |
+
"2": {
|
| 175 |
+
"title": "Identity-aware CycleGAN for face photo-sketch\nsynthesis and recognition.",
|
| 176 |
+
"author": "Yuke Fang, Weihong Deng,\nJunping Du, and Jiani Hu.\n2020.",
|
| 177 |
+
"venue": "Pattern Recognition 102\n(2020), 107249.",
|
| 178 |
+
"url": null
|
| 179 |
+
}
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"3": {
|
| 183 |
+
"title": "Cross-Modal Retrieval with Correspondence\nAutoencoder. In Proceedings of the 22nd ACM\nInternational Conference on Multimedia (MM \u201914).\nACM, 7\u201316.",
|
| 184 |
+
"author": "Fangxiang Feng, Xiaojie\nWang, and Ruifan Li. 2014.",
|
| 185 |
+
"venue": "",
|
| 186 |
+
"url": null
|
| 187 |
+
}
|
| 188 |
+
},
|
| 189 |
+
{
|
| 190 |
+
"4": {
|
| 191 |
+
"title": "Unsupervised Domain Adaptation by Backpropagation.\nIn Proceedings of the 32nd International Conference\non International Conference on Machine Learning\n(ICML \u201915). JMLR,\n1180\u20131189.",
|
| 192 |
+
"author": "Yaroslav Ganin and\nVictor Lempitsky. 2015.",
|
| 193 |
+
"venue": "",
|
| 194 |
+
"url": null
|
| 195 |
+
}
|
| 196 |
+
},
|
| 197 |
+
{
|
| 198 |
+
"5": {
|
| 199 |
+
"title": "A multi-view embedding space for modeling internet\nimages, tags, and their semantics.",
|
| 200 |
+
"author": "Yunchao Gong, Qifa Ke,\nMichael Isard, and Svetlana Lazebnik.\n2014.",
|
| 201 |
+
"venue": "International Journal of Computer Vision\n106, 2 (2014),\n210\u2013233.",
|
| 202 |
+
"url": null
|
| 203 |
+
}
|
| 204 |
+
},
|
| 205 |
+
{
|
| 206 |
+
"6": {
|
| 207 |
+
"title": "Generative Adversarial Nets. In\nProceedings of the 27th International Conference on\nNeural Information Processing Systems - Volume 2\n(NIPS \u201914). MIT,\n2672\u20132680.",
|
| 208 |
+
"author": "Ian J. Goodfellow, Jean\nPouget-Abadie, Mehdi Mirza, Bing Xu,\nDavid Warde-Farley, Sherjil Ozair,\nAaron Courville, and Yoshua Bengio.\n2014.",
|
| 209 |
+
"venue": "",
|
| 210 |
+
"url": null
|
| 211 |
+
}
|
| 212 |
+
},
|
| 213 |
+
{
|
| 214 |
+
"7": {
|
| 215 |
+
"title": "Canonical Correlation Analysis: An Overview with\nApplication to Learning Methods.",
|
| 216 |
+
"author": "David R. Hardoon, Sandor\nSzedmak, and John Shawe-Taylor.\n2004.",
|
| 217 |
+
"venue": "Neural Computation 16,\n12 (2004), 2639\u20132664.",
|
| 218 |
+
"url": null
|
| 219 |
+
}
|
| 220 |
+
},
|
| 221 |
+
{
|
| 222 |
+
"8": {
|
| 223 |
+
"title": "Unsupervised cross-modal retrieval through\nadversarial learning. In 2017 IEEE International\nConference on Multimedia and Expo (ICME \u201917).\n1153\u20131158.",
|
| 224 |
+
"author": "Li He, Xing Xu,\nHuimin Lu, Yang Yang,\nFumin Shen, and Heng Tao Shen.\n2017.",
|
| 225 |
+
"venue": "",
|
| 226 |
+
"url": null
|
| 227 |
+
}
|
| 228 |
+
},
|
| 229 |
+
{
|
| 230 |
+
"9": {
|
| 231 |
+
"title": "Deep Metric Learning Using Triplet Network. In\nSimilarity-Based Pattern Recognition.\nSpringer, 84\u201392.",
|
| 232 |
+
"author": "Elad Hoffer and Nir\nAilon. 2015.",
|
| 233 |
+
"venue": "",
|
| 234 |
+
"url": null
|
| 235 |
+
}
|
| 236 |
+
},
|
| 237 |
+
{
|
| 238 |
+
"10": {
|
| 239 |
+
"title": "Anomaly Detection Using Local Kernel Density\nEstimation and Context-Based Regression.",
|
| 240 |
+
"author": "Weiming Hu, Jun Gao,\nBing Li, Ou Wu, Junping\nDu, and Stephen Maybank.\n2020.",
|
| 241 |
+
"venue": "IEEE Transactions on Knowledge and Data\nEngineering 32, 2\n(2020), 218\u2013233.",
|
| 242 |
+
"url": null
|
| 243 |
+
}
|
| 244 |
+
},
|
| 245 |
+
{
|
| 246 |
+
"11": {
|
| 247 |
+
"title": "Common Semantic Representation Method Based on\nObject Attention and Adversarial Learning for Cross-Modal Data in IoV.",
|
| 248 |
+
"author": "Feifei Kou, Junping Du,\nWanqiu Cui, Lei Shi,\nPengchao Cheng, Jiannan Chen, and\nJinxuan Li. 2019.",
|
| 249 |
+
"venue": "IEEE Transactions on Vehicular Technology\n68, 12 (2019),\n11588\u201311598.",
|
| 250 |
+
"url": null
|
| 251 |
+
}
|
| 252 |
+
},
|
| 253 |
+
{
|
| 254 |
+
"12": {
|
| 255 |
+
"title": "Social network search based on semantic analysis\nand learning.",
|
| 256 |
+
"author": "Feifei Kou, Junping Du,\nYijiang He, and Lingfei Ye.\n2016.",
|
| 257 |
+
"venue": "CAAI Transactions on Intelligence\nTechnology 1, 4 (2016),\n293\u2013302.",
|
| 258 |
+
"url": null
|
| 259 |
+
}
|
| 260 |
+
},
|
| 261 |
+
{
|
| 262 |
+
"13": {
|
| 263 |
+
"title": "Self-Supervised Adversarial Hashing Networks for\nCross-Modal Retrieval. In 2018 IEEE/CVF Conference\non Computer Vision and Pattern Recognition (CVPR\n\u201918). IEEE, 4242\u20134251.",
|
| 264 |
+
"author": "Chao Li, Cheng Deng,\nNing Li, Wei Liu, Xinbo\nGao, and Dacheng Tao. 2018a.",
|
| 265 |
+
"venue": "",
|
| 266 |
+
"url": null
|
| 267 |
+
}
|
| 268 |
+
},
|
| 269 |
+
{
|
| 270 |
+
"14": {
|
| 271 |
+
"title": "Self-Supervised Adversarial Hashing Networks for\nCross-Modal Retrieval. In 2018 IEEE/CVF Conference\non Computer Vision and Pattern Recognition (CVPR\n\u201918). IEEE, 4242\u20134251.",
|
| 272 |
+
"author": "Chao Li, Cheng Deng,\nNing Li, Wei Liu, Xinbo\nGao, and Dacheng Tao. 2018b.",
|
| 273 |
+
"venue": "",
|
| 274 |
+
"url": null
|
| 275 |
+
}
|
| 276 |
+
},
|
| 277 |
+
{
|
| 278 |
+
"15": {
|
| 279 |
+
"title": "Distributed extended Kalman filter with nonlinear\nconsensus estimate.",
|
| 280 |
+
"author": "Wenling Li, Yingmin Jia,\nand Junping Du. 2017a.",
|
| 281 |
+
"venue": "Journal of the Franklin Institute\n354, 17 (2017),\n7983\u20137995.",
|
| 282 |
+
"url": null
|
| 283 |
+
}
|
| 284 |
+
},
|
| 285 |
+
{
|
| 286 |
+
"16": {
|
| 287 |
+
"title": "Recursive state estimation for complex networks\nwith random coupling strength.",
|
| 288 |
+
"author": "Wenling Li, Yingmin Jia,\nand Junping Du. 2017b.",
|
| 289 |
+
"venue": "Neurocomputing 219\n(2017), 1\u20138.",
|
| 290 |
+
"url": null
|
| 291 |
+
}
|
| 292 |
+
},
|
| 293 |
+
{
|
| 294 |
+
"17": {
|
| 295 |
+
"title": "Implementation of Academic News Recommendation\nSystem Based on User Profile and Message Semantics. In\nComputational Intelligence and Intelligent\nSystems. Springer, 531\u2013540.",
|
| 296 |
+
"author": "Weiling Li, Yong Tang,\nGuohua Chen, Danyang Xiao, and\nChengzhe Yuan. 2018c.",
|
| 297 |
+
"venue": "",
|
| 298 |
+
"url": null
|
| 299 |
+
}
|
| 300 |
+
},
|
| 301 |
+
{
|
| 302 |
+
"18": {
|
| 303 |
+
"title": "Fine-Grained Cross-Media Representation Learning\nwith Deep Quantization Attention Network. In\nProceedings of the 27th ACM International\nConference on Multimedia (MM \u201919).\nACM, 1313\u20131321.",
|
| 304 |
+
"author": "Meiyu Liang, Junping Du,\nWu Liu, Zhe Xue, Yue\nGeng, and Congxian Yang.\n2019.",
|
| 305 |
+
"venue": "",
|
| 306 |
+
"url": null
|
| 307 |
+
}
|
| 308 |
+
},
|
| 309 |
+
{
|
| 310 |
+
"19": {
|
| 311 |
+
"title": "Cross-Media Semantic Correlation Learning Based on\nDeep Hash Network and Semantic Expansion for Social Network Cross-Media\nSearch.",
|
| 312 |
+
"author": "Meiyu Liang, Junping Du,\nCongxian Yang, Zhe Xue,\nHaisheng Li, Feifei Kou, and\nYue Geng. 2020.",
|
| 313 |
+
"venue": "IEEE Transactions on Neural Networks and\nLearning Systems 31, 9\n(2020), 3634\u20133648.",
|
| 314 |
+
"url": null
|
| 315 |
+
}
|
| 316 |
+
},
|
| 317 |
+
{
|
| 318 |
+
"20": {
|
| 319 |
+
"title": "Generative Semantic Manipulation with\nMask-Contrasting GAN. In European Conference on\nComputer Vision (ECCV \u201918).\nSpringer, 574\u2013590.",
|
| 320 |
+
"author": "Xiaodan Liang, Hao Zhang,\nLiang Lin, and Eric Xing.\n2018.",
|
| 321 |
+
"venue": "",
|
| 322 |
+
"url": null
|
| 323 |
+
}
|
| 324 |
+
},
|
| 325 |
+
{
|
| 326 |
+
"21": {
|
| 327 |
+
"title": "A Cross Media Search Method for Social Networks\nBased on Adversarial Learning and Semantic Similarity.",
|
| 328 |
+
"author": "Chong Liu, Junping Du,\nand Nan Zhou. 2021.",
|
| 329 |
+
"venue": "SCIENTIA SINICA Informationis\n51, 5 (2021),\n779\u2013794.",
|
| 330 |
+
"url": null
|
| 331 |
+
}
|
| 332 |
+
},
|
| 333 |
+
{
|
| 334 |
+
"22": {
|
| 335 |
+
"title": "Multimodal Deep Learning. In\nProceedings of the 28th International Conference on\nInternational Conference on Machine Learning (ICML\n\u201911). Omnipress, 689\u2013696.",
|
| 336 |
+
"author": "Jiquan Ngiam, Aditya\nKhosla, Mingyu Kim, Juhan Nam,\nHonglak Lee, and Andrew Y. Ng.\n2011.",
|
| 337 |
+
"venue": "",
|
| 338 |
+
"url": null
|
| 339 |
+
}
|
| 340 |
+
},
|
| 341 |
+
{
|
| 342 |
+
"23": {
|
| 343 |
+
"title": "Cross-Media Shared Representation by Hierarchical\nLearning with Multiple Deep Networks. In\nProceedings of the Twenty-Fifth International Joint\nConference on Artificial Intelligence (IJCAI \u201916).\nAAAI, 3846\u20133853.",
|
| 344 |
+
"author": "Yuxin Peng, Xin Huang,\nand Jinwei Qi. 2016.",
|
| 345 |
+
"venue": "",
|
| 346 |
+
"url": null
|
| 347 |
+
}
|
| 348 |
+
},
|
| 349 |
+
{
|
| 350 |
+
"24": {
|
| 351 |
+
"title": "An Overview of Cross-Media Retrieval: Concepts,\nMethodologies, Benchmarks, and Challenges.",
|
| 352 |
+
"author": "Yuxin Peng, Xin Huang,\nand Yunzhen Zhao. 2018.",
|
| 353 |
+
"venue": "IEEE Transactions on Circuits and Systems for\nVideo Technology 28, 9\n(2018), 2372\u20132385.",
|
| 354 |
+
"url": null
|
| 355 |
+
}
|
| 356 |
+
},
|
| 357 |
+
{
|
| 358 |
+
"25": {
|
| 359 |
+
"title": "Unsupervised Representation Learning with Deep\nConvolutional Generative Adversarial Networks.",
|
| 360 |
+
"author": "Alec Radford, Luke Metz,\nand Soumith Chintala. 2016.",
|
| 361 |
+
"venue": "CoRR abs/1511.06434\n(2016).",
|
| 362 |
+
"url": null
|
| 363 |
+
}
|
| 364 |
+
},
|
| 365 |
+
{
|
| 366 |
+
"26": {
|
| 367 |
+
"title": "Examining Personalization in Academic Web Search.\nIn Proceedings of the 26th ACM Conference on\nHypertext & Social Media (HT \u201915).\nACM, 103\u2013111.",
|
| 368 |
+
"author": "Sara Salehi, Jia Tina Du,\nand Helen Ashman. 2015.",
|
| 369 |
+
"venue": "",
|
| 370 |
+
"url": null
|
| 371 |
+
}
|
| 372 |
+
},
|
| 373 |
+
{
|
| 374 |
+
"27": {
|
| 375 |
+
"title": "Deep Collaborative Filtering with Multi-Aspect\nInformation in Heterogeneous Networks.",
|
| 376 |
+
"author": "Chuan Shi, Xiaotian Han,\nLi Song, Xiao Wang,\nSenzhang Wang, Junping Du, and\nPhilip S. Yu. 2021.",
|
| 377 |
+
"venue": "IEEE Transactions on Knowledge and Data\nEngineering 33, 4\n(2021), 1413\u20131425.",
|
| 378 |
+
"url": null
|
| 379 |
+
}
|
| 380 |
+
},
|
| 381 |
+
{
|
| 382 |
+
"28": {
|
| 383 |
+
"title": "Adversarial Cross-Modal Retrieval. In\nProceedings of the 25th ACM International\nConference on Multimedia (MM \u201917).\nACM, 154\u2013162.",
|
| 384 |
+
"author": "Bokun Wang, Yang Yang,\nXing Xu, Alan Hanjalic, and\nHeng Tao Shen. 2017.",
|
| 385 |
+
"venue": "",
|
| 386 |
+
"url": null
|
| 387 |
+
}
|
| 388 |
+
},
|
| 389 |
+
{
|
| 390 |
+
"29": {
|
| 391 |
+
"title": "Joint Feature Selection and Subspace Learning for\nCross-Modal Retrieval.",
|
| 392 |
+
"author": "Kaiye Wang, Ran He,\nLiang Wang, Wei Wang, and\nTieniu Tan. 2016.",
|
| 393 |
+
"venue": "IEEE Transactions on Pattern Analysis and\nMachine Intelligence 38, 10\n(2016), 2010\u20132023.",
|
| 394 |
+
"url": null
|
| 395 |
+
}
|
| 396 |
+
},
|
| 397 |
+
{
|
| 398 |
+
"30": {
|
| 399 |
+
"title": "Learning Coupled Feature Spaces for Cross-Modal\nMatching. In Proceedings of the 2013 IEEE\nInternational Conference on Computer Vision (ICCV\n\u201913). IEEE, 2088\u20132095.",
|
| 400 |
+
"author": "Kaiye Wang, Ran He,\nWei Wang, Liang Wang, and\nTieniu Tan. 2013.",
|
| 401 |
+
"venue": "",
|
| 402 |
+
"url": null
|
| 403 |
+
}
|
| 404 |
+
},
|
| 405 |
+
{
|
| 406 |
+
"31": {
|
| 407 |
+
"title": "Learning to Generate Time-Lapse Videos Using\nMulti-Stage Dynamic Generative Adversarial Networks. In\nProceedings of the IEEE Conference on Computer\nVision and Pattern Recognition (CVPR \u201918).\nIEEE, 2364\u20132373.",
|
| 408 |
+
"author": "Xiong Wei, Luo Wenhan,\nMa Lin, Wei Liu, and\nJiebo Luo and. 2018.",
|
| 409 |
+
"venue": "",
|
| 410 |
+
"url": null
|
| 411 |
+
}
|
| 412 |
+
},
|
| 413 |
+
{
|
| 414 |
+
"32": {
|
| 415 |
+
"title": "Modality-Dependent Cross-Media Retrieval.",
|
| 416 |
+
"author": "Yunchao Wei, Yao Zhao,\nZhenfeng Zhu, Shikui Wei,\nYanhui Xiao, Jiashi Feng, and\nShuicheng Yan. 2016.",
|
| 417 |
+
"venue": "ACM Transactions on Intelligent Systems and\nTechnology 7, 4, Article\n57 (2016).",
|
| 418 |
+
"url": null
|
| 419 |
+
}
|
| 420 |
+
},
|
| 421 |
+
{
|
| 422 |
+
"33": {
|
| 423 |
+
"title": "Image Fusion Based on Nonsubsampled Contourlet\nTransform and Saliency-Motivated Pulse Coupled Neural Networks.",
|
| 424 |
+
"author": "Liang Xu, Junping Du,\nand Qingping Li. 2013.",
|
| 425 |
+
"venue": "Mathematical Problems in Engineering\n2013, Article 57\n(2013).",
|
| 426 |
+
"url": null
|
| 427 |
+
}
|
| 428 |
+
},
|
| 429 |
+
{
|
| 430 |
+
"34": {
|
| 431 |
+
"title": "Deep low-rank subspace ensemble for multi-view\nclustering.",
|
| 432 |
+
"author": "Zhe Xue, Junping Du,\nDawei Du, and Siwei Lyu.\n2019.",
|
| 433 |
+
"venue": "Information Sciences 482\n(2019), 210\u2013227.",
|
| 434 |
+
"url": null
|
| 435 |
+
}
|
| 436 |
+
},
|
| 437 |
+
{
|
| 438 |
+
"35": {
|
| 439 |
+
"title": "Clustering-Induced Adaptive Structure Enhancing\nNetwork for Incomplete Multi-View Data. In\nProceedings of the Thirtieth International Joint\nConference on Artificial Intelligence (IJCAI \u201921).\nInternational Joint Conferences on Artificial\nIntelligence Organization, 3235\u20133241.",
|
| 440 |
+
"author": "Zhe Xue, Junping Du,\nChangwei Zheng, Jie Song,\nWenqi Ren, and Meiyu Liang.\n2021.",
|
| 441 |
+
"venue": "",
|
| 442 |
+
"url": null
|
| 443 |
+
}
|
| 444 |
+
},
|
| 445 |
+
{
|
| 446 |
+
"36": {
|
| 447 |
+
"title": "Deep correlation for matching images and text. In\n2015 IEEE Conference on Computer Vision and Pattern\nRecognition (CVPR \u201915). IEEE,\n3441\u20133450.",
|
| 448 |
+
"author": "Fei Yan and Krystian\nMikolajczyk. 2015.",
|
| 449 |
+
"venue": "",
|
| 450 |
+
"url": null
|
| 451 |
+
}
|
| 452 |
+
},
|
| 453 |
+
{
|
| 454 |
+
"37": {
|
| 455 |
+
"title": "Ontology-based intelligent information retrieval\nsystem.",
|
| 456 |
+
"author": "Yuehua Yang, Junping Du,\nand Yuan Ping. 2015.",
|
| 457 |
+
"venue": "Journal of Software 26\n(2015), 1675\u20131687.",
|
| 458 |
+
"url": null
|
| 459 |
+
}
|
| 460 |
+
},
|
| 461 |
+
{
|
| 462 |
+
"38": {
|
| 463 |
+
"title": "Transfer Learning with Dynamic Adversarial\nAdaptation Network. In 2019 IEEE International\nConference on Data Mining (ICDM \u201919).\n778\u2013786.",
|
| 464 |
+
"author": "Chaohui Yu, Jindong Wang,\nYiqiang Chen, and Meiyu Huang.\n2019.",
|
| 465 |
+
"venue": "",
|
| 466 |
+
"url": null
|
| 467 |
+
}
|
| 468 |
+
},
|
| 469 |
+
{
|
| 470 |
+
"39": {
|
| 471 |
+
"title": "Learning Cross-Media Joint Representation With\nSparse and Semisupervised Regularization.",
|
| 472 |
+
"author": "Xiaohua Zhai, Yuxin Peng,\nand Jianguo Xiao. 2014.",
|
| 473 |
+
"venue": "IEEE Transactions on Circuits and Systems for\nVideo Technology 24, 6\n(2014), 965\u2013978.",
|
| 474 |
+
"url": null
|
| 475 |
+
}
|
| 476 |
+
},
|
| 477 |
+
{
|
| 478 |
+
"40": {
|
| 479 |
+
"title": "Deep Supervised Cross-Modal Retrieval. In\n2019 IEEE/CVF Conference on Computer Vision and\nPattern Recognition (CVPR \u201919).\nIEEE, 10386\u201310395.",
|
| 480 |
+
"author": "Liangli Zhen, Peng Hu,\nXu Wang, and Dezhong Peng.\n2019.",
|
| 481 |
+
"venue": "",
|
| 482 |
+
"url": null
|
| 483 |
+
}
|
| 484 |
+
}
|
| 485 |
+
],
|
| 486 |
+
"url": "http://arxiv.org/html/2203.08615v3"
|
| 487 |
+
}
|
20241030/2206.03695v3.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2206.14254v4.json
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"title": "No Imputation without Representation",
|
| 3 |
+
"abstract": "By filling in missing values in datasets, imputation allows these datasets to be used with algorithms that cannot handle missing values by themselves. However, missing values may in principle contribute useful information that is lost through imputation. The missing-indicator approach can be used in combination with imputation to instead represent this information as a part of the dataset. There are several theoretical considerations why missing-indicators may or may not be beneficial, but there has not been any large-scale practical experiment on real-life datasets to test this question for machine learning predictions. We perform this experiment for three imputation strategies and a range of different classification algorithms, on the basis of twenty real-life datasets. In a follow-up experiment, we determine attribute-specific missingness thresholds for each classifier above which missing-indicators are more likely than not to increase classification performance. And in a second follow-up experiment, we evaluate numerical imputation of one-hot encoded categorical attributes. We reach the following conclusions. Firstly, missing-indicators generally increase classification performance. Secondly, with missing-indicators, nearest neighbour and iterative imputation do not lead to better performance than simple mean/mode imputation. Thirdly, for decision trees, pruning is necessary to prevent overfitting. Fourthly, the thresholds above which missing-indicators are more likely than not to improve performance are lower for categorical attributes than for numerical attributes. Lastly, mean imputation of numerical attributes preserves some of the information from missing values. Consequently, when not using missing-indicators it can be advantageous to apply mean imputation to one-hot encoded categorical attributes instead of mode imputation.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Missing values are a frequent issue in real-life datasets, and the subject of a large body of ongoing research. Some implementations of machine learning algorithms can handle missing values natively, requiring no further action by practitioners. But whenever this is not the case, a common general strategy is to replace the missing value with an estimated value: imputation. An advantage of imputation is that we obtain a complete dataset, to which we can apply any and all algorithms that make no special provision for missing values. However, missing values may be informative, and a disadvantage of imputation is that it removes this information.\nThe missing-indicator approach [12 ###reference_b12###] is an old proposal to represent and thereby preserve the information encoded by missing values. For every original attribute, it adds a new binary \u2018indicator\u2019 or \u2018dummy\u2019 attribute that takes a value of 1 if the value for the original attribute is missing, and 0 if not (Figure 1 ###reference_###).111Some authors use the opposite convention, letting the indicator express non-missingness. The missing-indicator approach is often presented as an alternative to imputation, but since it does not resolve the missing values in the original attributes, it can only be used in addition to, not instead of imputation.\nIt is an open question whether missing-indicators should be used for predictive tasks in machine learning [75 ###reference_b75###]. Both imputation and the missing-indicator approach originate in the statistical literature. While imputation strategies have been the subject of a rich body of research, the missing-indicator approach has not received a large amount of attention, and is often dismissed or disregarded in overviews of approaches towards missing values.\nIn the context of machine learning, the effect of missing-indicators can be framed as follows. 
On the one hand, the addition of missing-indicators results in a more complete, higher-dimensional representation of the data. On the other hand, their omission corresponds to a form of dimensionality reduction, which may increase the efficiency and effectiveness of a dataset by eliminating redundancy.\nTo determine whether this trade-off is useful, a key question is to which extent missing values in a given dataset are informative. If they are not, the phrase \u201cmissing at random\u201d (MAR) [66 ###reference_b66###] is used to indicate that the distribution of missing values is dependent on the known values, while the stricter phrase \u201cmissing completely at random\u201d (MCAR) denotes values that are distributed truly randomly. In contrast, informative missing values are often denoted as \u201cmissing not at random\u201d (MNAR).\nIn this respect, it is often argued that one should distinguish between missing values that could in principle have been obtained, and missing values that fundamentally do not exist, like attributes related to pregnancy tests for male subjects.222We are grateful to an anonymous reviewer for this example. In the latter case, the missing values are definitely informative. However, such clear-cut cases may be comparatively rare. Moreover, it does not follow that the missing values in the former case are definitely non-informative. In fact, for real-life datasets, unless we have specific knowledge about the process responsible for the missing values, we have to assume some degree of informativeness in principle.333This is acknowledged by authors working under the assumption of MAR, e.g. \u201cWhen data are missing for reasons beyond the investigator\u2019s control, one can never be certain whether MAR holds. 
The MAR hypothesis in such datasets cannot be formally tested unless the missing values, or at least a sample of them, are available from an external source.\u201d [69 ###reference_b69###]\nNonetheless, it has been argued that in practice, the attributes of a dataset can be sufficiently redundant that one can get away with assuming that its missing values are MAR [69 ###reference_b69###]. This means that most of the information contained by the missing values should in principle be recoverable through imputation. But even if this is so, imputation may not always perform optimally, in which case missing-indicators may still prove useful for machine learning.\nA more subtle point is that even when missing values are informative, the information they encode need not be lost completely through imputation. This is particularly evident in the case of numerically encoded binary attributes (e.g. 0 and 1), where imputation can represent missing values as a third, intermediary value (e.g. 0.5). More generally, Le Morvan et al. [49 ###reference_b49###] have recently observed that almost all deterministic imputation functions map records with missing values to distinct manifolds in the attribute space that can in principle be identified by sufficiently powerful algorithms. Nevertheless, missing-indicators can potentially make this learning task easier.\nIn light of these conflicting theoretical arguments, the usefulness of missing-indicators for real-life machine learning problems is an interesting empirical question. However, previous experiments in this direction have been limited in scope and number. These limitations include the use of only one or a handful of datasets, the use of datasets from which values have been removed artificially, and not comparing the same imputation strategies with and without missing-indicators.\nThe purpose of the present paper is straightforward. 
On the basis of twenty real-life classification problems with naturally occurring missing values, we measure the performance of a range of popular classification algorithms, using three common types of imputation, with and without missing-indicators. This allows us to evaluate the effect of using missing-indicators, as well as the choice of imputation strategy.\nMoreover, we conduct three follow-up experiments to gain a better understanding of when and why missing-indicators can be useful. In the first, we determine whether this is influenced by the type (categorical or numerical) and the amount of missing values of a given attribute. In the second, we test the hypothesis that numerical imputation partially preserves the information from missing values. And in the third follow-up experiment, we compare missing-indicators to two model-specific approaches to missing values for nearest neighbour and decision tree classifiers.\nIn Section 2 ###reference_###, we provide a brief overview of the existing literature on missing-indicators, including previous experimental evaluations. In Section 3 ###reference_###, we describe our experimental setup. We report our results in Section 4 ###reference_### and conclude in Section 5 ###reference_###."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Background",
|
| 15 |
+
"text": "We start with a brief discussion of the origins and reception of the missing-indicator approach, as well as previous experimental evaluations of the use of missing-indicators in prediction tasks."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "2.1",
|
| 19 |
+
"parent_section_id": "2",
|
| 20 |
+
"section_name": "Origins and Reception",
|
| 21 |
+
"text": "The missing-indicator approach originates in the literature on linear regression. It dates back to at least Cohen [12 ###reference_b12###], who pointed out that values in real-life datasets are typically not missing completely at random, and that the distribution of missing values may in particular depend on the values of the attribute that is to be predicted. He proposed that each attribute could be said to have two \u2018aspects\u2019, its value, and whether that value is present to begin with, which should be encoded with a pair of variables. For missing attribute values, the first of these variables was to be filled in with the mean of the known values, although other applications might call for different values. Cohen\u2019s proposal was subsequently expanded in [13 ###reference_b13###], but received only limited recognition in the following years [46 ###reference_b46###, 76 ###reference_b76###, 11 ###reference_b11###, 42 ###reference_b42###, 4 ###reference_b4###, 57 ###reference_b57###].\nCohen\u2019s proposal was subjected to a formal analysis by Jones [44 ###reference_b44###], who showed that, if one assumes that missing values are MAR, and the true linear regression model does not contain any terms related to missingness, it produces biased estimates of the regression coefficients (unless the sample covariance between independent variables is zero). 
However, these assumptions run directly counter to the position set out in [13 ###reference_b13###] that a priori, the missingness of each attribute is a possible explanatory factor, that it is safer not to assume that missing values are distributed randomly, and that the usefulness of missing-indicators is ultimately an empirical question.\nAllison [2 ###reference_b2###], motivated by [44 ###reference_b44###] and working under the general assumption of MAR, dismissed missing-indicators as \u201cclearly unacceptable\u201d, before conceding that they in fact produce optimal estimates when the missing value is not just missing, but cannot exist, such as the marital quality of an unmarried couple. However, this semantic distinction may not always be clear-cut in practice, and the more pertinent question may be whether missing values are informative. Allison [3 ###reference_b3###] later acknowledged that missing-indicators may lead to better predictions and their use for that purpose was acceptable. Missing-indicators have also been dismissed in [61 ###reference_b61###, 70 ###reference_b70###, 36 ###reference_b36###, 5 ###reference_b5###], and are frequently omitted in overviews of missing data strategies [69 ###reference_b69###, 25 ###reference_b25###, 23 ###reference_b23###, 32 ###reference_b32###, 16 ###reference_b16###]."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2.2",
|
| 25 |
+
"parent_section_id": "2",
|
| 26 |
+
"section_name": "Previous Experiments",
|
| 27 |
+
"text": "Only a handful of experimental comparisons of missing data approaches have included the missing-indicator approach, and these have been limited in scope. [81 ###reference_b81###] and [56 ###reference_b56###] only use a single dataset with randomly removed values, and base their evaluation on the performance of a single algorithm (respectively a neural network and linear regression). The authors of [59 ###reference_b59###] use three classification algorithms and 22 datasets, but again with randomly removed values, explicitly assuming an MCAR context. They conclude that imputation outperforms missing-indicators, but the comparison is not like-for-like, since it involves several forms of imputation but only combines indicator attributes with zero imputation. The authors of [41 ###reference_b41###] compare missing-indicators with zero imputation against several other forms of imputation without missing-indicators on one real dataset, for logistic regression. However, they do not evaluate predictive performance.\nDing & Simonoff [18 ###reference_b18###] conduct a more extensive investigation, using insights from a series of Monte Carlo simulations to systematically remove values from 36 datasets to simulate different forms of missingness. They use these datasets to compare zero imputation444Presumably, they use one-hot encoding for categorical attributes, in which case zero imputation is equivalent to treating missing values as a separate category, but they do not state this explicitly. with indicator attributes against mean/mode imputation without, as well as a number of other missing data approaches, for logistic regression. In addition, the authors evaluate a related representation of missing values555For categorical values, encoding missing values as a separate category; for numerical values, encoding missing values as an extremely large value that can always be split from the other values. 
on the same set of 36 datasets, and on one real-life dataset with missing values, for decision trees. They find that there is strong evidence that representing missing values is the best approach when they are informative; when this is not the case their results show no strong difference.\nThe comparison by Grzymala-Busse & Hu [38 ###reference_b38###] is based on 10 datasets with naturally occurring missing values. However, the setting is purely categorical \u2014 all attributes are transformed into categorical attributes \u2014 the only form of imputation is mode imputation, and the missing value approaches are evaluated on the basis of the LERS classifier (Learning from Examples based on Rough Sets [37 ###reference_b37###]).\nMarlin [52 ###reference_b52###] compares zero imputation with missing-indicators (augmentation with response indicators) against several forms of imputation without, for logistic regression and neural networks, on the basis of an extensive series of simulations, one dataset with artificially removed values, and three real datasets. For the real datasets, there is no strong difference in performance between the different approaches.\nMost recently, building on earlier experiments with simulated regression datasets [45 ###reference_b45###, 49 ###reference_b49###], Perez-Lebel et al. 
[60 ###reference_b60###] compare four different imputation techniques with and without missing-indicators (missingness mask) on seven prediction tasks derived from four real medical datasets, and conclude that missing-indicators consistently improve performance for gradient boosted trees, ridge regression and logistic regression.\nWe point out that the Missingness in Attribute (MIA) proposal [80 ###reference_b80###] for decision trees and decision tree ensembles can be understood as an implicit combination of missing-indicators with automatic imputation, and has also been shown to outperform imputation without missing-indicators in small-scale experimental studies [45 ###reference_b45###, 60 ###reference_b60###].\nFinally, even experimental comparisons of missing data that do not feature the missing-indicator approach generally do not involve more than a handful of real-life datasets with naturally occurring missing values. We have only found the connected works [50 ###reference_b50###, 51 ###reference_b51###], which feature 21 datasets from the UCI repository, but 12 of these are problematic.666The target column of the echocardiogram dataset (\u2018alive-at-1\u2019) is supposed to denote whether a patient survived for at least one year, but it doesn\u2019t appear to agree with the columns from which it is derived, that denote how long a patient (has) survived and whether they were alive at the end of that period. The audiology dataset has a large number of small classes with complex labels and should perhaps be analysed with multi-label classification. In addition, it has ordinal attributes where the order of the values is not entirely clear, and three different values that potentially denote missingness (\u2018?\u2019, \u2018unmeasured\u2019 and \u2018absent\u2019), and it is not completely clear how they relate to each other. 
The house-votes-84 dataset contains \u2018?\u2019 values, but its documentation explicitly states that these values are not unknown, but indicate different forms of abstention. The ozone dataset is a time-series problem, while the task associated with the sponge and water-treatment datasets is clustering, with no obvious target for classification among their respective attributes. Finally, the breast-cancer (9), cleveland (7), dermatology (8), lung-cancer (5), post-operative (3) and wisconsin (16) datasets contain only very few missing values, and any performance difference between missing value approaches on these datasets may to a large extent be coincidental."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3",
|
| 31 |
+
"parent_section_id": null,
|
| 32 |
+
"section_name": "Experimental Setup",
|
| 33 |
+
"text": "To evaluate the effect of the missing-indicator approach on classification performance, we conduct a series of experiments, using the Python machine learning library scikit-learn [58 ###reference_b58###]."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "3.1",
|
| 37 |
+
"parent_section_id": "3",
|
| 38 |
+
"section_name": "Questions",
|
| 39 |
+
"text": "The aim of our experiments is to answer the following questions:\nDo missing-indicators increase performance, and does it matter which imputation strategy they are paired with?\nWhen do missing-indicators start to become useful in terms of missingness?\nDoes using mean imputation instead of mode imputation allow for more information to be learned from missing categorical values?\nHow do missing-indicators compare to model-specific approaches to missing values?"
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "3.2",
|
| 43 |
+
"parent_section_id": "3",
|
| 44 |
+
"section_name": "Evaluation",
|
| 45 |
+
"text": "We preprocess datasets by standardising numerical attributes and one-hot encoding categorical attributes (as required by the implementations in scikit-learn).\nWe measure classification performance by performing stratified five-fold cross-validation, repeating this for five different random states (which determine both the dataset splits and the initialisation of algorithms with a random component), and calculating the mean area under the receiver operator curve (AUROC). For multi-class datasets, we use the extension of AUROC defined in [40 ###reference_b40###].\nTo compare two alternatives A and B, we consider the p-value of a one-sided Wilcoxon signed-rank test [82 ###reference_b82###] on the mean AUROC scores for our selection of datasets. When we compare A vs B, a score below 0.5 means that A increased performance on our selection of datasets; the lower the scores, the more confident we can be that this generalises to other similar datasets. Conversely, a score higher than 0.5 means that A decreased performance on our selection of datasets."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "3.3",
|
| 49 |
+
"parent_section_id": "3",
|
| 50 |
+
"section_name": "Imputation Strategies",
|
| 51 |
+
"text": "We consider the following three imputation strategies:\nMean/mode imputation replaces missing values of numerical and categorical attributes by, respectively, the mean and the mode of the non-missing values.\nNearest neighbour imputation [79 ###reference_b79###] replaces missing values of numerical and categorical attributes by, respectively, the mean and the mode of the 5 nearest non-missing values, with distance determined by the corresponding non-missing values for the other attributes.\nIterative imputation, as implemented in scikit-learn, based on [8 ###reference_b8###], predicts missing values of one attribute on the basis of the other attribute values using a round-robin approach. For numerical attributes, this uses Bayesian ridge regression [77 ###reference_b77###], initialised with mean imputation, while for categorical attributes, we use logistic regression, initialised with mode imputation.\nThe scikit-learn implementations of nearest neighbour and iterative imputation can currently only impute numerical features, so we had to adapt them for categorical imputation. In all other aspects, we follow the default settings of scikit-learn.777For the nomao dataset, iterative imputation diverged, so we had to restrict imputation to the interval ."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "3.4",
|
| 55 |
+
"parent_section_id": "3",
|
| 56 |
+
"section_name": "Classification Algorithms",
|
| 57 |
+
"text": "We consider the classification algorithms listed in Table 1 ###reference_###, as implemented in scikit-learn. Hyperparameters take their default values, except for SVM-L, LR and MLP, where we increase the maximum number of iterations to 10\u2009000 to increase the probability of convergence.\nFor a number of these algorithms, specific ways have been proposed to handle missing values: e.g. NN-2-D [19 ###reference_b19###], SVM-G [72 ###reference_b72###], MLP [78 ###reference_b78###, 73 ###reference_b73###, 43 ###reference_b43###] and CART [63 ###reference_b63###, 80 ###reference_b80###]. The purpose of the present experiment is to evaluate the general approach of using imputation with missing-indicators when these solutions have not been implemented, as is the case in scikit-learn."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "3.5",
|
| 61 |
+
"parent_section_id": "3",
|
| 62 |
+
"section_name": "Datasets",
|
| 63 |
+
"text": "We use twenty real-life datasets with naturally occurring missing values from the UCI repository for machine learning [20 ###reference_b20###] (Table 2 ###reference_###). These datasets are quite varied \u2014 they cover a number of different domains and contain between 155 and 76\u2009000 records, between 4 and 590 attributes, between 2 and 21 decision classes and missing value rates between 0.0032 and 0.43.\nWe have preprocessed these datasets in the following manner. We have removed attributes that were non-informative according to the accompanying documentation, as well as identifiers and alternative target values. When it was clear from the description that an attribute was categorical, we have treated it as such, even if it was originally represented with numerals. Conversely, where the possible values of an attribute admitted a semantic order, we have encoded them numerically. We have left binary attributes in their original encoding (categorical or numerical). To enable 5-fold cross-validation, we have removed classes with fewer than 5 records."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "4",
|
| 67 |
+
"parent_section_id": null,
|
| 68 |
+
"section_name": "Results and Discussion",
|
| 69 |
+
"text": "Using the experimental setup detailed in the previous section, we now try to answer the questions listed in Subsection 3.1 ###reference_###."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "4.1",
|
| 73 |
+
"parent_section_id": "4",
|
| 74 |
+
"section_name": "Do Missing-Indicators Increase Performance, and Does It Matter Which Imputation Strategy They Are Paired With?",
|
| 75 |
+
"text": "The -values obtained by comparing imputation with and without missing-indicators are displayed in Table 3 ###reference_###. Missing-indicators generally lead to increased performance \u2014 with the notable exception of CART, to which we return below. The more complicated imputation strategies do not result in much better results than mean/mode imputation when we pair imputation with missing-indicators (Table 4 ###reference_###). At best, nearest neighbour and iterative imputation only lead to a modest improvement, and for many classifiers, they actually decrease performance. Therefore, we focus on mean/mode imputation for the remainder of this section.\n###figure_1### ###figure_2### A possible reason for the failure of missing-indicators to increase performance with CART, is that by default, the scikit-learn implementation of this classifier does not perform pruning, making it prone to overfitting. To test this hypothesis, we repeat our experiment for CART and mean imputation, but this time we apply cost complexity pruning (). This clearly improves performance ( without missing-indicators, with missing-indicators), and now missing-indicators have a slight advantage ().\nWe have also taken a closer look at ERT and GBM, for which the performance increase from missing-indicators is not very significant. For ERT, this may be due to underfitting. If we increase the number of trees from the default 100 to 1000, this improves performance ( without missing-indicators, with missing-indicators), and makes the advantage of missing-indicators somewhat clearer ().\nFor GBM, the default choice of 100 iterations of gradient descent can lead to both under- or overfitting, depending on the dataset (Fig. 2 ###reference_###). We believe that it is generally preferable to continue training until an early-stopping criterion is met. 
However, applying the same criterion as with MLP888Setting aside 10% of the data for validation, stopping when validation loss has not decreased by at least 0.0001 for ten iterations, with a maximum of 10\u2009000 iterations. does not improve performance over the default of 100 ( without missing-indicators, with missing-indicators) and does not change the relative advantage due to missing-indicators ()."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "4.2",
|
| 79 |
+
"parent_section_id": "4",
|
| 80 |
+
"section_name": "When Do Missing-Indicators Start to Become Useful in Terms of Missingness?",
|
| 81 |
+
"text": "The theoretical motivation for representing missing values through missing-indicators is that this allows classifiers to learn the information encoded in their distribution. In principle, this should be easier when there are more examples to learn from. We can use this principle to obtain a better understanding of when missing-indicators might be useful on a per-attribute level.\nThe challenge that we have to overcome is that we would like to study individual attributes, but classification performance is measured on the dataset level. We tackle this by studying datasets with only one attribute with missing values, allowing us to investigate the relation between the properties of the attribute and classification performance on the dataset.\nWe conduct the following experiment. For each attribute with missing values in each dataset, we reduce the original dataset by removing all other attributes with missing values. We thus obtain 1148 reduced datasets with only one attribute with missing values, onto which we apply each of our classifiers (with pruning for CART, 1000 trees for ERT and early-stopping for GBM) and consider whether missing-indicators increase or decrease AUROC (we dismiss ties). Finally, for each classifier we fit a logistic regression model with cluster robust covariance (clustered by the originating dataset), with the following potential parameters: categoricalness (whether the attribute is categorical) and either the number of missing values (log-transformed) or the missing rate. We use the Akaike information criterion [1 ###reference_b1###] to decide whether to select these parameters.\nWe find that for most classifiers, either the absolute or the relative number of missing values is an informative parameter with positive coefficient. For MLP, neither parameter is informative, while for RF, the number of missing values is an informative parameter with negative coefficient, for which we have no explanation at present. 
For every classifier, categoricalness is an informative parameter with positive coefficient, meaning that missing-indicators are more beneficial for categorical than for numerical attributes.\nThe fitted logistic regression models allow us to calculate attribute-specific thresholds above which missing-indicators are more likely than not to increase AUROC, for all classifiers except MLP and RF (Table 5 ###reference_###). In many cases, these thresholds are 1 or 0.0, indicating that missing-indicators are always likely to increase AUROC."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "4.3",
|
| 85 |
+
"parent_section_id": "4",
|
| 86 |
+
"section_name": "Does Using Mean Imputation Instead of Mode Imputation Allow for More Information to Be Learned from Missing Categorical Values?",
|
| 87 |
+
"text": "As indicated above, missing-indicators are generally more likely to increase performance for categorical than for numerical attributes. A potential explanation for this is the fact that the mode of a categorical attribute is one of the non-missing values, whereas the mean of a numerical attribute is generally not equal to one of the non-missing values. Therefore, categorical imputation renders missing values truly indistinguishable from non-missing values, whereas numerical imputation does not \u2014 the information expressed by missing values may be partially recoverable, as argued by Le Morvan et al. [49 ###reference_b49###] and discussed in the Introduction.\nWe can achieve a similar partial representation of missing categorical values by changing the order in which we perform imputation and one-hot encoding, i.e. by performing numerical imputation on one-hot encoded categorical attributes with missing values. For imputation without missing-indicators, this indeed leads to better performance for some classifiers, while in combination with missing-indicators, it does not make much of a difference (Table 6 ###reference_###)999LR is an exception here, we have no explanation for this.."
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "4.4",
|
| 91 |
+
"parent_section_id": "4",
|
| 92 |
+
"section_name": "How Do Missing-Indicators Compare to Model-Specific Approaches to Missing Values?",
|
| 93 |
+
"text": "While not the primary focus of this paper, we may also wonder how the missing-indicator approach compares to model-specific approaches to missing values. For CART and RF, we consider the proposal by [80 ###reference_b80###], that a decision tree should evaluate two variants of each split, with missing values sent to either side. This has been implemented in the latest version of scikit-learn (1.4.0), which was released after the previous experiments in this section were conducted. In addition, we have modified the implementation of the nearest neighbour classifier in scikit-learn to obtain the approach labelled as \u2018normal\u2019 in [19 ###reference_b19###]. This calculates the distance between two records by linearly extrapolating the distance calculated only on the basis of all non-missing feature values. We note that every model-specific approach is different \u2014 we expect that their effect on classification performance will differ from case to case \u2014 so our evaluation of these two approaches only serves an illustrative purpose.\nWe find (Table 7 ###reference_###, Test 1) that the model-specific approach for the nearest neighbour classifiers performs significantly worse than mean/mean imputation with missing-indicators. In contrast, there is no difference for CART, and the model-specific approach appears to perform better for RF. We can also ask whether these model-specific approaches benefit from adding missing-indicators \u2014 here this only appears to be the case for the nearest neighbour classifiers (Table 7 ###reference_###, Test 2), i.e. when the model-specific approach performs badly. However, even with missing-indicators the model-specific approach for the nearest neighbour classifiers does not perform better than mean/mean imputation with missing indicators (Table 7 ###reference_###, Test 3)."
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "5",
|
| 97 |
+
"parent_section_id": null,
|
| 98 |
+
"section_name": "Conclusion",
|
| 99 |
+
"text": "We have presented the first large-scale experimental evaluation of the effect of the missing-indicator approach on classification performance, conducted on real datasets with naturally occurring missing values, paired with three different imputation techniques. The central question was whether, on balance, more benefit can be derived from the additional information encoded in a representation of missing values, or from the lower-dimensional projection of the data obtained by omitting missing-indicators.\nOn the whole, missing-indicators increase performance for the classification algorithms that we considered. An exception was CART, which suffers from overfitting in its default scikit-learn configuration. When pruning is applied, missing-indicators do increase performance. For ERT, the advantage of missing-indicators becomes more significant when underfitting is controlled.\nWe also found that, in the presence of missing-indicators, nearest neighbour and iterative imputation do not significantly increase performance over simple mean/mode imputation. This is a useful finding, because implementations of more sophisticated imputation strategies may not always be available to practitioners working in different frameworks, or easy to apply.\nIn a follow-up experiment, we determined attribute-specific missingness thresholds, above which missing-indicators are more likely than not to increase performance. For categorical attributes, this threshold is generally very low, while for numerical attributes, there is more variation among classifiers, in particular as to whether this threshold is absolute or relative to the total number of records.\nThe greater usefulness of missing-indicators for categorical than for numerical attributes can be explained by the fact that the mean of a numerical attribute is not generally identical to any of the non-missing values, and that mean imputation therefore preserves some of the information of missing values. 
This is supported by the results of a further experiment, which showed that, in the absence of missing-indicators, applying mean imputation to one-hot encoded categorical attributes results in somewhat better performance than mode imputation.\nWhile we have mainly considered the use of missing-indicators with imputation, there also exist model-specific solutions for missing values, that can in turn be combined with missing-indicators. Whether missing-indicators outperform these model-specific approaches has to be determined on a case-by-case basis. This was illustrated by our third follow-up experiment for nearest neighbour and decision tree classifiers.\nWe conclude that the combination of mean/mode imputation with missing-indicators is a safe default approach towards missing values in classification tasks. While over- or underfitting is a concern for certain classifiers, it is a concern for these classifiers with or without missing-indicators. However, practitioners may want to omit missing-indicators when the classification algorithm to be used has a special provision for missing values, when the missingness thresholds that we determined are not met, or on the basis of specific information about the distribution of missing values in the dataset. The use of missing-indicators can also be combined with dimensionality reduction algorithms to increase the information density of the resulting dataset.\nThe problem of missing data has been the subject of a rich body of theoretical literature. We hope to have contributed with this paper to the practical evaluation of some of that theory. In particular, we are happy to have identified twenty real-life datasets with missing values, and hope that in the future, more such datasets will be collected."
|
| 100 |
+
}
|
| 101 |
+
],
|
| 102 |
+
"appendix": [],
|
| 103 |
+
"tables": {
|
| 104 |
+
"1": {
|
| 105 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S3.T1\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>Classification algorithms.</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S3.T1.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S3.T1.1.1.1\">\n<th class=\"ltx_td ltx_align_justify ltx_align_top ltx_th ltx_th_column ltx_border_tt\" id=\"S3.T1.1.1.1.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.1.1.1.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.1.1.1.1.1\" style=\"width:65.0pt;\">Name</span>\n</span>\n</th>\n<th class=\"ltx_td ltx_align_justify ltx_align_top ltx_th ltx_th_column ltx_border_tt\" id=\"S3.T1.1.1.1.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.1.1.2.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.1.1.2.1.1\" style=\"width:325.2pt;\">Description</span>\n</span>\n</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S3.T1.1.2.1\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S3.T1.1.2.1.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.2.1.1.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.2.1.1.1.1\" style=\"width:65.0pt;\">NN-1</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S3.T1.1.2.1.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.2.1.2.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.2.1.2.1.1\" style=\"width:325.2pt;\">Nearest neighbours <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib28\" title=\"\">28</a>]</cite> with (Boscovich) 1-distance</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.3.2\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S3.T1.1.3.2.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.3.2.1.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.3.2.1.1.1\" 
style=\"width:65.0pt;\">NN-2</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S3.T1.1.3.2.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.3.2.2.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.3.2.2.1.1\" style=\"width:325.2pt;\">Nearest neighbours with (Euclidean) 2-distance</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.4.3\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S3.T1.1.4.3.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.4.3.1.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.4.3.1.1.1\" style=\"width:65.0pt;\">NN-1-D</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S3.T1.1.4.3.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.4.3.2.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.4.3.2.1.1\" style=\"width:325.2pt;\">Nearest neighbours with 1-distance, distance-weighted <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib21\" title=\"\">21</a>]</cite></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.5.4\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S3.T1.1.5.4.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.5.4.1.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.5.4.1.1.1\" style=\"width:65.0pt;\">NN-2-D</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S3.T1.1.5.4.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.5.4.2.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.5.4.2.1.1\" style=\"width:325.2pt;\">Nearest neighbours with 2-distance, distance-weighted</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.6.5\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S3.T1.1.6.5.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.6.5.1.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.6.5.1.1.1\" style=\"width:65.0pt;\">SVM-L</span>\n</span>\n</td>\n<td 
class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S3.T1.1.6.5.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.6.5.2.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.6.5.2.1.1\" style=\"width:325.2pt;\">Soft-margin Support Vector Machine <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib14\" title=\"\">14</a>]</cite> with linear kernel</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.7.6\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S3.T1.1.7.6.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.7.6.1.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.7.6.1.1.1\" style=\"width:65.0pt;\">SVM-G</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S3.T1.1.7.6.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.7.6.2.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.7.6.2.1.1\" style=\"width:325.2pt;\">Soft-margin Support Vector Machine with Gaussian kernel</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.8.7\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S3.T1.1.8.7.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.8.7.1.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.8.7.1.1.1\" style=\"width:65.0pt;\">LR</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S3.T1.1.8.7.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.8.7.2.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.8.7.2.1.1\" style=\"width:325.2pt;\">Multinomial logistic regression <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib15\" title=\"\">15</a>]</cite></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.9.8\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S3.T1.1.9.8.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.9.8.1.1\">\n<span class=\"ltx_p\" 
id=\"S3.T1.1.9.8.1.1.1\" style=\"width:65.0pt;\">MLP</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S3.T1.1.9.8.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.9.8.2.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.9.8.2.1.1\" style=\"width:325.2pt;\">Multilayer perceptron <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib65\" title=\"\">65</a>]</cite> with ReLu activation <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib31\" title=\"\">31</a>]</cite>, Glorot initialisation <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib34\" title=\"\">34</a>]</cite> and Adam optimisation <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib47\" title=\"\">47</a>]</cite></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.10.9\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S3.T1.1.10.9.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.10.9.1.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.10.9.1.1.1\" style=\"width:65.0pt;\">CART</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S3.T1.1.10.9.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.10.9.2.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.10.9.2.1.1\" style=\"width:325.2pt;\">Classification and Regression Tree <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib7\" title=\"\">7</a>]</cite></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.11.10\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S3.T1.1.11.10.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.11.10.1.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.11.10.1.1.1\" 
style=\"width:65.0pt;\">RF</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S3.T1.1.11.10.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.11.10.2.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.11.10.2.1.1\" style=\"width:325.2pt;\">Random Forest <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib6\" title=\"\">6</a>]</cite></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.12.11\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S3.T1.1.12.11.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.12.11.1.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.12.11.1.1.1\" style=\"width:65.0pt;\">ERT</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S3.T1.1.12.11.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.12.11.2.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.12.11.2.1.1\" style=\"width:325.2pt;\">Extremely Randomised Trees <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib33\" title=\"\">33</a>]</cite></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.13.12\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S3.T1.1.13.12.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.13.12.1.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.13.12.1.1.1\" style=\"width:65.0pt;\">ABT</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S3.T1.1.13.12.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.13.12.2.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.13.12.2.1.1\" style=\"width:325.2pt;\">Ada-boosted trees <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib29\" title=\"\">29</a>]</cite> with SAMME (stagewise additive modeling using a\nmulti-class exponential loss function) <cite class=\"ltx_cite 
ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib83\" title=\"\">83</a>]</cite></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.14.13\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_bb\" id=\"S3.T1.1.14.13.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.14.13.1.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.14.13.1.1.1\" style=\"width:65.0pt;\">GBM</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_bb\" id=\"S3.T1.1.14.13.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.14.13.2.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.14.13.2.1.1\" style=\"width:325.2pt;\">Gradient Boosting Machine <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib30\" title=\"\">30</a>]</cite></span>\n</span>\n</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 106 |
+
"capture": "Table 1: Classification algorithms."
|
| 107 |
+
},
|
| 108 |
+
"2": {
|
| 109 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S3.T2\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 2: </span>Real-life classification datasets with missing values from the UCI repository for machine learning.</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S3.T2.4\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S3.T2.4.5.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_tt\" id=\"S3.T2.4.5.1.1\">Dataset</th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column ltx_border_tt\" id=\"S3.T2.4.5.1.2\">Records</th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column ltx_border_tt\" id=\"S3.T2.4.5.1.3\">Classes</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_tt\" colspan=\"3\" id=\"S3.T2.4.5.1.4\">Attributes</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_tt\" colspan=\"3\" id=\"S3.T2.4.5.1.5\">Missing value rate</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_tt\" id=\"S3.T2.4.5.1.6\">Source</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S3.T2.4.4\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.1.1.1\"></td>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column\" id=\"S3.T2.2.2.2\"></th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column\" id=\"S3.T2.3.3.3\"></th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column\" id=\"S3.T2.4.4.5\">Num</th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column\" id=\"S3.T2.4.4.6\">Cat</th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column\" id=\"S3.T2.4.4.7\">Total</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column\" id=\"S3.T2.4.4.8\">Num</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column\" id=\"S3.T2.4.4.9\">Cat</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column\" id=\"S3.T2.4.4.10\">Total</th>\n<td class=\"ltx_td ltx_align_left\" 
id=\"S3.T2.4.4.4\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.4.6.1\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T2.4.6.1.1\">adult</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.4.6.1.2\">48842</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.4.6.1.3\">2</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.4.6.1.4\">5</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.4.6.1.5\">8</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.4.6.1.6\">13</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T2.4.6.1.7\">0.0</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T2.4.6.1.8\">0.017</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T2.4.6.1.9\">0.010</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T2.4.6.1.10\"><cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib48\" title=\"\">48</a>]</cite></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.4.7.2\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.7.2.1\">agaricus-lepiota</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.7.2.2\">8124</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.7.2.3\">2</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.7.2.4\">1</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.7.2.5\">21</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.7.2.6\">22</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.7.2.7\">0.0</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.7.2.8\">0.015</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.7.2.9\">0.014</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.7.2.10\"><cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib71\" title=\"\">71</a>]</cite></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.4.8.3\">\n<td class=\"ltx_td ltx_align_left\" 
id=\"S3.T2.4.8.3.1\">aps-failure</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.8.3.2\">76000</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.8.3.3\">2</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.8.3.4\">170</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.8.3.5\">0</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.8.3.6\">170</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.8.3.7\">0.083</td>\n<td class=\"ltx_td\" id=\"S3.T2.4.8.3.8\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.8.3.9\">0.083</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.8.3.10\"><cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib27\" title=\"\">27</a>]</cite></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.4.9.4\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.9.4.1\">arrhythmia</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.9.4.2\">443</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.9.4.3\">10</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.9.4.4\">279</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.9.4.5\">0</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.9.4.6\">279</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.9.4.7\">0.0032</td>\n<td class=\"ltx_td\" id=\"S3.T2.4.9.4.8\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.9.4.9\">0.0032</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.9.4.10\"><cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib39\" title=\"\">39</a>]</cite></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.4.10.5\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.10.5.1\">bands</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.10.5.2\">540</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.10.5.3\">2</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.10.5.4\">19</td>\n<td class=\"ltx_td ltx_align_right\" 
id=\"S3.T2.4.10.5.5\">15</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.10.5.6\">34</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.10.5.7\">0.054</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.10.5.8\">0.054</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.10.5.9\">0.054</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.10.5.10\"><cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib26\" title=\"\">26</a>]</cite></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.4.11.6\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.11.6.1\">ckd</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.11.6.2\">400</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.11.6.3\">2</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.11.6.4\">14</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.11.6.5\">10</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.11.6.6\">24</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.11.6.7\">0.14</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.11.6.8\">0.059</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.11.6.9\">0.11</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.11.6.10\"><cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib67\" title=\"\">67</a>]</cite></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.4.12.7\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.12.7.1\">crx</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.12.7.2\">690</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.12.7.3\">2</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.12.7.4\">6</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.12.7.5\">9</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.12.7.6\">15</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.12.7.7\">0.0060</td>\n<td class=\"ltx_td ltx_align_left\" 
id=\"S3.T2.4.12.7.8\">0.0068</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.12.7.9\">0.0065</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.12.7.10\"><cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib62\" title=\"\">62</a>]</cite></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.4.13.8\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.13.8.1\">dress-sales</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.13.8.2\">500</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.13.8.3\">2</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.13.8.4\">3</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.13.8.5\">9</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.13.8.6\">12</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.13.8.7\">0.20</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.13.8.8\">0.19</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.13.8.9\">0.19</td>\n<td class=\"ltx_td\" id=\"S3.T2.4.13.8.10\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.4.14.9\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.14.9.1\">exasens</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.14.9.2\">399</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.14.9.3\">4</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.14.9.4\">7</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.14.9.5\">0</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.14.9.6\">7</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.14.9.7\">0.43</td>\n<td class=\"ltx_td\" id=\"S3.T2.4.14.9.8\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.14.9.9\">0.43</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.14.9.10\"><cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib74\" title=\"\">74</a>]</cite></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.4.15.10\">\n<td class=\"ltx_td ltx_align_left\" 
id=\"S3.T2.4.15.10.1\">hcc</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.15.10.2\">165</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.15.10.3\">2</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.15.10.4\">49</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.15.10.5\">0</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.15.10.6\">49</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.15.10.7\">0.10</td>\n<td class=\"ltx_td\" id=\"S3.T2.4.15.10.8\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.15.10.9\">0.10</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.15.10.10\"><cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib68\" title=\"\">68</a>]</cite></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.4.16.11\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.16.11.1\">heart-disease</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.16.11.2\">1611</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.16.11.3\">2</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.16.11.4\">13</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.16.11.5\">1</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.16.11.6\">14</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.16.11.7\">0.18</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.16.11.8\">0.0</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.16.11.9\">0.17</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.16.11.10\"><cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib17\" title=\"\">17</a>]</cite></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.4.17.12\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.17.12.1\">hepatitis</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.17.12.2\">155</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.17.12.3\">2</td>\n<td class=\"ltx_td ltx_align_right\" 
id=\"S3.T2.4.17.12.4\">19</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.17.12.5\">0</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.17.12.6\">19</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.17.12.7\">0.057</td>\n<td class=\"ltx_td\" id=\"S3.T2.4.17.12.8\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.17.12.9\">0.057</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.17.12.10\"><cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib22\" title=\"\">22</a>]</cite></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.4.18.13\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.18.13.1\">horse-colic</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.18.13.2\">368</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.18.13.3\">2</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.18.13.4\">19</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.18.13.5\">1</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.18.13.6\">20</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.18.13.7\">0.25</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.18.13.8\">0.39</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.18.13.9\">0.26</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.18.13.10\"><cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib54\" title=\"\">54</a>]</cite></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.4.19.14\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.19.14.1\">mammographic-masses</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.19.14.2\">961</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.19.14.3\">2</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.19.14.4\">2</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.19.14.5\">2</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.19.14.6\">4</td>\n<td class=\"ltx_td ltx_align_left\" 
id=\"S3.T2.4.19.14.7\">0.042</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.19.14.8\">0.041</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.19.14.9\">0.042</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.19.14.10\"><cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib24\" title=\"\">24</a>]</cite></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.4.20.15\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.20.15.1\">mi</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.20.15.2\">1700</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.20.15.3\">8</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.20.15.4\">111</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.20.15.5\">0</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.20.15.6\">111</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.20.15.7\">0.085</td>\n<td class=\"ltx_td\" id=\"S3.T2.4.20.15.8\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.20.15.9\">0.085</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.20.15.10\"><cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib35\" title=\"\">35</a>]</cite></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.4.21.16\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.21.16.1\">nomao</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.21.16.2\">34465</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.21.16.3\">2</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.21.16.4\">89</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.21.16.5\">29</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.21.16.6\">118</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.21.16.7\">0.38</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.21.16.8\">0.37</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.21.16.9\">0.38</td>\n<td class=\"ltx_td ltx_align_left\" 
id=\"S3.T2.4.21.16.10\"><cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib9\" title=\"\">9</a>]</cite></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.4.22.17\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.22.17.1\">primary-tumor</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.22.17.2\">330</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.22.17.3\">15</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.22.17.4\">16</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.22.17.5\">1</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.22.17.6\">17</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.22.17.7\">0.029</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.22.17.8\">0.20</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.22.17.9\">0.039</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.22.17.10\"><cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib10\" title=\"\">10</a>]</cite></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.4.23.18\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.23.18.1\">secom</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.23.18.2\">1567</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.23.18.3\">2</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.23.18.4\">590</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.23.18.5\">0</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.23.18.6\">590</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.23.18.7\">0.045</td>\n<td class=\"ltx_td\" id=\"S3.T2.4.23.18.8\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.23.18.9\">0.045</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.23.18.10\"><cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib53\" title=\"\">53</a>]</cite></td>\n</tr>\n<tr class=\"ltx_tr\" 
id=\"S3.T2.4.24.19\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.24.19.1\">soybean</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.24.19.2\">683</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.24.19.3\">19</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.24.19.4\">22</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.24.19.5\">13</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.4.24.19.6\">35</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.24.19.7\">0.099</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.24.19.8\">0.096</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.24.19.9\">0.098</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T2.4.24.19.10\"><cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib55\" title=\"\">55</a>]</cite></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.4.25.20\">\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"S3.T2.4.25.20.1\">thyroid0387</td>\n<td class=\"ltx_td ltx_align_right ltx_border_bb\" id=\"S3.T2.4.25.20.2\">9172</td>\n<td class=\"ltx_td ltx_align_right ltx_border_bb\" id=\"S3.T2.4.25.20.3\">18</td>\n<td class=\"ltx_td ltx_align_right ltx_border_bb\" id=\"S3.T2.4.25.20.4\">7</td>\n<td class=\"ltx_td ltx_align_right ltx_border_bb\" id=\"S3.T2.4.25.20.5\">16</td>\n<td class=\"ltx_td ltx_align_right ltx_border_bb\" id=\"S3.T2.4.25.20.6\">23</td>\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"S3.T2.4.25.20.7\">0.22</td>\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"S3.T2.4.25.20.8\">0.0021</td>\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"S3.T2.4.25.20.9\">0.069</td>\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"S3.T2.4.25.20.10\"><cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2206.14254v4#bib.bib64\" title=\"\">64</a>]</cite></td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 110 |
+
"capture": "Table 2: Real-life classification datasets with missing values from the UCI repository for machine learning."
|
| 111 |
+
},
|
| 112 |
+
"3": {
|
| 113 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S3.T3\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 3: </span>One-sided -values, imputation with missing-indicators versus without.</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S3.T3.3\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S3.T3.3.2.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_tt\" id=\"S3.T3.3.2.1.1\">Classifier</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_tt\" colspan=\"3\" id=\"S3.T3.3.2.1.2\">Imputation strategy</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S3.T3.3.1\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.1.1\"></td>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column\" id=\"S3.T3.3.1.2\">Mean/mode</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column\" id=\"S3.T3.3.1.3\">Neighbours</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column\" id=\"S3.T3.3.1.4\">Iterative</th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T3.3.3.1\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.3.3.1.1\">NN-1</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.3.3.1.2\">0.0088</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.3.3.1.3\">0.0015</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.3.3.1.4\">0.0017</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T3.3.4.2\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.4.2.1\">NN-2</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.4.2.2\">0.015</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.4.2.3\">0.0024</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.4.2.4\">0.00048</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T3.3.5.3\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.5.3.1\">NN-1-D</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.5.3.2\">0.0045</td>\n<td class=\"ltx_td 
ltx_align_left\" id=\"S3.T3.3.5.3.3\">0.0019</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.5.3.4\">0.0011</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T3.3.6.4\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.6.4.1\">NN-2-D</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.6.4.2\">0.0019</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.6.4.3\">0.0031</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.6.4.4\">0.00027</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T3.3.7.5\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.7.5.1\">SVM-L</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.7.5.2\">0.13</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.7.5.3\">0.27</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.7.5.4\">0.099</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T3.3.8.6\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.8.6.1\">SVM-G</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.8.6.2\">0.0032</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.8.6.3\">0.0027</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.8.6.4\">0.0021</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T3.3.9.7\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.9.7.1\">LR</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.9.7.2\">0.079</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.9.7.3\">0.063</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.9.7.4\">0.068</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T3.3.10.8\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.10.8.1\">MLP</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.10.8.2\">0.0027</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.10.8.3\">0.0063</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.10.8.4\">0.0056</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T3.3.11.9\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.11.9.1\">CART</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.11.9.2\">0.44</td>\n<td class=\"ltx_td ltx_align_left\" 
id=\"S3.T3.3.11.9.3\">0.39</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.11.9.4\">0.40</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T3.3.12.10\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.12.10.1\">RF</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.12.10.2\">0.038</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.12.10.3\">0.051</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.12.10.4\">0.17</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T3.3.13.11\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.13.11.1\">ERT</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.13.11.2\">0.28</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.13.11.3\">0.0099</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.13.11.4\">0.026</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T3.3.14.12\">\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.14.12.1\">ABT</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.14.12.2\">0.089</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.14.12.3\">0.078</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T3.3.14.12.4\">0.47</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T3.3.15.13\">\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"S3.T3.3.15.13.1\">GBM</td>\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"S3.T3.3.15.13.2\">0.17</td>\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"S3.T3.3.15.13.3\">0.012</td>\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"S3.T3.3.15.13.4\">0.36</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 114 |
+
"capture": "Table 3: One-sided -values, imputation with missing-indicators versus without."
|
| 115 |
+
},
|
| 116 |
+
"4": {
|
| 117 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T4\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 4: </span>One-sided -values, missing-indicators with iterative and nearest neighbour versus mean/mode imputation.</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.T4.3\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T4.3.2.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T4.3.2.1.1\">Classifier</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_tt\" colspan=\"2\" id=\"S4.T4.3.2.1.2\">Imputation strategy</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T4.3.1\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.1.1\"></td>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column\" id=\"S4.T4.3.1.2\">Neighbours</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column\" id=\"S4.T4.3.1.3\">Iterative</th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.3.3.1\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T4.3.3.1.1\">NN-1</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T4.3.3.1.2\">0.94</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T4.3.3.1.3\">0.15</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.3.4.2\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.4.2.1\">NN-2</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.4.2.2\">0.78</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.4.2.3\">0.19</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.3.5.3\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.5.3.1\">NN-1-D</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.5.3.2\">0.97</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.5.3.3\">0.55</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.3.6.4\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.6.4.1\">NN-2-D</td>\n<td class=\"ltx_td ltx_align_left\" 
id=\"S4.T4.3.6.4.2\">0.84</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.6.4.3\">0.23</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.3.7.5\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.7.5.1\">SVM-L</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.7.5.2\">0.53</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.7.5.3\">0.61</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.3.8.6\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.8.6.1\">SVM-G</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.8.6.2\">0.47</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.8.6.3\">0.94</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.3.9.7\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.9.7.1\">LR</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.9.7.2\">0.40</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.9.7.3\">0.83</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.3.10.8\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.10.8.1\">MLP</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.10.8.2\">0.30</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.10.8.3\">0.55</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.3.11.9\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.11.9.1\">CART</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.11.9.2\">0.69</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.11.9.3\">0.79</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.3.12.10\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.12.10.1\">RF</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.12.10.2\">0.61</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.12.10.3\">0.86</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.3.13.11\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.13.11.1\">ERT</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.13.11.2\">0.61</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.13.11.3\">0.64</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.3.14.12\">\n<td class=\"ltx_td ltx_align_left\" 
id=\"S4.T4.3.14.12.1\">ABT</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.14.12.2\">0.33</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.3.14.12.3\">0.78</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.3.15.13\">\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"S4.T4.3.15.13.1\">GBM</td>\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"S4.T4.3.15.13.2\">0.93</td>\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"S4.T4.3.15.13.3\">0.85</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 118 |
+
"capture": "Table 4: One-sided -values, missing-indicators with iterative and nearest neighbour versus mean/mode imputation."
|
| 119 |
+
},
|
| 120 |
+
"5": {
|
| 121 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T5\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 5: </span>Thresholds above which missing-indicators are more likely than not to increase AUROC, in terms of the absolute number of missing values or the missing rate.</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.T5.1\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T5.1.1.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_tt\" id=\"S4.T5.1.1.1.1\">Classifier</th>\n<td class=\"ltx_td ltx_align_left ltx_border_tt\" colspan=\"2\" id=\"S4.T5.1.1.1.2\">Missing values</td>\n<td class=\"ltx_td ltx_align_left ltx_border_tt\" colspan=\"2\" id=\"S4.T5.1.1.1.3\">Missing rate</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.2.2\">\n<th class=\"ltx_td ltx_th ltx_th_row\" id=\"S4.T5.1.2.2.1\"></th>\n<td class=\"ltx_td ltx_align_right\" id=\"S4.T5.1.2.2.2\">Cat</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S4.T5.1.2.2.3\">Num</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S4.T5.1.2.2.4\">Cat</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S4.T5.1.2.2.5\">Num</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.3.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.T5.1.3.3.1\">NN-1</th>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S4.T5.1.3.3.2\">1</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S4.T5.1.3.3.3\">302</td>\n<td class=\"ltx_td ltx_border_t\" id=\"S4.T5.1.3.3.4\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S4.T5.1.3.3.5\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.4.4\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T5.1.4.4.1\">NN-2</th>\n<td class=\"ltx_td ltx_align_right\" id=\"S4.T5.1.4.4.2\">2</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S4.T5.1.4.4.3\">130</td>\n<td class=\"ltx_td\" id=\"S4.T5.1.4.4.4\"></td>\n<td class=\"ltx_td\" 
id=\"S4.T5.1.4.4.5\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.5.5\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T5.1.5.5.1\">NN-1-D</th>\n<td class=\"ltx_td ltx_align_right\" id=\"S4.T5.1.5.5.2\">1</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S4.T5.1.5.5.3\">291</td>\n<td class=\"ltx_td\" id=\"S4.T5.1.5.5.4\"></td>\n<td class=\"ltx_td\" id=\"S4.T5.1.5.5.5\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.6.6\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T5.1.6.6.1\">NN-2-D</th>\n<td class=\"ltx_td ltx_align_right\" id=\"S4.T5.1.6.6.2\">1</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S4.T5.1.6.6.3\">73</td>\n<td class=\"ltx_td\" id=\"S4.T5.1.6.6.4\"></td>\n<td class=\"ltx_td\" id=\"S4.T5.1.6.6.5\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.7.7\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T5.1.7.7.1\">SVM-L</th>\n<td class=\"ltx_td\" id=\"S4.T5.1.7.7.2\"></td>\n<td class=\"ltx_td\" id=\"S4.T5.1.7.7.3\"></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S4.T5.1.7.7.4\">0.0</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S4.T5.1.7.7.5\">0.0</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.8.8\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T5.1.8.8.1\">SVM-G</th>\n<td class=\"ltx_td\" id=\"S4.T5.1.8.8.2\"></td>\n<td class=\"ltx_td\" id=\"S4.T5.1.8.8.3\"></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S4.T5.1.8.8.4\">0.0</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S4.T5.1.8.8.5\">0.40</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.9.9\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T5.1.9.9.1\">LR</th>\n<td class=\"ltx_td\" id=\"S4.T5.1.9.9.2\"></td>\n<td class=\"ltx_td\" id=\"S4.T5.1.9.9.3\"></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S4.T5.1.9.9.4\">0.0</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S4.T5.1.9.9.5\">0.0</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.10.10\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" 
id=\"S4.T5.1.10.10.1\">CART</th>\n<td class=\"ltx_td\" id=\"S4.T5.1.10.10.2\"></td>\n<td class=\"ltx_td\" id=\"S4.T5.1.10.10.3\"></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S4.T5.1.10.10.4\">0.0</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S4.T5.1.10.10.5\">0.12</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.11.11\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T5.1.11.11.1\">ERT</th>\n<td class=\"ltx_td\" id=\"S4.T5.1.11.11.2\"></td>\n<td class=\"ltx_td\" id=\"S4.T5.1.11.11.3\"></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S4.T5.1.11.11.4\">0.0</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S4.T5.1.11.11.5\">1.0</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.12.12\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T5.1.12.12.1\">ABT</th>\n<td class=\"ltx_td ltx_align_right\" id=\"S4.T5.1.12.12.2\">1</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S4.T5.1.12.12.3\">23200</td>\n<td class=\"ltx_td\" id=\"S4.T5.1.12.12.4\"></td>\n<td class=\"ltx_td\" id=\"S4.T5.1.12.12.5\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.13.13\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_bb\" id=\"S4.T5.1.13.13.1\">GBM</th>\n<td class=\"ltx_td ltx_border_bb\" id=\"S4.T5.1.13.13.2\"></td>\n<td class=\"ltx_td ltx_border_bb\" id=\"S4.T5.1.13.13.3\"></td>\n<td class=\"ltx_td ltx_align_right ltx_border_bb\" id=\"S4.T5.1.13.13.4\">0.0</td>\n<td class=\"ltx_td ltx_align_right ltx_border_bb\" id=\"S4.T5.1.13.13.5\">0.0</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 122 |
+
"capture": "Table 5: Thresholds above which missing-indicators are more likely than not to increase AUROC, in terms of the absolute number of missing values or the missing rate."
|
| 123 |
+
},
|
| 124 |
+
"6": {
|
| 125 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T6\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 6: </span>One-sided -values, mean imputation after one-hot encoding versus mode imputation of missing categorical values.</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.T6.3\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T6.3.1.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T6.3.1.1.1\">Classifier</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T6.3.1.1.2\">Without \u2014</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T6.3.1.1.3\">With missing-indicators</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T6.3.2.1\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T6.3.2.1.1\">NN-1</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T6.3.2.1.2\">0.020</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T6.3.2.1.3\">0.077</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T6.3.3.2\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.3.2.1\">NN-2</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.3.2.2\">0.14</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.3.2.3\">0.031</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T6.3.4.3\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.4.3.1\">NN-1-D</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.4.3.2\">0.016</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.4.3.3\">0.12</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T6.3.5.4\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.5.4.1\">NN-2-D</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.5.4.2\">0.16</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.5.4.3\">0.031</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T6.3.6.5\">\n<td class=\"ltx_td ltx_align_left\" 
id=\"S4.T6.3.6.5.1\">SVM-L</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.6.5.2\">0.43</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.6.5.3\">0.57</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T6.3.7.6\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.7.6.1\">SVM-G</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.7.6.2\">0.17</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.7.6.3\">0.56</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T6.3.8.7\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.8.7.1\">LR</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.8.7.2\">0.81</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.8.7.3\">0.057</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T6.3.9.8\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.9.8.1\">MLP</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.9.8.2\">0.16</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.9.8.3\">0.60</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T6.3.10.9\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.10.9.1\">CART</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.10.9.2\">0.44</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.10.9.3\">0.30</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T6.3.11.10\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.11.10.1\">RF</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.11.10.2\">0.046</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.11.10.3\">0.57</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T6.3.12.11\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.12.11.1\">ERT</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.12.11.2\">0.030</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.12.11.3\">0.95</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T6.3.13.12\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.13.12.1\">ABT</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.13.12.2\">0.48</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T6.3.13.12.3\">0.62</td>\n</tr>\n<tr class=\"ltx_tr\" 
id=\"S4.T6.3.14.13\">\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"S4.T6.3.14.13.1\">GBM</td>\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"S4.T6.3.14.13.2\">0.077</td>\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"S4.T6.3.14.13.3\">0.54</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 126 |
+
"capture": "Table 6: One-sided -values, mean imputation after one-hot encoding versus mode imputation of missing categorical values."
|
| 127 |
+
},
|
| 128 |
+
"7": {
|
| 129 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T7\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 7: </span>One-sided -values, model-specific missing value approaches. Test 1: Mean/mean imputation with missing-indicators vs model-specific approach without; Test 2: Model-specific approach with missing-indicators vs model-specific approach without; Test 3: Model-specific approach vs mean/mean imputation, both with missing-indicators.</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.T7.3\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T7.3.1.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T7.3.1.1.1\">Classifier</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T7.3.1.1.2\">Test 1</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T7.3.1.1.3\">Test 2</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T7.3.1.1.4\">Test 3</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T7.3.2.1\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T7.3.2.1.1\">NN-1</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T7.3.2.1.2\">0.00036</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T7.3.2.1.3\">0.00017</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T7.3.2.1.4\">0.56</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T7.3.3.2\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T7.3.3.2.1\">NN-2</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T7.3.3.2.2\">0.00074</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T7.3.3.2.3\">0.00015</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T7.3.3.2.4\">0.94</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T7.3.4.3\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T7.3.4.3.1\">NN-1-D</td>\n<td class=\"ltx_td ltx_align_left\" 
id=\"S4.T7.3.4.3.2\">0.00020</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T7.3.4.3.3\">0.00015</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T7.3.4.3.4\">0.89</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T7.3.5.4\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T7.3.5.4.1\">NN-2-D</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T7.3.5.4.2\">0.00023</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T7.3.5.4.3\">0.00011</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T7.3.5.4.4\">0.93</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T7.3.6.5\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T7.3.6.5.1\">CART</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T7.3.6.5.2\">0.50</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T7.3.6.5.3\">0.86</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T7.3.6.5.4\">0.66</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T7.3.7.6\">\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"S4.T7.3.7.6.1\">RF</td>\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"S4.T7.3.7.6.2\">0.92</td>\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"S4.T7.3.7.6.3\">0.60</td>\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"S4.T7.3.7.6.4\">0.092</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 130 |
+
"capture": "Table 7: One-sided -values, model-specific missing value approaches. Test 1: Mean/mean imputation with missing-indicators vs model-specific approach without; Test 2: Model-specific approach with missing-indicators vs model-specific approach without; Test 3: Model-specific approach vs mean/mean imputation, both with missing-indicators."
|
| 131 |
+
}
|
| 132 |
+
},
|
| 133 |
+
"image_paths": {
|
| 134 |
+
"2(a)": {
|
| 135 |
+
"figure_path": "2206.14254v4_figure_2(a).png",
|
| 136 |
+
"caption": "(a) adult\nFigure 2: GBM test AUROC for two illustrative datasets, using mean/mode imputation without missing-indicators, for one random state and one cross-validation fold. The default hyperparameter value of 100 iterations leads to under- LABEL:sub@fig_gbm_underfitting and overfitting LABEL:sub@fig_gbm_overfitting.",
|
| 137 |
+
"url": "http://arxiv.org/html/2206.14254v4/x1.png"
|
| 138 |
+
},
|
| 139 |
+
"2(b)": {
|
| 140 |
+
"figure_path": "2206.14254v4_figure_2(b).png",
|
| 141 |
+
"caption": "(b) mammographic-masses\nFigure 2: GBM test AUROC for two illustrative datasets, using mean/mode imputation without missing-indicators, for one random state and one cross-validation fold. The default hyperparameter value of 100 iterations leads to under- LABEL:sub@fig_gbm_underfitting and overfitting LABEL:sub@fig_gbm_overfitting.",
|
| 142 |
+
"url": "http://arxiv.org/html/2206.14254v4/x2.png"
|
| 143 |
+
}
|
| 144 |
+
},
|
| 145 |
+
"validation": true,
|
| 146 |
+
"references": [],
|
| 147 |
+
"url": "http://arxiv.org/html/2206.14254v4"
|
| 148 |
+
}
|
20241030/2208.02439v2.json
ADDED
|
@@ -0,0 +1,288 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"title": "MPPI-IPDDP: A Hybrid Method of Collision-Free Smooth Trajectory Generation for Autonomous Robots",
|
| 3 |
+
"abstract": "This paper presents a hybrid trajectory optimization method designed to generate collision-free, smooth trajectories for autonomous mobile robots. By combining sampling-based Model Predictive Path Integral (MPPI) control with gradient-based Interior-Point Differential Dynamic Programming (IPDDP), we leverage their respective strengths in exploration and smoothing. The proposed method, MPPI-IPDDP, involves three steps: First, MPPI control is used to generate a coarse trajectory. Second, a collision-free convex corridor is constructed. Third, IPDDP is applied to smooth the coarse trajectory, utilizing the collision-free corridor from the second step. To demonstrate the effectiveness of our approach, we apply the proposed algorithm to trajectory optimization for differential-drive wheeled mobile robots and point-mass quadrotors. In comparisons with other MPPI variants and continuous optimization-based solvers, our method shows superior performance in terms of computational robustness and trajectory smoothness.\n\n\nCode: https://github.com/i-ASL/mppi-ipddp\n\nvideo: https://youtu.be/-oUAt5sd9Bk",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Path planning is a critical problem for autonomous vehicles and robots. Several considerations need to be addressed simultaneously in robot path planning and navigation, such as specifying mission goals, ensuring dynamic feasibility, avoiding collisions, and considering internal constraints.\nOptimization-based methods for path planning can explicitly handle these tasks. Two popular optimal path planning methods for autonomous robots are gradient-based and sampling-based methods. Gradient-based methods assume that the objective and constraint functions in the planning problem are differentiable, allowing for a fast, locally optimal smooth trajectory. These methods typically rely on nonlinear programming solvers such as IPOPT [1 ###reference_b1###] and SNOPT [2 ###reference_b2###].\nOn the other hand, sampling-based methods do not require function differentiability, making them more suitable for modeling obstacles of various shapes. Additionally, they naturally perform exploration, helping escape local optima. However, derivative-free sampling-based methods often result in coarse (e.g., zigzag) trajectories. For example, RRT-based methods can generate coarse trajectories [3 ###reference_b3###]. To balance the pros and cons of both methods, a hybrid approach combining them, as proposed in [4 ###reference_b4###], can be considered.\nThe optimization-based trajectory generation architecture known as model predictive control (MPC) has been extensively applied to robotic trajectory generation and planning problems [5 ###reference_b5###, 6 ###reference_b6###].\nDeep reinforcement learning-based trajectory generation for mobile robots is another popular approach [7 ###reference_b7###].\nA comparison of the continuous optimal control and reinforcement learning frameworks for trajectory generation of autonomous drone racing is provided in [8 ###reference_b8###]. 
Combining MPC with learning schemes has drawn noticeable attention to the robotics and control community [9 ###reference_b9###, 10 ###reference_b10###, 11 ###reference_b11###]. Using the property of differential flatness, a robotic trajectory optimization problem can be converted to finite-dimensional parametric optimization [12 ###reference_b12###].\nThis paper proposes a hybrid trajectory optimization method that modularly incorporates sampling-based and gradient-based methods. Fig. 1 ###reference_### illustrates the structure of the proposed collision-free smooth path planning approach. Our method generates a coarse trajectory and path corridors using sampling-based optimization via variational inference (VI). Subsequently, a smooth trajectory is obtained through gradient-based optimization via the differential dynamic programming (DDP) scheme. We assume that a collision checker is available to determine whether a collision has occurred.\nVariational inference (VI) refers to a class of optimization-based approaches for approximating posterior distributions, making Bayesian inference computationally efficient and scalable [13 ###reference_b13###, 14 ###reference_b14###]. The recently proposed model predictive path integral (MPPI) is a sampling-based planning method that uses the VI framework [15 ###reference_b15###, 16 ###reference_b16###]. In essence, MPPI samples random trajectories around a nominal trajectory, assigns weights based on cost, and updates the nominal trajectory using the weighted average. In this paper, MPPI is used to generate a coarse trajectory for exploration while avoiding collisions.\nWhile methods such as RRT and dynamic programming (DP) can achieve collision-free rough trajectory planning, we select MPPI control due to its suitability for real-time trajectory generation as a local planner, whereas RRT-like methods are often used as global planners. 
MPPI offers significant computational efficiency, allowing it to operate in real-time, which is critical for continuous control tasks. Additionally, MPPI inherently incorporates system dynamics within its rollout-based framework, providing a more seamless integration between trajectory planning and control. In contrast, RRT-like methods, while effective for finding rough trajectories, suffer from unpredictable computation times, which pose challenges for real-time controller design. This makes MPPI a better fit for our goal of real-time, dynamically feasible trajectory generation.\nTo smooth the coarse trajectory with gradient-based optimization, we introduce the concept of path corridors, a popular scheme in the literature [17 ###reference_b17###, 4 ###reference_b4###, 18 ###reference_b18###]. Path corridors are collections of convex collision-free regions guiding a robot toward a goal position. Unlike previous works, we use simple sampling-based VI framework to construct these corridors.\nTo achieve a smooth trajectory, we apply the differential dynamic programming (DDP) framework for gradient-based optimization. DDP-based approaches, including the iterative linear quadratic regulator (iLQR), have become popular for nonlinear optimal control problems and have been applied in many contexts of planning and nonlinear model predictive control for autonomous systems [19 ###reference_b19###, 20 ###reference_b20###]. DDP relies on Bellman\u2019s principle of optimality and the necessary conditions for optimal control problems, assuming all functions defined in the problem are smooth or at least twice continuously differentiable.\nSince original DDP approaches do not consider system state and input constraints, various methods have been developed to handle constraints efficiently in DDP. The augmented Lagrangian (AL) method is used in [21 ###reference_b21###], while the Karush-Kuhn-Tucker (KKT) condition is employed in [22 ###reference_b22###]. 
In [20 ###reference_b20###], a method combining the AL method with the KKT condition is proposed. The interior point differential dynamic programming (IPDDP) algorithm [23 ###reference_b23###], used in this work, is based on the KKT condition. IPDDP, summarized in Section II ###reference_###, incorporates all Lagrangian and barrier terms into the Q-function and solves a minimax problem.\n\n###figure_1### The main contributions of this paper can be summarized as follows:\nHybrid Path Planning Method: A novel hybrid path planning method is proposed. This method generates collision-free smooth trajectories by integrating sampling-based trajectory optimization using Model Predictive Path Integral (MPPI) and gradient-based smooth optimization (IPDDP).\nCollision-Free Convex Path Corridors: WA new method for constructing collision-free convex path corridors is introduced. This method leverages sampling-based optimization with variational inference to ensure the path is safe from obstacles.\nEffectiveness Demonstration: MPPI-IPDDP is demonstrated to be effective through two numerical case studies. These studies showcase the practical applicability and performance of the method in generating feasible and smooth trajectories.\nOpen-Sourced Codes: The C++ and MATLAB codes for the proposed MPPI-IPDDP solver are made available as open-source. This allows readers to replicate the results presented in the paper and customize the solution for their own robotic applications.\nThe remainder of this paper is organized as follows: Section II ###reference_### reviews sampling-based optimization via variational inference and IPDDP. Section III ###reference_### presents our path planning method, MPPI-IPDDP, for generating collision-free smooth trajectories. In Section IV ###reference_###, the effectiveness of the proposed MPPI-IPDDP is demonstrated through simulations in various environments and compared with other MPPI variants and NLP-based solvers. 
Section V ###reference_### discusses the remaining challenges and practical limitations. Finally, Section VI ###reference_### concludes the paper with suggestions for future work."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "II Preliminaries",
|
| 15 |
+
"text": ""
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "2.1",
|
| 19 |
+
"parent_section_id": "2",
|
| 20 |
+
"section_name": "II-A Sampling-based Optimization via Variational Inference",
|
| 21 |
+
"text": "An optimization problem can be reformulated as an inference problem and solved using the variational inference method [24 ###reference_b24###, 25 ###reference_b25###]. To achieve this, we introduce a binary random variable that indicates optimality, where represents the probability of optimality. For simplicity, we denote this probability as .\nIn this paper, we consider two different cases of variational inference (VI) for stochastic optimal control: VI for finite-dimensional optimization, where the decision variable is a parameter vector, and VI for trajectory optimization, where the goal is to generate an optimal trajectory for a control system.\nThe baseline methodology for these VI approaches is based on Model Predictive Path Integral (MPPI) control, which serves as a sampling-based framework for stochastic control problems [15 ###reference_b15###, 16 ###reference_b16###, 26 ###reference_b26###]. MPPI leverages importance sampling techniques to iteratively update control policies, making it well-suited for handling the probabilistic nature of the control tasks in both finite-dimensional optimization and trajectory optimization contexts."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2.1.1",
|
| 25 |
+
"parent_section_id": "2.1",
|
| 26 |
+
"section_name": "II-A1 VI for Finite-dimensional Optimization",
|
| 27 |
+
"text": "Let be a vector of decision variables. For variational inference corresponding to stochastic optimization or optimal control, the goal is to find the target distribution 111We will abuse the terminology of distributions (probability measure) and probability density functions. defined as\nLet be the likelihood function and be the empirical approximation of that is computed from samples that are drawn from the prior . Then, can be represented as\nwhere is the Dirac delta function, and is the number of samples.\nReplacing , we approximate with the forward KL divergence:\nIf a normal distribution is chosen for parameterizing the policy , then we get the closed-form solution for the optimal policy where\nIn this paper, this VI-based stochastic optimization method is used for constructing collision-free convex path-corridors in Section III-B ###reference_###."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "2.1.2",
|
| 31 |
+
"parent_section_id": "2.1",
|
| 32 |
+
"section_name": "II-A2 VI for Trajectory Optimization",
|
| 33 |
+
"text": "Let be a trajectory consisting of a sequence of controlled states and a sequence of control inputs over a finite time-horizon . The goal is to find the target distribution where represents stochastic dynamics:\nLet . Then can be rewritten as\nThe closed-form solution for the above optimization is given by\nLet be the empirical distribution of approximated with samples drawn from the prior . Then, can be represented as\nReplacing , we approximate with the forward KL divergence.\nIf the normal distribution is chosen for , then we get the closed form solution of where\nIn this paper, this VI-based trajectory optimization is applied for MPPI [15 ###reference_b15###, 16 ###reference_b16###] to generate a locally optimal trajectory in Section III-A ###reference_###."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "2.1.3",
|
| 37 |
+
"parent_section_id": "2.1",
|
| 38 |
+
"section_name": "II-A3 Additional Notes",
|
| 39 |
+
"text": "One of the most common choices for the likelihood function is where is a cost function and is known as the inverse temperature. With this likelihood function, the weight in Sections II-A1 ###reference_.SSS1### and II-A2 ###reference_.SSS2### can be interpreted as the likelihood ratio corresponding to the sampled candidate or , respectively. This implies that the lower the value of the higher the likelihood of being optimal at an exponential rate.\nSince this sampling-based optimization scheme is iterative, the distribution should influence the prior in the next iteration, ensuring that eventually reaches a locally optimal point. In this paper, we assume normal distributions for both the prior and posterior, propagating only the mean while using a fixed covariance . We do not perform empirical adaptation as outlined in (4 ###reference_###) and (10 ###reference_###)."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "2.2",
|
| 43 |
+
"parent_section_id": "2",
|
| 44 |
+
"section_name": "II-B Interior Point Differential Dynamic Programming",
|
| 45 |
+
"text": "IPDDP introduced in [23 ###reference_b23###] can be used to solve a standard discrete-time optimal control problem (OCP) given as\nwhere the variables and are the system state and the control input vector at time-step , respectively, and is the initial condition for the control system. Let denote the decision vector as that is the concatenation of sequential control inputs over a time horizon . The real-valued functions and are the final and stage cost functions, respectively, and defines the controlled state transitions. The vector-valued function defines inequality constraints where denotes the number of constraints. All functions defined in (11 ###reference_###) are assumed to be twice continuously differentiable.\nIn dynamic programming perspectives, the OCP (11 ###reference_###) can be converted into the Bellman equation form at time with a given state as follows:\nwhere is a value function for the next state and are slack variables. At the final stage, the value function is defined as .\nFor notational convenience, we drop the time index in the remainder of this section, with the understanding that all functions and variables remain time-dependent. The relaxed Lagrangian with the log-barrier terms of is defined by the following -function:\nwhere is the barrier parameter and is the Lagrangian multiplier. The relaxed value function is defined by a saddle point of the -function:"
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "2.2.1",
|
| 49 |
+
"parent_section_id": "2.2",
|
| 50 |
+
"section_name": "II-B1 Backward Pass",
|
| 51 |
+
"text": "As in the standard DDP scheme, is perturbed up to the quadratic terms at the current nominal points:\nwhere is an all-ones vector and is a diagonal matrix associated with the vector .\nBy setting where , the step direction that satisfy the extremum condition corresponding to the first-order optimality is determined by the following primal-dual KKT system:\nSolving the KKT system (25 ###reference_###) for , we obtain\nwhere the coefficient matrices and vectors are defined as\nwith the intermediate parameters and vectors\nHere, and are known as the primal and dual residuals, respectively.\nThe KKT variables and can be rewritten as\nSubstituting above into the quadratic form in (20 ###reference_###) and setting result in another representation for the perturbed quadratic form:\nwhere and .\nFinally, we obtain the perturbed value function as follows:\nwhere the coefficients are given as\nThis perturbed value function is recursively used for at the next backward step."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "2.2.2",
|
| 55 |
+
"parent_section_id": "2.2",
|
| 56 |
+
"section_name": "II-B2 Forward Pass",
|
| 57 |
+
"text": "After calculating the perturbations in the backward pass, the nominal points are updated as follows:\n\nwhere represents the step size. In IPDDP, the value of is determined by the filter line-search method [1 ###reference_b1###]. This method starts with a step size of 1 and reduces incrementally. Updates are accepted as soon as they decrease either the cost or the violations of constraints. If no suitable is found, the forward pass is terminated and deemed unsuccessful."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "2.2.3",
|
| 61 |
+
"parent_section_id": "2.2",
|
| 62 |
+
"section_name": "II-B3 Convergence",
|
| 63 |
+
"text": "The barrier parameter is monotonically decreased whenever the local convergence to the central path has been achieved. The criterion for the local convergence is for some . The global convergence agrees with the sufficiently small ."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "2.2.4",
|
| 67 |
+
"parent_section_id": "2.2",
|
| 68 |
+
"section_name": "II-B4 Regularization",
|
| 69 |
+
"text": "To guarantee that is invertible in (II-B1 ###reference_###), the regularization parameter is added: . The parameter increases when it is not invertible or the failure has occurred in the forward pass. If reaches some upper bound , IPDDP is terminated for failure."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "3",
|
| 73 |
+
"parent_section_id": null,
|
| 74 |
+
"section_name": "III Collision-free Smooth Trajectory Generation",
|
| 75 |
+
"text": "This section considers the following OCP:\nwhere the variables and are the system state and the control input vector at time-step , respectively, and is the initial condition for the control system. is the position of a robot, and is the set of positions occupied by obstacles. The functions , , and are defined as (11 ###reference_###). Notice that, unlike in (11 ###reference_###), the joint state-control constraints are decoupled into the state constraint and the input (control) constraint .\nThe proposed algorithm for solving (33 ###reference_###) has three steps: searching for a feasible coarse trajectory using MPPI, constructing path corridors, and smoothing the coarse trajectory by IPDDP."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "3.1",
|
| 79 |
+
"parent_section_id": "3",
|
| 80 |
+
"section_name": "III-A Model Predictive Path Integral",
|
| 81 |
+
"text": "We first generate a coarse trajectory using MPPI. The cost function is defined as\nwhere the indicator function is defined as\nensuring obstacle avoidance and the sequence of the states are determined by the initial state , the dynamics , and the controls .\nTo satisfy the control constraints in (33 ###reference_###), each th sample of control sequence vector is projected onto the constraint set, i.e. where is a projection operator onto the feasible set of controls . We assume that the set is compact and convex, ensuring that the projection is well-defined. This assumption allows us to leverage analytical solutions for projection, particularly in cases involving simple constraints like box constraints or second-order conic constraints.\nWith the method described in Section II-A2 ###reference_.SSS2###, locally optimal controls and corresponding states are obtained. Let be the resulting position of a robot from MPPI."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "3.2",
|
| 85 |
+
"parent_section_id": "3",
|
| 86 |
+
"section_name": "III-B Path Corridors",
|
| 87 |
+
"text": "###figure_2### ###figure_3### To construct corridors around the path , the following optimization problem is considered:\nwhere the indicator function for a radial collision-free corridor is defined as\nThe parameters are the weights, and is the maximum value of . Although the shape of the corridors can be arbitrary, here we choose a Euclidean ball which is represented by two variables: center and radius . The optimization problem (36 ###reference_###) is designed to enlarge the ball and have the center close to while containing inside the ball without intersection with obstacles (see Fig. 2 ###reference_###). If there are no obstacles around , then the solution is and .\nWe use the method described in Section II-A1 ###reference_.SSS1### with to solve the optimization problem (36 ###reference_###) at each stage of path planning to compute a sequence of collision-free corridors that are represented by and . As in MPPI, the constraints on in (36 ###reference_###) can be met by projection that is defined as ."
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "3.3",
|
| 91 |
+
"parent_section_id": "3",
|
| 92 |
+
"section_name": "III-C Trajectory Smoothing",
|
| 93 |
+
"text": "In our final step of trajectory optimization for path planning, we consider the following OCP for smoothing the coarse trajectory generated by MPPI:\nwhere is, again, the position of a robot, are the center and radius of the path corridor computed in (36 ###reference_###), and is a weight matrix penalizing deviations from the center of the corridor. We include the constraint in the last row of (38 ###reference_###) to keep the robot inside the collision-free corridors.\nWe use IPDDP introduced in Section II-B ###reference_### to solve (38 ###reference_###) and obtain a smooth trajectory.\nAt this point, the coarse trajectory from MPPI can be used for an initial guess, i.e., a warm start for local optimization, which can greatly accelerate the convergence of IPDDP."
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "3.4",
|
| 97 |
+
"parent_section_id": "3",
|
| 98 |
+
"section_name": "III-D Algorithms",
|
| 99 |
+
"text": "Algorithm 1 ###reference_### outlines the proposed trajectory optimization method, named MPPI-IPDDP, which is designed to generate collision-free, smooth trajectories. The algorithm includes three subroutines. First, MPPI employs a derivative-free variational inference approach to search for a dynamically feasible but coarse trajectory. Second, Corridor also utilizes derivative-free variational inference to construct collision-free circular corridors around the coarse trajectory. Lastly, IPDDP uses a recursive method to smooth the coarse trajectory within these corridors. As demonstrated in the supplementary video, the proposed MPPI-IPDDP method has been verified to be capable of online replanning for low-speed robots."
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"section_id": "4",
|
| 103 |
+
"parent_section_id": null,
|
| 104 |
+
"section_name": "IV Case Studies",
|
| 105 |
+
"text": ""
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"section_id": "4.1",
|
| 109 |
+
"parent_section_id": "4",
|
| 110 |
+
"section_name": "IV-A Wheeled Mobile Robot",
|
| 111 |
+
"text": "###figure_4### ###figure_5### ###figure_6### ###figure_7### ###figure_8### For an example of path planning in 2D space, we consider a scenario in which a differential wheeled robot arrives at a given target pose without collision. Consider the robot kinematics\nwhere are the positions of the x-axis and y-axis respectively, is the angle of the orientation, are velocity and angular velocity respectively, and is the time interval. The vectors and are states and controls respectively. We set the initial states as and sampling-time interval .\nThe constraints of the corresponding OCP for trajectory generation are defined as\nwhere is the set of obstacles shown in Fig. 3 ###reference_### in gray.\nThe cost functions of the corresponding OCP for trajectory generation are defined as\nwhere is the target pose. The parameters for the MPPI-IPDDP method are given in Tab. I ###reference_###.\nFig. 3 ###reference_### shows the processing results of generating a smooth trajectory. Fig. 5 ###reference_### gives a comparison between the zigzagging controls obtained from MPPI and the smoother ones by IPDDP. Fig. 5 ###reference_### shows that the cost and constraint violations decrease over MPPI-IPDDP iterations."
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"section_id": "4.2",
|
| 115 |
+
"parent_section_id": "4",
|
| 116 |
+
"section_name": "IV-B Quadrotor without Attitude",
|
| 117 |
+
"text": "###figure_9### ###figure_10### ###figure_11### ###figure_12### ###figure_13### For an example of path planning in 3D space, we consider a scenario in which a quadrotor arrives at a given target position without collision.\nWe assume that the quadrotor can be modeled as a point mass. The kinematics is given by\nwhere are position and velocity respectively, is acceleration, is the gravitational acceleration, and is the vector of -axis. and are the state and control respectively. We set the initial state as and .\nThe constraints of the corresponding OCP for trajectory generation are defined as and with\nwhere and are the maximum value of acceleration and thrust angle, respectively.\nThis ensures the acceleration vector of the quadrotor remains within a defined conic region , and is the set of obstacles shown in Fig. 6 ###reference_### in gray. When the projection is performed to satisfy the conic constraint in (42 ###reference_###), we consider the following projection operator for the second-order cone with and :\nfor where and are a vector of a compatible dimension and a scalar. Notice that a projection might result in if . To handle this, we actually introduce a slack variable and consider the constraints in an extended space:\n\nwhere and .\nThe cost functions of the corresponding OCP for trajectory generation are defined as\nwhere is the target position. The parameters for the MPPI-IPDDP method are given in Tab. II ###reference_###.\nFig. 6 ###reference_### illustrates the process of generating a smooth trajectory. Fig. 8 ###reference_### compares the noisy control inputs generated by MPPI with the smoothed controls produced by IPDDP. Fig. 8 ###reference_### demonstrates how the cost and constraint violations decrease over successive iterations of the MPPI-IPDDP method."
|
| 118 |
+
},
|
| 119 |
+
{
|
| 120 |
+
"section_id": "4.3",
|
| 121 |
+
"parent_section_id": "4",
|
| 122 |
+
"section_name": "IV-C Comparative Study with Other MPPI Variants",
|
| 123 |
+
"text": "Considering the same scenario of a wheeled mobile robot given in Section IV-A ###reference_###, we compare the proposed MPPI-IPDDP with other existing MPPI methods (vanilla MPPI [15 ###reference_b15###], Log-MPPI [27 ###reference_b27###] and Smooth-MPPI [28 ###reference_b28###]) in terms of the computing time and smoothness.\nTo evaluate smoothness of generated trajectory, the following Mean Squared Curvature (MSC) was used:\nAt every step of open-loop trajectory generation, we defined the success condition in terms of the computing time and the distance from the target pose ,\n\nwhere and are predefined thresholds.\n###figure_14### ###figure_15### * Q1, Q2, and Q3 represent the first, second (median), and third quartiles, respectively, of the average computing time and mean squared cost (MSC), calculated from data consisting only of successful simulations.\n###figure_16### ###figure_17### ###figure_18### ###figure_19### For statistical comparisons of algorithmic performances, numerous simulations with varying parameters of MPPI algorithms were conducted. The number of MPPI samples () increased from 100 to 25600 by doubling at each step. The covariance matrix of control varied from to , where is the identity matrix of a compatible dimension. Fig. 10 ###reference_### shows the overall performance comparisons of four MPPI methods in terms of the success rate, computing time and trajectory smoothness with different number of samples and control covariance .\nBased on the simulation-based statistical analysis presented in Tab. III ###reference_### and Fig. 9 ###reference_###, although the first quartile (Q1) statistics of computing time were relatively slow, our MPPI-IPDDP method outperformed the other three MPPI methods in both computing time and the smoothness of trajectory generation. 
This implies that while the MPPI-IPDDP may have a slower start in some cases, it ultimately provides superior performance overall, achieving faster computations and smoother trajectories compared to the alternative MPPI methods. In addition, the performance of MPPI-IPDDP is less sensitive to changes in the MPPI parameters and . This means that the method is more robust and reliable across different settings of these parameters, as illustrated in Fig. 10 ###reference_###.\nWe also tested the proposed MPPI-IPDDP in 300 different scenarios of the BARN dataset [29 ###reference_b29###] and compared it with other MPPI methods in terms of success rate, computing time and trajectory MSC. The parameter values of and were customized for each method to optimize its performance. This extensive testing allowed us to assess the robustness and efficiency of the MPPI-IPDDP approach across a wide variety of challenging environments, ensuring that the method was evaluated under diverse conditions. The customized parameters helped each method perform at its best, providing a fair and comprehensive comparison.\nThe time horizon was set to , the maximum velocity of was reduced to 1.0 , and each state was defined as and . We expanded the map to be from with additional free space to prevent collision in initial and finish states. The map was also inflated to account for the size of the robot. To properly correspond with the cost calculation in the Corridor, a distance field was also calculated on the map.\nBased on the results of the parameter variation tests, we selected the optimal parameters that yielded the best performance in terms of success rate and smoothness. The results with the BARN dataset indicate that MPPI-IPDDP can generate smooth trajectories in various environments. Although it is more time-consuming than MPPI and Log-MPPI, MPPI-IPDDP produces the smoothest trajectories while using less time compared to Smooth-MPPI."
|
| 124 |
+
},
|
| 125 |
+
{
|
| 126 |
+
"section_id": "4.4",
|
| 127 |
+
"parent_section_id": "4",
|
| 128 |
+
"section_name": "IV-D Comparative Study with NLP-based Solvers",
|
| 129 |
+
"text": "###figure_20### ###figure_21### ###figure_22### ###table_1### In addition to comparisons with other MPPI variants, we also evaluated our hybrid trajectory optimization method against existing state-of-the-art (SOTA) NLP-based methods from a local planning perspective using a receding horizon scheme. Specifically, we compared our method with two baselines: IPOPT [1 ###reference_b1###] and IPDDP [23 ###reference_b23###].\nFor this comparison, we formulated a point-to-point 2D navigation problem for a simple unicycle model in a cluttered environment.222To ensure a fair comparison, we used MATLAB for all three methods. Specifically, since IPOPT [1 ###reference_b1###] and IPDDP [23 ###reference_b23###] were implemented using a MATLAB interface, we also employed a MATLAB version of the MPPI-IPDDP algorithm instead of a C++ version. IPOPT is written in C++ and uses a MATLAB interface for problem formulation, while the MPPI-IPDDP used in the comparisons for Tab. V ###reference_### and Fig. 11 ###reference_### is entirely implemented in MATLAB. Similarly, IPDDP is also written in MATLAB, which leads to slower execution times compared to C++ implementations.\nWe treated the obstacle avoidance sub-problem as a constraint for the two gradient-based solvers, considering smooth ball-type obstacles. We set the same iteration limit and horizon length with random initial guesses for both solvers and our method. Simulations were conducted until the robot reached the desired position in two environments, as shown in Figs. 11a ###reference_.sf1### and 11b ###reference_.sf2###. Both figures depict closed-loop position trajectories resulting from the implementation of a receding horizon scheme. Due to the dependency of NLP-based solvers on initial guesses, the robot sometimes failed to reach the goal point. Fig. 
11a ###reference_.sf1### illustrates that gradient-based solvers can fail in cases of conflicting gradients, whereas our method can escape these trapped situations regardless of the initial guesses.\nTo ensure a fair evaluation of computational time and smoothness, we compared the methods in the same environment (Fig. 11b ###reference_.sf2###). Comparisons of the average, minimum, and maximum computing times, as well as the MSC as a smoothness index, are presented in Tab. V ###reference_###. We calculated the MSC for both closed-loop position trajectories and open-loop control input trajectories, particularly for angular velocity. The results show that our method is computationally stable and produces smoother control input trajectories compared to the other methods, as also illustrated in Fig. 11c ###reference_.sf3###."
|
| 130 |
+
},
|
| 131 |
+
{
|
| 132 |
+
"section_id": "5",
|
| 133 |
+
"parent_section_id": null,
|
| 134 |
+
"section_name": "Discussion and Future Work",
|
| 135 |
+
"text": ""
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"section_id": "5.1",
|
| 139 |
+
"parent_section_id": "5",
|
| 140 |
+
"section_name": "Remaining Challenges",
|
| 141 |
+
"text": "There are still several remaining issues that should be further challenged.\nThe proposed algorithm involves three iterative stages, making computation time demanding on a CPU. However, using a GPU for the MPPI stage to leverage massive parallel computation can significantly reduce processing time. The number of iterations needed for IPDDP is relatively low because the initial trajectory input is close to a local optimal solution.\nThe closer a robot is to obstacles, the higher the likelihood of failure in generating corridors. When a robot makes close contact with obstacles, it becomes challenging to sample a corridor that includes the robot but excludes the obstacle. Alternatively, a soft constraint to keep the robot inside the corridor can be adaptively relaxed by reducing the weight in (36 ###reference_###), whenever the robot gets close to an obstacle.\nFor more precise planning of safety-critical missions, uncertainties induced by modeling errors and external disturbances should be explicitly considered. In our MPPI-IPDDP framework, uncertainties could be addressed in the MPPI, Corridor, or IPDDP steps: (a) In MPPI with uncertainty, the cost evaluation of (34 ###reference_###) in Alg. 2 ###reference_### should include a risk-sensitive term that accounts for uncertainties in dynamics and obstacles; (b) In the Corridor step with uncertainty, the cost evaluation of (36 ###reference_###) in Alg. 3 ###reference_### should be modified to account for uncertainties in obstacle configurations; and (c) In IPDDP with uncertainty, approaches similar to those used in tube-based robust MPC [30 ###reference_b30###] and chance-constrained stochastic MPC [31 ###reference_b31###, 32 ###reference_b32###] could be employed to handle uncertainties in planning. 
However, this may result in conservative constraints due to increasing uncertainty propagation over the horizon.\nAt the current stage, our focus is on single-robot trajectory optimization, not multi-robot motion planning. In the future, we plan to extend the proposed method to multi-robot trajectory optimization in both cooperative and competitive settings."
|
| 142 |
+
},
|
| 143 |
+
{
|
| 144 |
+
"section_id": "6",
|
| 145 |
+
"parent_section_id": null,
|
| 146 |
+
"section_name": "VI Conclusions",
|
| 147 |
+
"text": "In this paper, we introduced MPPI-IPDDP, a new hybrid optimization-based local path planning method designed to generate collision-free, smooth, and optimal trajectories. Through two case studies, we demonstrated the effectiveness of the proposed MPPI-IPDDP in environments with complex obstacle layouts. However, there is still room for improvement. As discussed, incorporating Stein Variational Gradient Descent (SVGD) could enhance exploration capabilities. Additionally, addressing planning under uncertainty remains a key challenge. Future work will focus on applying the MPPI-IPDDP algorithm in real-world hardware implementations and integrating it with a global planner."
|
| 148 |
+
}
|
| 149 |
+
],
|
| 150 |
+
"appendix": [],
|
| 151 |
+
"tables": {
|
| 152 |
+
"1": {
|
| 153 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T1\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table I: </span>Parameters for trajectory optimization of a wheeled mobile robot in Section\u00a0<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2208.02439v2#S4.SS1\" title=\"IV-A Wheeled Mobile Robot \u2023 IV Case Studies \u2023 MPPI-IPDDP: A Hybrid Method of Collision-Free Smooth Trajectory Generation for Autonomous Robots\"><span class=\"ltx_text ltx_ref_tag\"><span class=\"ltx_text\">IV-A</span></span></a>.</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.T1.14.14\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T1.14.14.15.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_th_row ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T1.14.14.15.1.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.14.14.15.1.1.1\">Parameter</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_th_row ltx_border_rr ltx_border_t\" id=\"S4.T1.14.14.15.1.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.14.14.15.1.2.1\">Value</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T1.14.14.15.1.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.14.14.15.1.3.1\">Parameter</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T1.14.14.15.1.4\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.14.14.15.1.4.1\">Value</span></th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T1.2.2.2\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T1.1.1.1.1\" 
style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_rr ltx_border_t\" id=\"S4.T1.2.2.2.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">20</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.2.2.2.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.2.2.2.4\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">35</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.4.4.4\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T1.3.3.3.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_rr ltx_border_t\" id=\"S4.T1.4.4.4.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.5</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.4.4.4.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.4.4.4.4\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">50</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.6.6.6\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T1.5.5.5.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_rr ltx_border_t\" id=\"S4.T1.6.6.6.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">1000</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.6.6.6.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.6.6.6.4\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">100</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.9.9.9\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_l 
ltx_border_r ltx_border_t\" id=\"S4.T1.7.7.7.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_rr ltx_border_t\" id=\"S4.T1.9.9.9.4\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">5000</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.8.8.8.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.9.9.9.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.12.12.12\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T1.10.10.10.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_rr ltx_border_t\" id=\"S4.T1.12.12.12.4\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">3000</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.11.11.11.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.12.12.12.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.14.14.14\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_b ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T1.13.13.13.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_b ltx_border_rr ltx_border_t\" id=\"S4.T1.14.14.14.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></th>\n<td class=\"ltx_td ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T1.14.14.14.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></td>\n<td class=\"ltx_td ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T1.14.14.14.4\" 
style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 154 |
+
"capture": "Table I: Parameters for trajectory optimization of a wheeled mobile robot in Section\u00a0IV-A."
|
| 155 |
+
},
|
| 156 |
+
"2": {
|
| 157 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T2\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table II: </span>Parameters for trajectory optimization of a quadrator in Section\u00a0<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2208.02439v2#S4.SS2\" title=\"IV-B Quadrotor without Attitude \u2023 IV Case Studies \u2023 MPPI-IPDDP: A Hybrid Method of Collision-Free Smooth Trajectory Generation for Autonomous Robots\"><span class=\"ltx_text ltx_ref_tag\"><span class=\"ltx_text\">IV-B</span></span></a>.</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.T2.14.14\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T2.14.14.15.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_th_row ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T2.14.14.15.1.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.14.14.15.1.1.1\">Parameter</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_th_row ltx_border_rr ltx_border_t\" id=\"S4.T2.14.14.15.1.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.14.14.15.1.2.1\">Value</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T2.14.14.15.1.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.14.14.15.1.3.1\">Parameter</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T2.14.14.15.1.4\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.14.14.15.1.4.1\">Value</span></th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T2.2.2.2\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T2.1.1.1.1\" 
style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_rr ltx_border_t\" id=\"S4.T2.2.2.2.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">20</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T2.2.2.2.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T2.2.2.2.4\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">35</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.4.4.4\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T2.3.3.3.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_rr ltx_border_t\" id=\"S4.T2.4.4.4.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.5</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T2.4.4.4.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T2.4.4.4.4\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">30</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.6.6.6\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T2.5.5.5.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_rr ltx_border_t\" id=\"S4.T2.6.6.6.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">1000</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T2.6.6.6.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T2.6.6.6.4\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">100</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.9.9.9\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_l 
ltx_border_r ltx_border_t\" id=\"S4.T2.7.7.7.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_rr ltx_border_t\" id=\"S4.T2.9.9.9.4\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">8000</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T2.8.8.8.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T2.9.9.9.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.12.12.12\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T2.10.10.10.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_rr ltx_border_t\" id=\"S4.T2.12.12.12.4\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">5000</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T2.11.11.11.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T2.12.12.12.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.14.14.14\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_b ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T2.13.13.13.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_b ltx_border_rr ltx_border_t\" id=\"S4.T2.14.14.14.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></th>\n<td class=\"ltx_td ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T2.14.14.14.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></td>\n<td class=\"ltx_td ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T2.14.14.14.4\" 
style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 158 |
+
"capture": "Table II: Parameters for trajectory optimization of a quadrator in Section\u00a0IV-B."
|
| 159 |
+
},
|
| 160 |
+
"3": {
|
| 161 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T3\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table III: </span>Comparison of computing time and smoothness for different MPPI methods.</figcaption><div class=\"ltx_flex_figure\">\n<div class=\"ltx_flex_cell ltx_flex_size_1\">\n<table class=\"ltx_tabular ltx_centering ltx_figure_panel ltx_guessed_headers ltx_align_middle\" id=\"S4.T3.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T3.1.1.1\">\n<th class=\"ltx_td ltx_nopad_r ltx_th ltx_th_column ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T3.1.1.1.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"></th>\n<th class=\"ltx_td ltx_nopad_l ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T3.1.1.1.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"></th>\n<th class=\"ltx_td ltx_nopad_l ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T3.1.1.1.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">MPPI</th>\n<th class=\"ltx_td ltx_nopad_l ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T3.1.1.1.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S4.T3.1.1.1.4.1\">\n<tr class=\"ltx_tr\" id=\"S4.T3.1.1.1.4.1.1\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.1.1.1.4.1.1.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">Log-</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.1.1.1.4.1.2\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.1.1.1.4.1.2.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">MPPI</td>\n</tr>\n</table>\n</th>\n<th class=\"ltx_td ltx_nopad_l ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T3.1.1.1.5\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S4.T3.1.1.1.5.1\">\n<tr class=\"ltx_tr\" id=\"S4.T3.1.1.1.5.1.1\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.1.1.1.5.1.1.1\" 
style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">Smooth-</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.1.1.1.5.1.2\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.1.1.1.5.1.2.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">MPPI</td>\n</tr>\n</table>\n</th>\n<th class=\"ltx_td ltx_nopad_l ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T3.1.1.1.6\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S4.T3.1.1.1.6.1\">\n<tr class=\"ltx_tr\" id=\"S4.T3.1.1.1.6.1.1\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.1.1.1.6.1.1.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">MPPI-</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.1.1.1.6.1.2\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.1.1.1.6.1.2.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">IPDDP</td>\n</tr>\n</table>\n</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T3.1.2.1\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T3.1.2.1.1\" rowspan=\"3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text\" id=\"S4.T3.1.2.1.1.1\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T3.1.2.1.1.1.1\">\n<span class=\"ltx_tr\" id=\"S4.T3.1.2.1.1.1.1.1\">\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T3.1.2.1.1.1.1.1.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">Avg comp</span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.1.2.1.1.1.1.2\">\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T3.1.2.1.1.1.1.2.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">time [sec]</span></span>\n</span></span></td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.1.2.1.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">Q1</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.1.2.1.3\" 
style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">0.026586</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.1.2.1.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">0.017963</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.1.2.1.5\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">0.073111</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.1.2.1.6\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">0.244655</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.1.3.2\">\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.1.3.2.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">Q2</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.1.3.2.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">0.855942</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.1.3.2.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">1.15359</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.1.3.2.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">0.82165</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.1.3.2.5\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">0.408745</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.1.4.3\">\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.1.4.3.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">Q3</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.1.4.3.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">5.426678</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.1.4.3.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">6.451571</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r 
ltx_border_t\" id=\"S4.T3.1.4.3.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">6.881098</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.1.4.3.5\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">1.000769</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.1.5.4\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center ltx_border_b ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T3.1.5.4.1\" rowspan=\"3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text\" id=\"S4.T3.1.5.4.1.1\">MSC</span></td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.1.5.4.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">Q1</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.1.5.4.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">0.001468</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.1.5.4.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">0.003742</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.1.5.4.5\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">0.001631</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.1.5.4.6\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">0.000016</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.1.6.5\">\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.1.6.5.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">Q2</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.1.6.5.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">0.006526</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.1.6.5.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">0.008639</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.1.6.5.4\" 
style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">0.004582</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.1.6.5.5\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">0.000025</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.1.7.6\">\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T3.1.7.6.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">Q3</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T3.1.7.6.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">0.01098</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T3.1.7.6.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">0.013666</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T3.1.7.6.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">0.009768</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T3.1.7.6.5\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">0.000078</td>\n</tr>\n</tbody>\n</table>\n</div>\n<div class=\"ltx_flex_break\"></div>\n<div class=\"ltx_flex_cell ltx_flex_size_1\">\n<p class=\"ltx_p ltx_figure_panel ltx_align_center\" id=\"S4.T3.2\"><span class=\"ltx_text\" id=\"S4.T3.2.1\" style=\"font-size:50%;\">* Q1, Q2, and Q3 represent the first, second (median), and third quartiles, respectively, of the average computing time and mean squared cost (MSC), calculated from data consisting only of successful simulations. </span></p>\n</div>\n</div>\n</figure>",
|
| 162 |
+
"capture": "Table III: Comparison of computing time and smoothness for different MPPI methods."
|
| 163 |
+
},
|
| 164 |
+
"4": {
|
| 165 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T4\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table IV: </span>Performance comparison of MPPI methods in the BARN dataset\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2208.02439v2#bib.bib29\" title=\"\">29</a>]</cite>.</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.T4.2\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T4.2.3.1\">\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T4.2.3.1.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></th>\n<th class=\"ltx_td ltx_nopad_l ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T4.2.3.1.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">MPPI</th>\n<th class=\"ltx_td ltx_nopad_l ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T4.2.3.1.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S4.T4.2.3.1.3.1\">\n<tr class=\"ltx_tr\" id=\"S4.T4.2.3.1.3.1.1\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.2.3.1.3.1.1.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">Log-</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.2.3.1.3.1.2\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.2.3.1.3.1.2.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">MPPI</td>\n</tr>\n</table>\n</th>\n<th class=\"ltx_td ltx_nopad_l ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T4.2.3.1.4\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S4.T4.2.3.1.4.1\">\n<tr class=\"ltx_tr\" id=\"S4.T4.2.3.1.4.1.1\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.2.3.1.4.1.1.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">Smooth-</td>\n</tr>\n<tr class=\"ltx_tr\" 
id=\"S4.T4.2.3.1.4.1.2\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.2.3.1.4.1.2.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">MPPI</td>\n</tr>\n</table>\n</th>\n<th class=\"ltx_td ltx_nopad_l ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T4.2.3.1.5\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S4.T4.2.3.1.5.1\">\n<tr class=\"ltx_tr\" id=\"S4.T4.2.3.1.5.1.1\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.2.3.1.5.1.1.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">MPPI-</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.2.3.1.5.1.2\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.2.3.1.5.1.2.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">IPDDP</td>\n</tr>\n</table>\n</th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T4.1.1.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></th>\n<th class=\"ltx_td ltx_nopad_l ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T4.1.1.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">3200</th>\n<th class=\"ltx_td ltx_nopad_l ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T4.1.1.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">3200</th>\n<th class=\"ltx_td ltx_nopad_l ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T4.1.1.4\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">12800</th>\n<th class=\"ltx_td ltx_nopad_l ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T4.1.1.5\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">1600</th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.2.2\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T4.2.2.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></th>\n<th class=\"ltx_td 
ltx_nopad_l ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T4.2.2.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.2</th>\n<th class=\"ltx_td ltx_nopad_l ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T4.2.2.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.1</th>\n<th class=\"ltx_td ltx_nopad_l ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T4.2.2.4\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.3</th>\n<th class=\"ltx_td ltx_nopad_l ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T4.2.2.5\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.4</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T4.2.4.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T4.2.4.1.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">Success ratio [%]</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T4.2.4.1.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">97</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T4.2.4.1.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">97</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T4.2.4.1.4\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">91.3</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T4.2.4.1.5\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">95.7</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.2.5.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T4.2.5.2.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">Q1 Time [sec]</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T4.2.5.2.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.121522</td>\n<td class=\"ltx_td 
ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T4.2.5.2.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.169059</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T4.2.5.2.4\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.446100</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T4.2.5.2.5\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.274369</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.2.6.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T4.2.6.3.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">Q2 Time [sec]</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T4.2.6.3.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.139242</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T4.2.6.3.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.197344</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T4.2.6.3.4\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.511798</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T4.2.6.3.5\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.299426</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.2.7.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T4.2.7.4.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">Q3 Time [sec]</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T4.2.7.4.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.179549</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T4.2.7.4.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.232929</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T4.2.7.4.4\" 
style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.598198</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T4.2.7.4.5\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.346971</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.2.8.5\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T4.2.8.5.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">MSC</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T4.2.8.5.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.002528</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T4.2.8.5.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.003089</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T4.2.8.5.4\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.005856</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T4.2.8.5.5\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.000139</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 166 |
+
"capture": "Table IV: Performance comparison of MPPI methods in the BARN dataset\u00a0[29]."
|
| 167 |
+
},
|
| 168 |
+
"5": {
|
| 169 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T5\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table V: </span>Comparison of computing time and smoothness with NLP-based Solvers.</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_align_middle\" id=\"S4.T5.1\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T5.1.1.1\">\n<td class=\"ltx_td ltx_nopad_r ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T5.1.1.1.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></td>\n<td class=\"ltx_td ltx_nopad_l ltx_border_r ltx_border_t\" id=\"S4.T5.1.1.1.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"></td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T5.1.1.1.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">IPDDP</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T5.1.1.1.4\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">IPOPT</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T5.1.1.1.5\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">MPPI-IPDDP</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.2.2\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T5.1.2.2.1\" rowspan=\"3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"><span class=\"ltx_text\" id=\"S4.T5.1.2.2.1.1\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T5.1.2.2.1.1.1\">\n<span class=\"ltx_tr\" id=\"S4.T5.1.2.2.1.1.1.1\">\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.2.2.1.1.1.1.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">Comp</span></span>\n<span class=\"ltx_tr\" id=\"S4.T5.1.2.2.1.1.1.2\">\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.2.2.1.1.1.2.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">time [sec]</span></span>\n</span></span></td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r 
ltx_border_t\" id=\"S4.T5.1.2.2.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">Mean</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T5.1.2.2.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">1.64</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T5.1.2.2.4\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.117</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T5.1.2.2.5\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.539</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.3.3\">\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T5.1.3.3.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">Min</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T5.1.3.3.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">1.38</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T5.1.3.3.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.06</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T5.1.3.3.4\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.406</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.4.4\">\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T5.1.4.4.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">Max</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T5.1.4.4.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">3.17</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T5.1.4.4.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">1.269</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T5.1.4.4.4\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.962</td>\n</tr>\n<tr class=\"ltx_tr\" 
id=\"S4.T5.1.5.5\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center ltx_border_b ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T5.1.5.5.1\" rowspan=\"2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\"><span class=\"ltx_text\" id=\"S4.T5.1.5.5.1.1\">MSC</span></td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T5.1.5.5.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">State</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T5.1.5.5.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.64</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T5.1.5.5.4\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.29</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T5.1.5.5.5\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">1.42</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.6.6\">\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T5.1.6.6.1\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">Control</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T5.1.6.6.2\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.0530</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T5.1.6.6.3\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.0527</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T5.1.6.6.4\" style=\"padding-top:0.25pt;padding-bottom:0.25pt;\">0.0027</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 170 |
+
"capture": "Table V: Comparison of computing time and smoothness with NLP-based Solvers."
|
| 171 |
+
}
|
| 172 |
+
},
|
| 173 |
+
"image_paths": {
|
| 174 |
+
"1": {
|
| 175 |
+
"figure_path": "2208.02439v2_figure_1.png",
|
| 176 |
+
"caption": "Figure 1: A method for collision-free smooth path planning. The contents in the red box are subjects in this paper.",
|
| 177 |
+
"url": "http://arxiv.org/html/2208.02439v2/extracted/5964176/fig/summary_rev2.png"
|
| 178 |
+
},
|
| 179 |
+
"2(a)": {
|
| 180 |
+
"figure_path": "2208.02439v2_figure_2(a).png",
|
| 181 |
+
"caption": "(a) A maximally inflated path corridor\nFigure 2: Schematics of for collision-free path corridors.",
|
| 182 |
+
"url": "http://arxiv.org/html/2208.02439v2/extracted/5964176/fig/corridor1.png"
|
| 183 |
+
},
|
| 184 |
+
"2(b)": {
|
| 185 |
+
"figure_path": "2208.02439v2_figure_2(b).png",
|
| 186 |
+
"caption": "(b) Inflation process\nFigure 2: Schematics of for collision-free path corridors.",
|
| 187 |
+
"url": "http://arxiv.org/html/2208.02439v2/extracted/5964176/fig/corridor2.png"
|
| 188 |
+
},
|
| 189 |
+
"3": {
|
| 190 |
+
"figure_path": "2208.02439v2_figure_3.png",
|
| 191 |
+
"caption": "Figure 3: The iterations of the MPPI-IPDDP algorithm for generating a collision-free path from (0,0)00(0,0)( 0 , 0 ) to (0,6)06(0,6)( 0 , 6 ). In the figure, black dots represent the positions of the robot, red circles denote the path corridors, and gray areas indicate obstacles. During the early iterations, the constraints are violated (as the black dots are outside the corridors) because the IPDDP struggled with the infeasible starting point and was terminated by the user-defined maximum iteration limit, as illustrated in \u2460\u223csimilar-to\\sim\u223c\u2462. However, as the MPPI-IPDDP algorithm continues to iterate, it eventually finds the optimal collision-free trajectory, as shown in \u2467.",
|
| 192 |
+
"url": "http://arxiv.org/html/2208.02439v2/extracted/5964176/fig/optseq.png"
|
| 193 |
+
},
|
| 194 |
+
"4(a)": {
|
| 195 |
+
"figure_path": "2208.02439v2_figure_4(a).png",
|
| 196 |
+
"caption": "(a) Coarse controls by MPPI\nFigure 5: Cost reduction and convergence rate over MPPI-IPDDP iterations.",
|
| 197 |
+
"url": "http://arxiv.org/html/2208.02439v2/extracted/5964176/fig/fig4a.png"
|
| 198 |
+
},
|
| 199 |
+
"4(b)": {
|
| 200 |
+
"figure_path": "2208.02439v2_figure_4(b).png",
|
| 201 |
+
"caption": "(b) Smooth controls by IPDDP\nFigure 5: Cost reduction and convergence rate over MPPI-IPDDP iterations.",
|
| 202 |
+
"url": "http://arxiv.org/html/2208.02439v2/extracted/5964176/fig/fig4b.png"
|
| 203 |
+
},
|
| 204 |
+
"4(c)": {
|
| 205 |
+
"figure_path": "2208.02439v2_figure_4(c).png",
|
| 206 |
+
"caption": "(a) The terminal state cost of the trajectory reduces over iterations.\nFigure 5: Cost reduction and convergence rate over MPPI-IPDDP iterations.",
|
| 207 |
+
"url": "http://arxiv.org/html/2208.02439v2/extracted/5964176/fig/fig5a.png"
|
| 208 |
+
},
|
| 209 |
+
"4(d)": {
|
| 210 |
+
"figure_path": "2208.02439v2_figure_4(d).png",
|
| 211 |
+
"caption": "(b) The maximum value of the primal residuals approaches 0, meaning that the constraints are satisfied.\nFigure 5: Cost reduction and convergence rate over MPPI-IPDDP iterations.",
|
| 212 |
+
"url": "http://arxiv.org/html/2208.02439v2/extracted/5964176/fig/fig5b.png"
|
| 213 |
+
},
|
| 214 |
+
"5": {
|
| 215 |
+
"figure_path": "2208.02439v2_figure_5.png",
|
| 216 |
+
"caption": "Figure 6: The iterations for generating an optimal collision-free path from (0,0,0)000(0,0,0)( 0 , 0 , 0 ) to (0,4,2)042(0,4,2)( 0 , 4 , 2 ) by MPPI-IPDDP. The black dots are position of the quadrotor, the red spheres are the path corridors, and the gray represents obstacles. The optimal trajectory passes the small hole and reaches the destination. Details can be found in accompanying video.",
|
| 217 |
+
"url": "http://arxiv.org/html/2208.02439v2/extracted/5964176/fig/fig6n.png"
|
| 218 |
+
},
|
| 219 |
+
"6(a)": {
|
| 220 |
+
"figure_path": "2208.02439v2_figure_6(a).png",
|
| 221 |
+
"caption": "(a) Coarse controls by MPPI\nFigure 8: Cost reduction and convergence rate over MPPI-IPDDP iterations.",
|
| 222 |
+
"url": "http://arxiv.org/html/2208.02439v2/extracted/5964176/fig/fig7a.png"
|
| 223 |
+
},
|
| 224 |
+
"6(b)": {
|
| 225 |
+
"figure_path": "2208.02439v2_figure_6(b).png",
|
| 226 |
+
"caption": "(b) Smooth controls by IPDDP\nFigure 8: Cost reduction and convergence rate over MPPI-IPDDP iterations.",
|
| 227 |
+
"url": "http://arxiv.org/html/2208.02439v2/extracted/5964176/fig/fig7b.png"
|
| 228 |
+
},
|
| 229 |
+
"6(c)": {
|
| 230 |
+
"figure_path": "2208.02439v2_figure_6(c).png",
|
| 231 |
+
"caption": "(a) The terminal state cost of the trajectory reduces over iterations.\nFigure 8: Cost reduction and convergence rate over MPPI-IPDDP iterations.",
|
| 232 |
+
"url": "http://arxiv.org/html/2208.02439v2/extracted/5964176/fig/fig9a.png"
|
| 233 |
+
},
|
| 234 |
+
"6(d)": {
|
| 235 |
+
"figure_path": "2208.02439v2_figure_6(d).png",
|
| 236 |
+
"caption": "(b) The maximum value of the primal residuals approaches 0, meaning that the constraints are satisfied.\nFigure 8: Cost reduction and convergence rate over MPPI-IPDDP iterations.",
|
| 237 |
+
"url": "http://arxiv.org/html/2208.02439v2/extracted/5964176/fig/fig9b.png"
|
| 238 |
+
},
|
| 239 |
+
"7(a)": {
|
| 240 |
+
"figure_path": "2208.02439v2_figure_7(a).png",
|
| 241 |
+
"caption": "(a) Average computing time\nFigure 9: Performance comparisons for MPPI methods.",
|
| 242 |
+
"url": "http://arxiv.org/html/2208.02439v2/extracted/5964176/fig/fig8a.png"
|
| 243 |
+
},
|
| 244 |
+
"7(b)": {
|
| 245 |
+
"figure_path": "2208.02439v2_figure_7(b).png",
|
| 246 |
+
"caption": "(b) Mean squared curvature\nFigure 9: Performance comparisons for MPPI methods.",
|
| 247 |
+
"url": "http://arxiv.org/html/2208.02439v2/extracted/5964176/fig/fig8b.png"
|
| 248 |
+
},
|
| 249 |
+
"8(a)": {
|
| 250 |
+
"figure_path": "2208.02439v2_figure_8(a).png",
|
| 251 |
+
"caption": "(a) MPPI\nFigure 10: Performance comparisons of the success rates, average computing time, trajectory MSC for MPPI methods.",
|
| 252 |
+
"url": "http://arxiv.org/html/2208.02439v2/extracted/5964176/fig/fig10a.png"
|
| 253 |
+
},
|
| 254 |
+
"8(b)": {
|
| 255 |
+
"figure_path": "2208.02439v2_figure_8(b).png",
|
| 256 |
+
"caption": "(b) Log-MPPI\nFigure 10: Performance comparisons of the success rates, average computing time, trajectory MSC for MPPI methods.",
|
| 257 |
+
"url": "http://arxiv.org/html/2208.02439v2/extracted/5964176/fig/fig10b.png"
|
| 258 |
+
},
|
| 259 |
+
"8(c)": {
|
| 260 |
+
"figure_path": "2208.02439v2_figure_8(c).png",
|
| 261 |
+
"caption": "(c) Smooth-MPPI\nFigure 10: Performance comparisons of the success rates, average computing time, trajectory MSC for MPPI methods.",
|
| 262 |
+
"url": "http://arxiv.org/html/2208.02439v2/extracted/5964176/fig/fig10c.png"
|
| 263 |
+
},
|
| 264 |
+
"8(d)": {
|
| 265 |
+
"figure_path": "2208.02439v2_figure_8(d).png",
|
| 266 |
+
"caption": "(d) MPPI-IPDDP\nFigure 10: Performance comparisons of the success rates, average computing time, trajectory MSC for MPPI methods.",
|
| 267 |
+
"url": "http://arxiv.org/html/2208.02439v2/extracted/5964176/fig/fig10d.png"
|
| 268 |
+
},
|
| 269 |
+
"9(a)": {
|
| 270 |
+
"figure_path": "2208.02439v2_figure_9(a).png",
|
| 271 |
+
"caption": "(a) Env. 1: IPOPT fails to generate a collision-free trajectory.\nFigure 11: Comparisons with continuous optimization-based solvers.",
|
| 272 |
+
"url": "http://arxiv.org/html/2208.02439v2/extracted/5964176/fig/fig13/failed.png"
|
| 273 |
+
},
|
| 274 |
+
"9(b)": {
|
| 275 |
+
"figure_path": "2208.02439v2_figure_9(b).png",
|
| 276 |
+
"caption": "(b) Env. 2: All methods succeed.\nFigure 11: Comparisons with continuous optimization-based solvers.",
|
| 277 |
+
"url": "http://arxiv.org/html/2208.02439v2/extracted/5964176/fig/fig13/same_trajectory.png"
|
| 278 |
+
},
|
| 279 |
+
"9(c)": {
|
| 280 |
+
"figure_path": "2208.02439v2_figure_9(c).png",
|
| 281 |
+
"caption": "(c) Open loop control trajectories of Env. 2 in Fig 11b.\nFigure 11: Comparisons with continuous optimization-based solvers.",
|
| 282 |
+
"url": "http://arxiv.org/html/2208.02439v2/extracted/5964176/fig/fig13/input.png"
|
| 283 |
+
}
|
| 284 |
+
},
|
| 285 |
+
"validation": true,
|
| 286 |
+
"references": [],
|
| 287 |
+
"url": "http://arxiv.org/html/2208.02439v2"
|
| 288 |
+
}
|
20241030/2212.08841v4.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2212.10388v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2301.11486v3.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2301.13603v3.json
ADDED
|
@@ -0,0 +1,265 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"title": "Limits of structures and Total NP Search Problems11footnote 1This work has been supported by Charles University Research Center program No.UNCE/SCI/022, the project SVV-2023-260721 and by the GA UK project No. 246223.",
|
| 3 |
+
"abstract": "For an infinite class of finite graphs of unbounded size, we define a limit object, to be called a wide limit, relative to some computationally restricted class of functions. The limit object is a first order Boolean-valued structure. The first order properties of the wide limit then reflect how a computationally restricted viewer \u201csees\u201d a generic\nmember of the class. The construction uses arithmetic forcing with\nrandom variables [11]. We give sufficient conditions for universal and existential sentences to be valid in the limit, provide several examples, and prove that such a limit object can then be expanded to a model of weak arithmetic.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "The notion of limits of finite structures is prevalent both in logic and in combinatorics. In logic the examples are the ultraproduct and the compactness theorem, which was used in [4 ###reference_b4###] to prove the \u2013 law for structures over relational vocabularies. In combinatorics the dense graph limit defined in [13 ###reference_b13###] provided a framework to restate and find new proofs for results in extremal graph theory \u2014 for instance Goodman\u2019s theorem relating the number of edges to the number of triangles in a graph. More notions of graph limits are discussed in [15 ###reference_b15###]. Another recent use of limit objects for results of extremal combinatorics was by Razborov in [17 ###reference_b17###].\nIn this work we define a new construction of a limit object.\nGiven a class of finite graphs , whose vertex sets are initial segments of , we can stratify it into the sequence of sets as follows\nWe are interested in the case when the cardinalities of are unbounded and hence our intended limit is a limit of sets of finite graphs. For this reason we call such a sequence of sets of graphs a wide sequence and the limit object its wide limit. The qualification wide refers to the fact that we are interested in sequences of sets of graphs rather than sequences of individual graphs.\nWe will give a full definition in Section 3 ###reference_###, but we describe some important properties here. For a class of functions (typically chosen to be a class with some computational restrictions) we define the wide limit denoted , where is a technical parameter to be defined later. The wide limit is a Boolean-valued graph333Generally, we can do this with any -structures for some first order language . The limit object is then a Boolean-valued -structure . In this work we restrict ourselves to the language of graphs to simplify the presentation. 
\u2014 its edge relation does not only permit the truth values 0 and 1 but also many other values from some infinite complete Boolean algebra . This algebra is in fact also a measure algebra with a measure on it, so to any statement formulated as a first order sentence we can assign a real number which measures how far the truth value of (denoted ) is from the value 0. The key method we use is arithmetical forcing with random variables, developed in [11 ###reference_b11###], which allows us to construct models of (weak) arithmetical theories and by restricting to the language of graphs gives us Boolean-valued graphs. In these Boolean-valued graphs, existential statements which obtain the maximal truth-value 1 (valid sentences) correspond to the ability of to solve search problems over the class of graphs we are considering.\nOur limit object can be expanded to the original model that Kraj\u00ed\u010dek\u2019s method would otherwise construct. We prove (Theorem 4.8 ###reference_thrm8###) that the truth values of first order sentences concerning the object are preserved even when evaluated in the model of arithmetic relativized to the wide limit (under a mild condition on the family ).\nAs an application of this construction, we reprove a many-one separation of black-box total NP search problems. A total NP search problem, first introduced in [16 ###reference_b16###], is a problem which always has a solution of size polynomial in the input size and the correctness of its solution can be verified by a polynomial time machine. The qualifier black-box means that the input is given by a pair , where is a binary string and is a function oracle. The problems of our interest are the following: The problem RetractionWeakPigeon, whose totality follows from a non-existence of a retraction from and . 
And the problem WeakPigeon, whose totality follows from a non-existence of an injection between the same pair of sets.\nWe take a wide limit of all maps from the interval to the interval relative to trees querying images of elements of subexponential depth (in the length of ) to obtain a Boolean-valued graph of a function , which is in a sense elementarily equivalent to an injective map from the interval to for some non-standard number . We then expand the wide limit into a model of arithmetic , where RetractionWeakPigeon is total, but is an instance of WeakPigeon which has no solution.\nThere is already an established connection between complexity of search problems and logic (namely witnessing theorems in bounded arithmetic, see [7 ###reference_b7###], [14 ###reference_b14###]). The model we construct is a model of relatively weak theory , and also of open induction and open comprehension with parameters. The existence of this model gives a new proof of the result of Thapen [18 ###reference_b18###, Theorem 4.10], that these principles along with the principle that RetractionWeakPigeon is total cannot prove that the problem WeakPigeon is total.\nThis paper has two main parts. The emphasis of the paper is on the first conceptual part, where we introduce the new notion of a wide limit. This part consists of Sections 2 ###reference_###, 3 ###reference_### and 4 ###reference_###: In Section 2 ###reference_### we recall preliminary notions, most importantly nonstandard models of arithmetic. In Section 3 ###reference_### we give the definition of the wide limit, provide several examples and show how density of finite substructures corresponds to validity of existential sentences in the wide limit (Theorem 3.11 ###reference_thrm11###). 
And in Section 4 ###reference_### we prove that, under some reasonable assumptions, the wide limit can be expanded to a model of two sorted arithmetic (Theorem 4.8 ###reference_thrm8###).\nThe second part, consisting of Sections 5 ###reference_### and 6 ###reference_###, is about our application of the new concept. In Section 5 ###reference_###, we recall the definition of query TFNP and show that the wide limit of all black-box instances of WeakPigeon is a graph of an injective map from to . Finally, in Section 6 ###reference_###, we expand the wide limit to a model of two-sorted arithmetic, , where we verify the following properties:\n(Theorem 6.5 ###reference_thrm5###): the wide limit is an instance of WeakPigeon without a solution,\n(Theorem 6.9 ###reference_thrm9###): every instance of RetractionWeakPigeon has a solution,\n(Theorem 6.12 ###reference_thrm12###): open comprehension and open induction,\n(Theorem 6.13 ###reference_thrm13###): the theory ,\nand also refute the existence of a many-one reduction from the problem WeakPigeon to the problem RetractionWeakPigeon (Theorem 6.11 ###reference_thrm11###). This separation is not new and follows from known results, see Section 7 ###reference_### for more details, but our illustration gives the separation a clear semantic interpretation."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Preliminaries",
|
| 15 |
+
"text": "By graphs, we mean structures in a language with a single binary relation denoted which is antireflexive and symmetric as we only consider undirected graphs in this work. We will denote any particular graph by as it will be used in some sense as a sample of a discrete probability space. The edge relation of a particular graph will be denoted .\nIn the rest of this section, we recall notions needed for Kraj\u00ed\u010dek\u2019s forcing construction. A fundamental notion we use throughout the work is of nonstandard models of (true) arithmetic. Let be the language containing the names of all relations and functions on the natural numbers and let denote the set of true sentences in this language in the standard model . By classical results of logic there exist -structures in which all sentences from are valid but which are not isomorphic to . These are called nonstandard models (of ).\nAll nonstandard models of (and even much weaker theories) contain an isomorphic copy of as an initial segment. Therefore, we can assume that in fact all models we encounter satisfy . After considering a concrete nonstandard model (of ) we shall call the elements of nonstandard numbers. These can be intuitively understood as \u201cinfinite natural numbers\u201d. The key feature of those elements is that all functions and relations from are defined even on nonstandard numbers. This includes functions for coding sequences and sets by numbers, and therefore we can use notations like even for a nonstandard number . The notation then means that for each such that we have an object coded by a number in and that this whole sequence is coded by some number . For a nonstandard number coding a set we denote its nonstandard size (cardinality) by . In the case where we talk about a binary string the notation denotes the length of (which is nonstandard if is), and if is simply an element of we denote its bit length. 
The symbol in sequences implies indexing over standard natural numbers, and in limits has the standard meaning.\nIn the next section we will fix a nonstandard model which has the model theoretic property of being -saturated. There is a self-contained construction of such model in [11 ###reference_b11###, Appendix], but we never explicitly use the -saturation. The reason we require it is that without it, it might happen that the Boolean algebra we construct is not complete. A property, implied by our choice of language, which we do use explicitly is the following:\nFor any sequence of standard natural numbers, there is a function symbol in such that for all we have . This gives rise to a sequence which agrees with on elements with standard index, but also has elements with index for any . We call the nonstandard prolongation of and for any we will denote as simply .\nUsing this property, we will now allow ourselves to freely use any elements of as indices of sequences of standard numbers and generally any sequences of standard finite objects, which can indeed be coded by standard natural numbers.\nAny nonstandard model can be extended to an ordered ring by adding negative elements. This ring then can be extended to a fraction field . We shall call elements of -rationals. The field contains an isomorphic copy of as a substructure.\nWe shall use the structure analogously to how hyperreal numbers are used in nonstandard analysis. For more details about nonstandard analysis we recommend [5 ###reference_b5###] to the interested reader.\nWe call an element in with absolute value greater than all , infinite, otherwise we call it finite. We call elements in with absolute value smaller than all , infinitesimal.\nWe will denote the set of finite -rationals as and one can check it forms an ordered ring.\nThere is a function assigning to each finite -rational a real number. 
The function st is a ring homomorphism and the kernel of st is exactly the ideal of infinitesimal numbers. When is a finite -rational we call its standard part.\nThe following result characterizes convergence of sequences of rational numbers using the -rational numbers .\nLet be a sequence of standard rational numbers and let . Then the following are equivalent.\n\nFor every nonstandard we have: .\nThis theorem shows that computing a standard part of an -rational obtained from a nonstandard prolongation of some standard sequence is equivalent to computing the limit of this sequence. It will be apparent that computations emerging in the analysis of wide limits can be completed in a purely standard setting \u2014 by computing limits of specific probabilities associated with the wide sequence we consider. In this work, we mostly present the computations with nonstandard parameters, this seems to be natural in the context of our limit object and in some sense provides a corresponding shift in perspective: Instead of directly analyzing the sequence we start with, we perform calculations with the nonstandard numbers which enumerate elements at a concrete nonstandard index of the sequence. A reader interested in interpreting those computations in the standard setting can in most cases simply imagine the manipulations to be prefixed with:\n\u201cFor all sufficiently large standard \u2026\u201d\nIt is important for arithmetical forcing with random variables to consider discrete probability spaces of nonstandard size. We shall always use the uniform distribution on the samples, although this is not necessary for the general construction. Thus, the probability of an event coded by an element is then just the -rational number where is the set of all samples.\nWe conclude this section by restating classical inequalities used in this work using the nonstandard approach. 
Their validity follows from the usual inequalities and Theorem 2.2 ###reference_thrm2###.\nLet , and , then\nLet , then"
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "Wide Limits",
|
| 21 |
+
"text": "We shall define a wide limit of every sequence of the following form.\nA sequence of sets of graphs is called a wide sequence if the following holds:\nEvery graph has the vertex set .\n.\nBy abuse of notation we will simply talk about a wide sequence instead of . Since a wide limit is a Boolean-valued graph, we need to construct a Boolean algebra in which the truth evaluation of statements shall take place. For the construction of the Boolean algebra we will closely follow [11 ###reference_b11###, Chapter 1] albeit with slight changes. Let us fix now, for the rest of this work, an -saturated model of which we will denote and with it we fix a nonstandard number .\nWe define\nin words is the set of subsets of coded by an element in . This is a Boolean algebra and to each we assign an -rational which we call its counting measure.\nEven though is a Boolean algebra with a \u201cmeasure\u201d it is not a -algebra. Indeed, contains all singletons for standard, but the countable union of those singletons, the set of standard natural numbers , is not definable by overspill. However, if we had joins and meets of arbitrary subsets at our disposal it would allow us to interpret quantifiers in the Boolean-valued case, so we now want to \u2018tweak\u2019 this Boolean algebra.\nLet be the ideal of consisting of elements with infinitesimal counting measure. We define . Each element in is of the form , where , and we define . We will denote the maximal element of by 1 and the minimal element by 0.\nOne can easily check that is well-defined since for all it holds that . The measure is called the Loeb measure. Relying on the -saturation of , the following then holds.\nis a -algebra with a real valued measure . Moreover, is a complete Boolean algebra.\nIt is important to note that is the only element of with measure and similarly is the only element with measure . 
Also, for the inequality implies .\nWe now define precisely what we mean by the family of functions relative to which we will be taking the wide limit. This is still a part of Kraj\u00ed\u010dek\u2019s construction, we just modify it to make it compatible with our setup that starts with a wide sequence.\nFor every the set is finite and thus can be coded by a standard number. Therefore, there is a nonstandard prolongation of this sequence, and we can consider the set coded by the nonstandard number , which matches the value of the function symbol in describing the function when .\nLet be a wide sequence. We say that is a family of random variables on if every is a function coded by a number in with domain and taking values in . We say is an -vertex if for all it holds that . The set of all -vertices is denoted .\nIf the wide sequence is clear from context we just say is a family of random variables. This is for now everything we need to recall from [11 ###reference_b11###], and we can proceed to define the central object of our work.\nLet be a wide sequence and let be a family of random variables on . We define the wide limit as a -valued structure in the language consisting of a single binary relation symbol as follows. The universe of the wide limit is taken as the set of all -vertices. We now inductively define the truth values for all -sentences.\n\n\ncommutes with , and\n\n\nTo stress in which Boolean-valued structure is the truth evaluation taking place we will sometimes denote the evaluation , for Boolean-valued structures and respectively. Furthermore, if for some sentence we say is valid in .\nNote that since can be recovered from as the domain of its elements, the wide limit only depends on , strictly speaking. We keep in the notation to cover the situation where we have a very general family of functions (e.g. the family of polynomial functions ) which can be applied to every wide sequence. 
Thus, the notation means that is restricted to those functions which take elements of as an input even when possibly contains other functions too.\nThe potential variability of the parameter may also seem unnecessary and indeed in this section it is, but in Section 6 ###reference_### we will assume that is a power of two, which will allow us to more easily translate between the results about wide limits and reducibility of search problems."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "3.1",
|
| 25 |
+
"parent_section_id": "3",
|
| 26 |
+
"section_name": "Wide Limits for Shallow Decision Trees",
|
| 27 |
+
"text": "Now we shall define the first nontrivial family of random variables relative to which we shall take wide limits of several sequences. The functions in the family will be computed by shallow decision trees. So the shape of the wide limit reflects what can a tree of depth subexponential in witness in the wide sequence with probability arbitrarily close to .\nLet be a family444The subscript \u2018rud\u2019 stands for rudimentary. The name for the family is taken from [11 ###reference_b11###]. of labeled rooted binary trees in of the following form.\nAt each vertex the tree is labeled by an element of and the two outgoing edges incident to it are labeled as and respectively. The leaves are labeled by an element of . The depth of the tree is bounded by a number of a form (rounded to the nearest element of ) for some .\nA computation of a on some is defined as follows. Start at the root and interpret each label of the vertex as a question whether the pair is in the edge set and follow a path through reading as a positive answer and as a negative answer. The label of the leaf visited at the end of the path is the output of on , denoted .\nWe define to be the set of all functions computed by a tree .\nIn the following example, we consider a simple wide sequence of sets of graphs with exactly one edge.\nLet . Since any has only edge in all potential edges, it is not likely a shallow tree will find the edge. This is the idea behind the proof of the following claim.\nLet , we proceed by proving that\nwhich is enough to prove the theorem since\nLet and be computed by and respectively. Let the depth of both and be at most , where . Walk down from the root and always prolong the path along the edge labeled . On this path we have a set of at most different pairs of vertices as the labels of edges and a label of the leaf node .\nWe do the same for , and we find another set of at most pairs of vertices and a label of the leaf . 
The labels and are then combined to one last pair . Now we just need to compute the probability that none of these pairs of vertices are in the edge set .\nThere are different graphs in and at least graphs which fulfill our requirements, namely, those graphs whose sole edge is not incident with the vertices from the labels of the trees and . The probability is by Theorem 2.3 ###reference_thrm3### at least\nafter taking the standard part of the last line we get . Therefore, and ."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3.2",
|
| 31 |
+
"parent_section_id": "3",
|
| 32 |
+
"section_name": "Sufficient conditions for validity of universal and existential sentences",
|
| 33 |
+
"text": "To understand wide limits we need to compute the truth-values of sentences which describe properties whose complexity we are interested in. Generally, for sentences of arbitrary complexity, this can be hard. In this section we prove sufficient conditions at least for the validity of universal and existential sentences.\nWe will start with the simpler condition for the validity of universal sentences. This is important also because we would like to know that a wide limit of a wide sequence of graphs is also a graph: Meaning the statement that is antireflexive and symmetric is valid in the wide limit, and this statement is indeed expressible as a universal sentence.\nLet be a wide sequence and let be any family of random variables. Let be an open -formula and assume that\nThen .\nBy the assumption and Theorem 2.2 ###reference_thrm2### we get that .\nSince is open, we have for every tuple of -vertices that . Now\n\u220e\nLet be a wide sequence and any family of random variable, then is an -structure in which both antireflexivity and symmetry of is valid (i.e. is a Boolean-valued graph).\nNow to give a sufficient condition for the validity of an existential sentence we use the auxiliary value of density of defined as the probability that a random graph and a random tuple satisfy and show that the limiting density gives a lower bound for the measure of .\nLet be a wide sequence and let be a family of random variables which contains all constant functions. Let be an open -formula and let . Assume that\nwhere is sampled uniformly over all elements of . Then\nIn particular if then\nConsider an array indexed by and such that\nBy the assumption and induction in we have that\nWe now claim that there exists a specific such that . Assume for contradiction that the claim is false. Then\nwhere we pick555This is possible because being a model of satisfies induction. such that it maximizes . 
But after taking the standard part of the inequality we obtain that\nwhich is a contradiction and so the claim is true. Let be a tuple of constant functions which is at every sample equal to . We have\nand by taking of this inequality we finally obtain that .\n\u220e\nThe following example demonstrates that Theorem 3.9 ###reference_thrm9### cannot be generalized to a similar hypothesis as Theorem 3.11 ###reference_thrm11###.\nLet consist of all undirected graphs on the vertex set with exactly edges. One can see that\nbut in fact .\nLet such that is not bounded above by a standard number. Let be a tree which queries on all paths a fixed set of different potential edges. If we prove that any such set in has to contain at least one edge with probability infinitesimally close to then we can construct -vertices and using such that by simply taking and labeling each leaf on a path which finds an edge with either the lesser vertex (in the natural order of ) to compute , or with the greater vertex to compute .\nLet be the set of potential edges queried by and let . Now we have\nthe standard part of which can be, using Theorem 2.4 ###reference_thrm4###, for all bounded above by\nwhich tends to as .\nFor more examples, we point the interested reader to [9 ###reference_b9###]."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "4",
|
| 37 |
+
"parent_section_id": null,
|
| 38 |
+
"section_name": "Expanding Wide Limits to Two-Sorted Arithmetic",
|
| 39 |
+
"text": "In this section, we will show that under reasonable assumptions one can embed the wide limit into the original models of Kraj\u00ed\u010dek in such a way that first order statements, after a suitable translation, keep their truth values."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "4.1",
|
| 43 |
+
"parent_section_id": "4",
|
| 44 |
+
"section_name": "The structures",
|
| 45 |
+
"text": "We will now recall the construction of two-sorted models of weak arithmetic defined in [11 ###reference_b11###, Chapter 5]. We will take the liberty to define them as an extension of the definition of a wide limit to obtain structures 666This notation is just making some parameters of the construction explicit, the models constructed can be obtained by the original method without first constructing the wide limit. Our contribution is in observing that the truth values of first order sentences concerning the wide limit is preserved between the wide limit and the structure .. Under the right conditions, these result in a structure in some sublanguage of with two sorts: numbers and functions of bounded domain on numbers, and this latter sort contains the wide limit as an object. These sorts are to be named number sort and set sort, as the bounded functions can be interpreted as sets (or more generally relations) and every such function can be coded by a bounded set .\nLet . This determines a language which we get by keeping the original variables as number sort variables, adding to set sort variables whose intended interpretation are bounded functions and the equality symbol for set sort variables (denoted the same as for the number sort). All set sort variables are treated as function symbols and can form terms with the number sort terms as arguments.\nWe will also use the function sort variables as relation symbols, and we define the atomic formula to be evaluated the same as the formula\nWe will now fix a wide sequence and a family of random variables on which together determine a wide limit .\nWe define to be the subset of consisting of all numbers bounded above by for some .\nWe define to contain all relation symbols from and all functions from for which their values on any tuple of elements of is still in . 
We say is -closed if for every function symbol of arity and every we have that .\nNote that is then a substructure of the -reduct of .\nWe say that is a family of random functions (on ) if for every there is such that assigns to each a -ary function coded by an element in which maps a set into . Such is then called -ary.\nWe say is -compatible if for every , and a -ary we have that the function\n defined as\nis in fact in .\nAn example of a specific family of random functions will be provided in Section 6 ###reference_###.\nLet be an -closed family of random variables with values in . Let be an -compatible family of random functions. We define to be a -valued -structure with the number sort of the universe as and the set sort of the universe as . The valuation of formulas is then given by the following inductive definition. We define the valuation only for terms consisting of variables substituted with elements of , and because of the -closedness and -compatibility of , we can evaluate more complex terms step-by-step, eventually reaching an element of .\n, where\n, where the elements are from and is a relation symbol in\n, where\ncommutes with , and\n\n\n\n.\nLet us note that in general, it is possible that extensionality is not valid in ."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "4.2",
|
| 49 |
+
"parent_section_id": "4",
|
| 50 |
+
"section_name": "Preservation of sentences concerning the wide limit",
|
| 51 |
+
"text": "We will now prove (under a mild condition on ) that there is a set sort object in which faithfully encodes the wide limit . This lets us construct models in which an object with the same first order properties as the wide limit might be desired. Recall that every element , where is a family of random functions, actually determines a predicate which we evaluate the same as the formula .\nLet be a family of random functions. We say that the edge relation of the wide limit is represented in by if is binary and for all we have that\nWe say a family of random variables has restrictable ranges if for every and there is such that\nLet be a -sentence. Let be -closed and have restrictable ranges and let be -compatible. Let the edge relation of the wide limit be represented in by . We define to be the -sentence obtained by replacing all the occurrences of the relation symbol by , keeping the structure of the logical connectives and replacing all quantifiers by and by .\nThen we have that\nWe will prove that for all -formulas and all we have that\nWe proceed by induction on the complexity of the formula. The case for atomic formulas follows from Definition 4.6 ###reference_thrm6### and the step for logical connectives is also clear since commutes with them. With the induction step for negation in hand it is now enough to prove the induction step for the universal quantifier.\nWe now assume that the statement holds for a formula . By the restrictability of ranges in we get that for all there is such that\nWe have that for every :\nand by the validity of predicate logic\nwhich together implies\nwhich can be rewritten as\nMoreover, for every we have and thus every element of is of the form for some .\nClaim: For all we have:\nFrom we obtain\nFor the second direction, using the fact that contains exactly all , we have\nthis proves the claim.\nWith the claim established, we can finish the inductive step for the universal quantifier. 
Again using that consists exactly of all and the induction hypothesis, we have that for all :"
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "5",
|
| 55 |
+
"parent_section_id": null,
|
| 56 |
+
"section_name": "Total NP search problems and Wide Limits",
|
| 57 |
+
"text": "In this section we recall the class of search problems called Total NP Search problems, first defined in [16 ###reference_b16###], and then take a wide limit of all instances of WeakPigeon and show that when it is intepreted as an instance, it has no solution."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "5.1",
|
| 61 |
+
"parent_section_id": "5",
|
| 62 |
+
"section_name": "Preliminaries on Total NP Search problems",
|
| 63 |
+
"text": "A total NP search problem is a relation on binary strings for which the following two conditions are satisfied.\nVerifiability in polynomial time: There exists a Turing machine deciding whether in time polynomial in the lengths of and .\nTotality: There is a such that for every there is , is at most and .\nThe class of all such search problems is denoted TFNP.\nThere are many well-known TFNP problems, some of which were considered already in [16 ###reference_b16###], such as Leaf, which intuitively amounts to the task:\n\u201cGiven a graph with an odd-degree vertex, find another.\u201d\nor Pigeon intuitively described as:\n\u201cGiven a map , find distinct and such that .\u201d\nwhere denotes the set . The graph in the first problem is given by a circuit computing potential neighbors for each vertex, and the function in the second problem is given by a circuit computing its values. This makes the problems non-trivial as checking the whole or may take exponential time in the size of the given circuit.\nMore relevant for our setting is the variant of the class called black-box TFNP, originally defined as \u2018type TFNP\u2019 in [1 ###reference_b1###]. This variant allows the circuits in the input to be replaced by an oracle, for example, in the problem Leaf we instead obtain oracle access to the neighbor-sets of or in the problem Pigeon to the values of the function . We will start by defining a query tree, which is a natural computation model generalizing an oracle Turing machine. Query trees capture exactly the relationship between the values of the oracle and the computational states of the model. Usually such trees operate on relational oracles, see for example [6 ###reference_b6###], but in our setting function oracles are more natural. 777Moreover, function oracles, with values polynomially bounded in the size of the input, can be simulated by relational ones which can be queried for -th bit of the given function value. 
Every query tree operating on function oracle then can be transformed to a tree operating on relational oracle with at most polynomially larger depth.\nLet . A -query tree is a labeled rooted tree of the following shape:\nEach non-leaf node is labeled by a binary string of length .\nEach non-leaf node has for every an outgoing edge labeled by .\nEach leaf node is labeled by a binary string of length .\nThe depth of a tree is the length of the longest path starting at the root. If is a function which maps to , then the computation of on is the path obtained by starting at the root and continuing down the tree from each non-leaf node with some label to the next node along the edge labeled by , the output of the computation is then simply the label of the leaf met on the computation.\nFor a function oracle and a number we denote the restriction of to as .\nA total query NP search problem is a relation, where is a function oracle, and are binary strings, along with two functions and such that the following three conditions are satisfied.\nConditions on lengths: The functions and are polynomial time when the input is given in unary. For every , we have and for every we have that implies .\nVerifiability in polynomial depth: There is a polynomial , and for any binary strings and there exists a query tree of depth at most , such that for every we have if and only if .\nTotality: For each there is such that .\nThe pair satisfying the conditions on lengths is called an instance of , and the string is called the size parameter. The class of all such search problems is denoted .\nWe will be analyzing the following two problems.\nThe problem WeakPigeon is given as follows. Given and a function oracle with find distinct such that .\nThis problem is total because the size of the domain on a given length is larger than the size of the codomain. 
Let us now assume that the input of a problem can be given by multiple function oracles, since these oracles can be represented as a single oracle computing all their values in parallel.\nThe problem RetractionWeakPigeon is given as follows. Given and function oracles and such that\nfind satisfying that .\nThe problem is total as a consequence of the totality of WeakPigeon: if we have distinct such that then one of them has to already be a solution to WeakPigeon. This observation can be made precise using the notion of a many-one reduction. These reductions originally used oracle Turing machines (see [1 ###reference_b1###]), but as we already replaced oracle Turing machines by query trees we shall modify the definition accordingly.\nLet and be problems such that the length functions for are and and for they are and . We say that is many-one reducible to , denoted , if there is a function computed in polynomial-time and for each , there is a sequence of -query trees and a sequence of -query trees , such that the following is satisfied:\nFor every instance of , let denote the function oracle satisfying that is given by the value . Then, for every satisfying we have .\nIt is easy to check that . Regarding the other direction, it follows from known results (see Section 7 ###reference_### for details) that\nwe will give a new proof of this using wide limits in the remainder of Section 5 ###reference_### and Section 6 ###reference_###."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "5.2",
|
| 67 |
+
"parent_section_id": "5",
|
| 68 |
+
"section_name": "The wide limit of all instances of WeakPigeon",
|
| 69 |
+
"text": "The following wide sequence essentially consists of all instances of WeakPigeon. We will show, that relative to trees of subexponential depth which are allowed to ask for neighbors of vertices, the wide limit will be a graph of an injective function.\nLet be a wide sequence consisting of all -structures on , where is a graph of a function from to .\nNote, that for a fixed the neighbor of in is simply the image of in the function whose graph is .\nWe define as the set of all labeled rooted trees of the following shape:\nEach non-leaf node is labeled by some .\nFor each and a node there is an outgoing edge from labeled .\nEach leaf is labeled by some .\nThe depth of the tree is defined as the maximal number of edges in a path from the root, and we require it is at most (rounded to the nearest element of ) for some .\nThe computation of such a tree in on is defined as follows. We build a path by starting at the root and interpreting every node labeled by some as a question \u2018what is the neighbor of the vertex ?\u2019 and we follow the output edge with the answer and continue analogously until we find a leaf. The label of the leaf is defined to be the output of the computation.\nWe define to be the set of all functions on which are computed by some . Note that the depth of the trees is subexponential in .\nWe say a tree fails on if on the computation path the tree the neighbors of all distinct vertices are distinct, and if then also the neighbor of in is disjoint from the other neighbors.\nLet , then\nBy direct computation, we have that the probability of failure of tree of depth is\nwhere the last inequality follows from Theorem 2.3 ###reference_thrm3###. 
Since the depth of any tree in is bounded by , for some , we have that the lower bound is at least , which is infinitesimally close to .\n\u220e\nFor contradiction assume that\ntherefore, there are , such that there is and\ntherefore, as the evaluated sentence is quantifier free, we have\nAssume that are the trees computing and . Let us consider the tree which can be obtained by taking and replacing every leaf by a copy of the tree , and then appending to every new leaf by every possible labeled edge. By the fraction of paths of where the tree asks for neighbors of and and obtains is infinitesimally close to and thus the probability of failure of is not infinitesimally close to . This is in contradiction with Lemma 5.10 ###reference_thrm10###.\n\u220e"
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "6",
|
| 73 |
+
"parent_section_id": null,
|
| 74 |
+
"section_name": "Model for separation of RetractionWeakPigeon and WeakPigeon",
|
| 75 |
+
"text": "In this section, we will expand to the model , and show that RetractionWeakPigeon is total, but the problem WeakPigeon is not. We then show that this implies nonexistence of a many-one reduction from WeakPigeon to RetractionWeakPigeon."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "6.1",
|
| 79 |
+
"parent_section_id": "6",
|
| 80 |
+
"section_name": "Construction of the model",
|
| 81 |
+
"text": "Let us start by assuming for the rest of this work that is a power of two, this will allow us to easily convert between sets of binary strings and numbers. In the models we are working with there is a pairing function which codes pairs of numbers by a single number. Thus, we can represent functions of any finite arity by unary functions in . We use this to define the family .\nWe define to be the family of all random functions on which fulfill the following. For each unary there exists a tuple coded by an element of , such that and for\nFor every and -ary there is a -ary such that for every we have\nOne can also regard -ary functions from as those computed by -dimensional tuples (that is tuples indexed by -tuples) of elements of . To further explain the formalism, if we have unary and some the function is the function determined by the tuple of elements of with the property that if . Random function families defined using tuples of random variables are used frequently in [11 ###reference_b11###] and, more importantly, they generalize functions computable by an oracle Turing machine. This allows us to obtain nonexistence of many-one reducibility to RetractionWeakPigeon in Theorem 6.11 ###reference_thrm11###.\nWe define , an element of , as the random function computed by the tuple , where is computed by a tree of depth which queries the neighbor of and outputs if it is , and otherwise it outputs .\nhas restrictable ranges\nis -closed\nis -compatible\nthe edge relation of the wide limit is represented in by .\n1, 2: Here we can proceed simply by relabeling the leaves of the trees computing the functions from .\n3: Assume that is computed by a tuple and the depth of all trees computing is at most for some . For all we have that the tree , which we construct by appending to each leaf of the tree computing with label the tree , has depth at most for some and therefore . The tree computes by the definition of . 
Hence, is -compatible.\n4: By the definition of and , we have for every :\nThe model which we will analyze in the rest of this section is ."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "6.2",
|
| 85 |
+
"parent_section_id": "6",
|
| 86 |
+
"section_name": "Non-totality of WeakPigeon",
|
| 87 |
+
"text": "In this section, we will show that the formalization of the statement \u2018The problem WeakPigeon is total\u2019 is not true in the model . Since wide limits are defined on intervals of (non-standard) numbers and problems are defined on sets of binary strings, let us describe how these sets correspond to each other in our formalized statement. The input oracles will be represented by elements of the set sort and each set of binary strings , where , will be identified with the interval . Since is a power of two, the interval reserved for the values of -vertices can be identified with . We also obtain the bijection .\nWe will now define a formula which is a formalization of the statement that \u2018The values and are a solution to the WeakPigeon instance .\u2019 It is more natural in the arithmetic setting to accept as instances functions of arbitrary range and then allow inputs mapped outside as solutions. This variant is many-one reducible to the original problem, as we can remap the inputs outside to a fixed value in .\nLet be the following -formula:\nand to be the following -formula:\nLet us fix which is computed by the tuple , where the element is computed by the depth one tree, whose root is labeled and each leaf is labeled the same as its corresponding edge.\nClaim I: \nProof of claim. Since the sentence is universal, it is enough to check its validity on every , which in turn follows from the definitions of and .\nClaim II:\nProof of claim. This follows from Theorem 5.11 ###reference_thrm11### which shows the validity of the corresponding sentence in the wide limit , Lemma 6.3 ###reference_thrm3### and Theorem 4.8 ###reference_thrm8###.\nClaim III: \nProof of claim. By Claims I and II and validity of predicate logic.\nClaim IV: \nProof of claim. 
Again, by the universality we just need to check the validity at each , which follows as outputs neighbors of vertices in which are by the definition of always in the range .\nThe claims III and IV show the validity of the negations of all disjuncts of the formula . The formula is also obviously assigned the Boolean value 0. This implies that\nwhich implies the theorem by the Boolean evaluation of universal quantifiers.\n\u220e"
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "6.3",
|
| 91 |
+
"parent_section_id": "6",
|
| 92 |
+
"section_name": "Totality of RetractionWeakPigeon",
|
| 93 |
+
"text": "We again start by defining a formula which is a formalization of a statement: \u2018The value is a solution to the instance of the problem RetractionWeakPigeon.\u2019\nLet be the following -formula:\nand let be the following -formula:\nLet , and . We say that a tree fails for on if and either or\nFor every and there is a tree such that\nWe can assume that is computed by and by , such that for all : and that the depth of all the trees computing and is bounded by for some . For any and this can be achieved by padding any pair of tuples they are computed by the constant function in .\nWe will recursively construct trees indexed by elements of and then show that for sufficiently large value of satisfies the statement of the lemma. We will be constructing the tree by prolonging paths of previously constructed trees, where by a path we mean a complete path of nodes from the root all the way to some leaf. We say a sample is compatible with a path in a tree if the computation of the tree on is exactly .\nThe initial tree : The initial tree is the tree computing whose leaves we relabel as follows. For every path in such that all compatible with give , we have a non-failure of and we can keep the original label. On the other hand, for each path in such that all compatible with give we pick a label in the following way. Let be such a path and be the value obtained for any compatible with . For each compatible with we have\nbecause the image of has at most -many elements. This implies by an averaging argument that there is such that\nWe relabel each leaf at such a path to . This concludes the construction of and assures that and that the depth of is at most .\nThe recursive step: Let us now assume that and that was already constructed with the property that\nand that the depth of is at most . 
Moreover, the tree on each path determines a value and if this path fails, it also determines two -element sets and such that and form a pair of inverse functions on every compatible with . We also assume, that the leaf of every path is labeled by some element of . We will prolong along each path in two stages and relabel the leaves to obtain the tree .\nFirst prolongation:\nLet be a path in .\nIf the tree does not fail on any sample compatible with , we keep as it is. Otherwise, we replace the leaf by the tree computing and change the label of each new leaf from to the tuple and call all such new paths active. We call the resulting tree and by the assumption on the tree computing we get its depth is at most .\nSecond prolongation:\nLet be an active path in with its leaf labeled and let on any compatible with . If then after relabeling the leaf of to the value , the tree does not fail on any compatible with . If , we replace the leaf by the tree and change the label of each new leaf from to the triple . We will denote the resulting tree and again call its newly prolonged paths active. By the assumptions on the tree computing we have that the depth of is at most .\nRelabeling: We will now relabel the leaves of active paths of . Let be an active path of and let its leaf be labeled . If is distinct from we can relabel the leaf to the value , this makes the tree never fail along this path. Otherwise, and the tree so far have established a bijection between and computed by and on any compatible with . This implies, that for each compatible with we have\nwhere the is sampled uniformly in which by an averaging argument implies the existence of such that\nwe then relabel the leaf of to . After relabeling all active paths in this way, we obtain the tree .\nProperties of : The depth of is the same as of which is at most . 
During the relabeling, we have also established that the failing paths determine a bijection with one additional element added to both and of the corresponding path in and that the leaves are properly labeled. It remains to analyze the probability of failing. If a path in did not fail on some , any of its extensions in also did not fail on . Moreover, if did fail, we found a new label for each of its extensions in , which fails with probability at most . Therefore,\nConclusion: Let , then the depth of is at most . Moreover,\nstandard part of which is . Thus, satisfies the statement of the lemma.\n\u220e\nLet and , we will find such that\nwhich is enough to prove the theorem.\nLet be computed by the tree whose existence follows from Lemma 6.8 ###reference_thrm8### for and . The formula is interpreted for each as the statement claiming the tree computing not failing for on . Since it is a quantifier-free formula, it can be evaluated in by calculating its probability over the sample space which by Lemma 6.8 ###reference_thrm8### is infinitesimally close to . Therefore, the formula is valid in .\n\u220e"
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "6.4",
|
| 97 |
+
"parent_section_id": "6",
|
| 98 |
+
"section_name": "Nonreducibility of WeakPigeon to RetractionWeakPigeon",
|
| 99 |
+
"text": "In the next theorem, we will be working both with non-standard binary strings and non-standard numbers. We will define two pairs of functions which convert between these two types of objects. The first pair consists of the Wrap function w which converts binary strings to non-zero numbers by appending a leading and interpreting the string as a binary expansion of the resulting number. The inverse to w is the Unwrap function denoted u. These are then used to convert between the size parameter of the formula formalizing totality, either or , and the size parameter of the associated problem.\nThe second pair then provides an explicit bijection between the set and , or alternatively between and , depending on whether we start with a binary string or an element . The first function, Pad, denoted p simply takes an input number and outputs its binary expansion with enough leading zeros to obtain a string of required size, this required size is a second argument of the function. However, we will never write this second argument explicitly, and its value will always be clear from the context. The function Unpad, denoted up, is the inverse to p and simply takes a binary string and interprets it as a binary expansion of a number ignoring leading zeros.\n###table_1### We will begin by proving the following lemma, its proof is straightforward and it essentially shows that the formulas and properly formalize the fact that a given element is a solution to the respective search problem. We only state one direction for each of the problems, this simplifies the proof and exactly captures the properties needed for the proof of our separation.\nLet , and . If , there is a WeakPigeon instance , such that:\nFor every solution to , we have that\nLet , and . If\n,\nthe range of is a subset of ,\nthe range is a subset of ,\nthen the instance of the problem\nRetractionWeakPigeon satisfies:\nFor every satisfying we have that is a solution to .\n(1.) 
The function oracle can be defined as follows.\nany solution to is then either translated to a pair of distinct numbers mapped to the same element by or one of them is mapped outside and hence .\n(2.) This follows straight from the assumptions and the definition of the formula .\n\u220e\nThere is no many-one reduction from the problem WeakPigeon to the problem RetractionWeakPigeon.\nWe will proceed by contradiction, assume that WeakPigeon is many-one reducible to RetractionWeakPigeon. We will show that this implies\nwhich is in contradiction with Theorem 6.5 ###reference_thrm5###.\nThe existence of the reduction implies that there is a function from binary strings to binary strings and for each length-parameter to WeakPigeon we get the suitable sequences of query trees. Let us also define a variant of which operates on numbers: The function maps any non-zero to .\nLet , . We will use the reduction to construct some and such that the formula\nis valid in for any .\nLet us assume is computed by , , where each element is from and is computed by for each . First, we define as the function computed by the tree of , where we change the label of each leaf from to . By Lemma 6.10 ###reference_thrm10###, after the computation of the tree computing is determined, a set of WeakPigeon instances of the form is determined, where is compatible with the computation of . By the existence of the reduction, we also obtain the sequences of query trees and .\nOur goal now is to define sequences of trees which will define , , and . To do this, we first have to convert the query trees to trees. Assume, that is determined, therefore is determined and we obtain the sequences and . For each tree we define a tree by simulating each query with . 
That is, layer by layer, starting at the parents of leaves, replace each non-leaf node labeled with the tree computing and appending the subtrees from the previous layer onto the leaves of the new copy with the same label as the edge they were originally incident to. This can also be done with to obtain the tree . Let be a power of larger than any possible value of . For each we will construct the tree as follows: Start with the tree computing where each path in this tree with a leaf labeled satisfying gets replaced by the tree . Analogously, we also define to be the tree constructed from the tree computing by appending onto each path labeled by satisfying . Let us also assume that the labels of and are numbers obtained from the original labels by the function up.\nSince RetractionWeakPigeon is a problem presented by two function oracles, there is a way to compute either of the functions and using a single input oracle which in our case is itself computed by the sequence . We will define , , and by specifying the elements of the tuples they are computed by which are denoted by the respective lowercase letters. We define to be computed by the tree , except we relabel its leaves to only keep the value of and similarly let be also computed by except we relabel it to only keep the value of . We also define to be computed by relabeled to keep only the value for the first part of the solution and to be computed by relabeled to keep only the second part of the solution.\nTo prove the validity of , which is a quantifier-free sentence, we can simply check the validity over every . After is fixed, the tuples\nare instances of the problems RetractionWeakPigeon and WeakPigeon respectively. Assume that the antecedent of is satisfied, therefore\nNotice that satisfies the assumptions (a), (b) and (c) of Lemma 6.10 ###reference_thrm10### by construction, this shows that that is a solution to . By the existence of the reduction, we get that and form a solution to . 
And finally, the Lemma 6.10 ###reference_thrm10### implies that\nwhich concludes the proof of .\nFrom the validity of every instance of , Theorem 6.9 ###reference_thrm9### and validity of predicate logic we obtain"
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"section_id": "6.5",
|
| 103 |
+
"parent_section_id": "6",
|
| 104 |
+
"section_name": "Further properties of the constructed model",
|
| 105 |
+
"text": "In the rest of this section we state which additional principles hold in the model.\nLet be an open -formula with parameters from and . Then for every the open comprehension principle\nand the open induction principle\nare both valid in , where we interpret as the constant function always outputting in .\nThis can be proven completely analogously to [11 ###reference_b11###, Lemma 20.2.5], we will therefore only describe the basic idea. Let and let be an open formula with a single free variable with parameters from and . For each number the validity of for a given can be decided by a tree and the depth of all such trees is bounded above by . A tuple of elements of each computed by the corresponding tree then determines an element of which satisfies the comprehension principle for and . The induction principle follows directly from the comprehension principle by an application of a binary search procedure.\n\u220e\nLet us give a remark about the logical strength of the separation we gave. The language contains names for all the functions on which are polynomially bounded (the length of the output is bounded by a polynomial in the length of the input). This allows us to interpret the language , containing the names for every polynomial time algorithm with an oracle access to some polynomially bounded function , in the structure and therefore in . We define to be the theory consisting of all universal -formulas true for any interpretation of . The function may represent a function oracle, hence symbols from represent (the functions computed by) the polynomial time oracle machines with access to and is simply the set of all universal sentences true for such functions for any oracle.\nLet the function symbol be interpreted in by any . Then, all the axioms of are valid in .\nThe theory consists of all true universal statements for any interpretation of . 
Therefore, for any , any axiom of is true in , after is interpreted as , under any substitution of the universal quantifiers. Thus, any axiom of is also valid in .\n\u220e\nThis theorem is relevant for interpretation of our construction from the point of view of bounded arithmetic. The theory is the true universal extension of the relativized . The theory was first defined in [12 ###reference_b12###] as a first order extension of Cook\u2019s equational theory PV [2 ###reference_b2###]."
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"section_id": "7",
|
| 109 |
+
"parent_section_id": null,
|
| 110 |
+
"section_name": "Concluding remarks",
|
| 111 |
+
"text": "In this paper we have developed some basic theory around the new concept of a limit object, the wide limit. In the author\u2019s view, it provides a more elementary counterpart to Kraj\u00ed\u010dek\u2019s structures into which they can be expanded while preserving the values of sentences restricted to their universe (Theorem 4.8 ###reference_thrm8###). In particular, even though the ideas which are needed to prove our many-one separation (Theorem 6.11 ###reference_thrm11###) could be collected to a direct proof of this separation, the wide limit provides a concrete semantic interpretation of this separation: An observer, who queries the images of a random map only polynomially many times, will observe with high probability a map without a collision. This map is embodied by our limit object .\nThe problem RetractionWeakPigeon and its variations have been investigated in the context of TFNP (, in [8 ###reference_b8###],[14 ###reference_b14###], \u2018lossy code\u2019 in [10 ###reference_b10###]) and its relation to WeakPigeon follows from well-known results. A nonreducibility of WeakPigeon to a problem stronger than RetractionWeakPigeon, namely RetractionPigeon, follows from a lower bound on the Sherali-Adams proof system in [3 ###reference_b3###] and established connections between propositional proofs and total NP search problems (see [6 ###reference_b6###]).\nThe main reason why was the problem RetractionWeakPigeon total in the constructed model was that it had many solutions which could be checked in a constant number of queries. It would be interesting to see how this can be generalized for other total search problems.\nCharacterize which TFNP problems are total in .\nIn [14 ###reference_b14###] the totality of certain total search problems is relatively unprovable even while preserving quite a lot of induction. In our model only was verified. 
It would be interesting to investigate further how much induction can be verified in the models emerging as expansions of wide limits. In [11 ###reference_b11###] there are several models of the theory constructed. However, these are obtained by expanding the family and using a suitable switching lemma to obtain a form of quantifier elimination, which in our case makes the wide limit interpretation unnatural because for this construction a more complex sample space is used.\nA general direction of further research, which would be particularly interesting, would be to characterize valid sentences of some concrete wide limit without the direct construction and thus to prove upper and lower bounds for the family of functions being considered."
|
| 112 |
+
}
|
| 113 |
+
],
|
| 114 |
+
"appendix": [],
|
| 115 |
+
"tables": {},
|
| 116 |
+
"image_paths": {},
|
| 117 |
+
"validation": true,
|
| 118 |
+
"references": [
|
| 119 |
+
{
|
| 120 |
+
"1": {
|
| 121 |
+
"title": "The relative complexity of np search problems.",
|
| 122 |
+
"author": "Paul Beame, Stephen Cook, Jeff Edmonds, Russell Impagliazzo, and Toniann Pitassi.",
|
| 123 |
+
"venue": "In Proceedings of the Twenty-Seventh Annual ACM Symposium on Theory of Computing, STOC \u201995, page 303\u2013314, New York, NY, USA, 1995. Association for Computing Machinery.",
|
| 124 |
+
"url": null
|
| 125 |
+
}
|
| 126 |
+
},
|
| 127 |
+
{
|
| 128 |
+
"2": {
|
| 129 |
+
"title": "Feasibly constructive proofs and the propositional calculus (preliminary version).",
|
| 130 |
+
"author": "Stephen A. Cook.",
|
| 131 |
+
"venue": "In Proceedings of the Seventh Annual ACM Symposium on Theory of Computing, STOC \u201975, page 83\u201397, New York, NY, USA, 1975. Association for Computing Machinery.",
|
| 132 |
+
"url": null
|
| 133 |
+
}
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"3": {
|
| 137 |
+
"title": "Tight rank lower bounds for the sherali\u2013adams proof system.",
|
| 138 |
+
"author": "Stefan Dantchev, Barnaby Martin, and Mark Rhodes.",
|
| 139 |
+
"venue": "Theoretical Computer Science, 410(21-23):2054\u20132063, 2009.",
|
| 140 |
+
"url": null
|
| 141 |
+
}
|
| 142 |
+
},
|
| 143 |
+
{
|
| 144 |
+
"4": {
|
| 145 |
+
"title": "Probabilities on finite models.",
|
| 146 |
+
"author": "Ronald Fagin.",
|
| 147 |
+
"venue": "The Journal of Symbolic Logic, 41(1):50\u201358, 1976.",
|
| 148 |
+
"url": null
|
| 149 |
+
}
|
| 150 |
+
},
|
| 151 |
+
{
|
| 152 |
+
"5": {
|
| 153 |
+
"title": "Lecture notes on nonstandard analysis, 2014.",
|
| 154 |
+
"author": "Isaac Goldbring.",
|
| 155 |
+
"venue": "Available at https://www.math.uci.edu/~isaac/NSA%20notes.pdf (last accessed 9th of January 2023).",
|
| 156 |
+
"url": null
|
| 157 |
+
}
|
| 158 |
+
},
|
| 159 |
+
{
|
| 160 |
+
"6": {
|
| 161 |
+
"title": "Separations in proof complexity and tfnp.",
|
| 162 |
+
"author": "Mika G\u00f6\u00f6s, Alexandros Hollender, Siddhartha Jain, Gilbert Maystre, William Pires, Robert Robere, and Ran Tao.",
|
| 163 |
+
"venue": "Journal of the ACM, 71(4):1\u201345, 2024.",
|
| 164 |
+
"url": null
|
| 165 |
+
}
|
| 166 |
+
},
|
| 167 |
+
{
|
| 168 |
+
"7": {
|
| 169 |
+
"title": "Search problems and Bounded Arithmetic.",
|
| 170 |
+
"author": "J. Hanika.",
|
| 171 |
+
"venue": "PhD thesis, Charles University, Prague, 2004.",
|
| 172 |
+
"url": null
|
| 173 |
+
}
|
| 174 |
+
},
|
| 175 |
+
{
|
| 176 |
+
"8": {
|
| 177 |
+
"title": "Approximate counting by hashing in bounded arithmetic.",
|
| 178 |
+
"author": "Emil Je\u0159\u00e1bek.",
|
| 179 |
+
"venue": "Journal of Symbolic Logic, 74(3):829\u2013860, 2009.",
|
| 180 |
+
"url": null
|
| 181 |
+
}
|
| 182 |
+
},
|
| 183 |
+
{
|
| 184 |
+
"9": {
|
| 185 |
+
"title": "Pseudofinite structures and limits.",
|
| 186 |
+
"author": "Ond\u0159ej Je\u017eil.",
|
| 187 |
+
"venue": "Master\u2019s thesis, Charles University, Prague, 2022.",
|
| 188 |
+
"url": null
|
| 189 |
+
}
|
| 190 |
+
},
|
| 191 |
+
{
|
| 192 |
+
"10": {
|
| 193 |
+
"title": "Efficient low-space simulations from the failure of the weak pigeonhole principle.",
|
| 194 |
+
"author": "Oliver Korten.",
|
| 195 |
+
"venue": "Electronic Colloquium on Computational Complexity, TR-22, 2022.",
|
| 196 |
+
"url": null
|
| 197 |
+
}
|
| 198 |
+
},
|
| 199 |
+
{
|
| 200 |
+
"11": {
|
| 201 |
+
"title": "Forcing with random variables and proof complexity, volume 382 of London Mathematical Society Lecture Note Series.",
|
| 202 |
+
"author": "Jan Kraj\u00ed\u010dek.",
|
| 203 |
+
"venue": "Cambridge University Press, 2011.",
|
| 204 |
+
"url": null
|
| 205 |
+
}
|
| 206 |
+
},
|
| 207 |
+
{
|
| 208 |
+
"12": {
|
| 209 |
+
"title": "Bounded arithmetic and the polynomial hierarchy.",
|
| 210 |
+
"author": "Jan Kraj\u00ed\u010dek, Pavel Pudl\u00e1k, and Gaisi Takeuti.",
|
| 211 |
+
"venue": "Annals of Pure and Applied Logic, 52(1-2):143\u2013153, 1991.",
|
| 212 |
+
"url": null
|
| 213 |
+
}
|
| 214 |
+
},
|
| 215 |
+
{
|
| 216 |
+
"13": {
|
| 217 |
+
"title": "Limits of dense graph sequences.",
|
| 218 |
+
"author": "L\u00e1szl\u00f3 Lov\u00e1sz and Bal\u00e1zs Szegedy.",
|
| 219 |
+
"venue": "Journal of Combinatorial Theory, Series B, 96(6):933\u2013957, 2006.",
|
| 220 |
+
"url": null
|
| 221 |
+
}
|
| 222 |
+
},
|
| 223 |
+
{
|
| 224 |
+
"14": {
|
| 225 |
+
"title": "Typical forcings, np search problems and an extension of a theorem of riis.",
|
| 226 |
+
"author": "Moritz M\u00fcller.",
|
| 227 |
+
"venue": "Annals of Pure and Applied Logic, 172(4):102930, 2021.",
|
| 228 |
+
"url": null
|
| 229 |
+
}
|
| 230 |
+
},
|
| 231 |
+
{
|
| 232 |
+
"15": {
|
| 233 |
+
"title": "A model theory approach to structural limits.",
|
| 234 |
+
"author": "Jaroslav Ne\u0161et\u0159il and Patrice Ossona de Mendez.",
|
| 235 |
+
"venue": "Commentationes Mathematicae Universitatis Carolinae, 53:581\u2013603, 11 2012.",
|
| 236 |
+
"url": null
|
| 237 |
+
}
|
| 238 |
+
},
|
| 239 |
+
{
|
| 240 |
+
"16": {
|
| 241 |
+
"title": "On the complexity of the parity argument and other inefficient proofs of existence.",
|
| 242 |
+
"author": "Christos H. Papadimitriou.",
|
| 243 |
+
"venue": "Journal of Computer and System Sciences, 48(3):498\u2013532, 1994.",
|
| 244 |
+
"url": null
|
| 245 |
+
}
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"17": {
|
| 249 |
+
"title": "Flag algebras.",
|
| 250 |
+
"author": "Alexander A. Razborov.",
|
| 251 |
+
"venue": "The Journal of Symbolic Logic, 72(4):1239\u20131282, 2007.",
|
| 252 |
+
"url": null
|
| 253 |
+
}
|
| 254 |
+
},
|
| 255 |
+
{
|
| 256 |
+
"18": {
|
| 257 |
+
"title": "Structures interpretable in models of bounded arithmetic.",
|
| 258 |
+
"author": "Neil Thapen.",
|
| 259 |
+
"venue": "Annals of Pure and Applied Logic, 136(3):247\u2013266, 2005.",
|
| 260 |
+
"url": null
|
| 261 |
+
}
|
| 262 |
+
}
|
| 263 |
+
],
|
| 264 |
+
"url": "http://arxiv.org/html/2301.13603v3"
|
| 265 |
+
}
|
20241030/2303.10465v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2304.00910v4.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2304.00977v2.json
ADDED
|
@@ -0,0 +1,548 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"title": "Selective Reincarnation: Offline-to-Online Multi-Agent Reinforcement Learning",
|
| 3 |
+
"abstract": "\u2018Reincarnation\u2019 in reinforcement learning has been proposed as a formalisation of reusing prior computation from past experiments when training an agent in an environment. In this paper, we present a brief foray into the paradigm of reincarnation in the multi-agent (MA) context. We consider the case where only some agents are reincarnated, whereas the others are trained from scratch \u2013 selective reincarnation. In the fully-cooperative MA setting with heterogeneous agents, we demonstrate that selective reincarnation can lead to higher returns than training fully from scratch, and faster convergence than training with full reincarnation. However, the choice of which agents to reincarnate in a heterogeneous system is vitally important to the outcome of the training \u2013 in fact, a poor choice can lead to considerably worse results than the alternatives. We argue that a rich field of work exists here, and we hope that our effort catalyses further energy in bringing the topic of reincarnation to the multi-agent realm.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Reinforcement Learning (RL) is a field that has existed for many years, but has recently seen an explosion of interest and research efforts. Since the incorporation of deep neural networks into the paradigm (Mnih et al., 2013 ###reference_b22###), the community has witnessed success in a wide array of tasks, many of which previously seemed intractable (Silver et al., 2016 ###reference_b33###). A commonly-cited feat is achieving superhuman performance in various games, both classical (Schrittwieser et al., 2020 ###reference_b31###) and modern (Berner et al., 2019 ###reference_b5###; Wurman et al., 2022 ###reference_b44###). Such games can represent situations which are high-dimensional, combinatorially complex, and non-linear, and thus demonstrate the sophistication of the RL approach to sequential decision making. Even with the successes of single-agent RL, many real-world settings are inherently multi-agent, where multiple diverse agents act together in a shared environment. The success of Multi-Agent Reinforcement Learning (MARL) has been similarly captivating in this context, with demonstrations of emergence of high-level concepts such as coordination and teamwork (Samvelyan et al., 2019 ###reference_b29###), and even trade (Johanson et al., 2022 ###reference_b17###).\nDespite these victories, the discipline of RL still faces a series of fierce challenges when applied to real-world situations, not least the intense computation often required for training (Agarwal et al., 2022 ###reference_b2###). The multi-agent case, though highly applicable to the real world, is plagued further by problems of non-stationarity (Papoudakis et al., 2019 ###reference_b24###), partial observability (Papoudakis et al., 2021 ###reference_b25###), and the \u2018curse of dimensionality\u2019 (Du & Ding, 2021 ###reference_b8###). We postulate that RL, and MARL specifically, is a powerful tool to help us model, understand, and solve complex processes and phenomena. 
First, though, it is clear that these challenges must be mitigated.\nProgress is being made in this regard, across a host of research strategies such as transfer learning (Zhu et al., 2020 ###reference_b45###), ad hoc teamwork (Stone et al., 2010 ###reference_b35###), and zero-shot coordination (Hu et al., 2020 ###reference_b16###). Another crucial effort is to leverage prior computation, to avoid the unnecessary duplication of work. In a typical RL research project, an algorithm is trained tabula rasa \u2013 that is, without prior experience or encoded knowledge. Sometimes, such an approach is desirable: for example, it was the express intention of Silver et al. (2017 ###reference_b34###) to train their AlphaZero agent tabula rasa, for the sake of learning to play Go without learning from human data. However, in many practical settings, training from scratch every time is slow, expensive, and also unnecessary. For example, we may want to iterate on a problem or test out a new strategy, and do so quickly, without starting over in each case.\nIn this vein, Agarwal et al. (2022 ###reference_b2###) have recently proposed a formalisation of a research paradigm entitled \u2018Reincarnating RL,\u2019 where previous computation is reused in future work. These authors argue that large, real-world RL systems already take this approach, out of necessity, but in a way that is often ad hoc and informal. Through the creation of a reincarnation framework, not only does a researcher gain benefits in their own experiments, it further allows the field itself to be democratised \u2013 enabling the sharing of checkpoints, model weights, offline datasets, etc., to accelerate development. This dimension is particularly salient for low-resourced researchers, who can piggyback off the computing power available to large research labs. 
Reincarnation is certainly not a panacea for the real-world challenges of RL, but it does provide a springboard both for novel ideas and for new researchers to enter the field. We resonate with this call, and wish to motivate similarly for reincarnation in the MARL context.\nTo catalyse the excitement for this paradigm, we focus in this paper on a particular aspect of reincarnation that may be useful in MARL: selective reincarnation. To illustrate where such a situation is applicable, consider an example of controlling a large, complex industrial plant, consisting of an assortment of heterogeneous agents. Notice that this scenario is in the realm of real-world problems. Suppose we are training our system using a MARL algorithm with a decentralised controller, but this training is computationally expensive, on the order of days-long. Conceivably, we may notice that some agents in our system learn competently \u2013 perhaps their task is simpler, or the algorithmic design suits their intended behaviour; call these the X agents. Other agents might not fare as well and we would like to train them from scratch; call these the Y agents. We wish to find new strategies for the Y agents: maybe we ought to test a new exploration routine, a novel neural architecture, or a different framing of the problem. Instead of retraining the entire system from scratch after each change in our Y agent strategy, we wonder if we can selectively reincarnate the already-performant X agents and thereby enable faster training times or higher performance for the Y agents.\nIn this paper, we make three contributions. Firstly, we hope to usher in this nascent paradigm of reincarnation to MARL, where it is vitally needed. The underlying philosophy of leveraging prior computation already exists in the MARL setting (e.g. Kono et al. (2014 ###reference_b18###)), but we aim to begin formalising the field, as done by Agarwal et al. (2022 ###reference_b2###) for the single-agent case. 
Specifically, we formalise the concept of selective reincarnation. Secondly, we demonstrate interesting phenomena that arise during a preliminary selectively-reincarnated MARL experiment. We find that, with certain agent subsets, selective reincarnation can yield higher returns than training from scratch, and faster convergence than training with full reincarnation. Interestingly, other subsets result in the opposite: markedly worse returns. We present these results as a doorway to a rich landscape of ideas and open questions. Thirdly, we offer a codebase111Available at: https://github.com/instadeepai/selective-reincarnation-marl ###reference_eincarnation-marl### as a framework for selective reincarnation in MARL, from which other researchers can build."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Preliminaries",
|
| 15 |
+
"text": ""
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "2.1",
|
| 19 |
+
"parent_section_id": "2",
|
| 20 |
+
"section_name": "Multi-Agent Reinforcement Learning",
|
| 21 |
+
"text": "There are many different formulations for MARL tasks including competitive, cooperative and mixed settings. The focus of this work is on the cooperative setting. Fully cooperative MARL with shared rewards can be formulated as a decentralised partially observable Markov decision process (Dec-POMDP) (Bernstein et al., 2002 ###reference_b6###). A Dec-POMDP consists of a tuple , , , , , , where is the set of agents in the system and describes the true state of the system. The initial state distribution is given by . However, each agent receives only partial information from the environment in the form of observations given according to an emission function . At each timestep , each agent receives a local observation and chooses an action to form a joint action . Typically under partial observability, each agent maintains an observation history , or implicit memory, on which it conditions its policy , to perform action selection. The environment then transitions to a new state in response to the joint action and current state, according to the state transition function and provides a shared numerical reward to each agent according to a reward function . We define an agent\u2019s return as its discounted cumulative rewards over the episode timesteps, , where is a scalar discount factor controlling how myopic agents are with respect to rewards received in the future. The goal of MARL in a Dec-POMDP is to find a joint policy such that the return of each agent , following , is maximised with respect to the other agents\u2019 policies, . That is, we aim to find such that:"
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2.2",
|
| 25 |
+
"parent_section_id": "2",
|
| 26 |
+
"section_name": "Independent Q-Learning",
|
| 27 |
+
"text": "The Q-value function for a policy is the expected sum of discounted rewards obtained by choosing action at state and following thereafter. DQN (Mnih et al., 2013 ###reference_b22###) is an extension of Q-Learning (Watkins, 1989 ###reference_b42###) which learns the Q-function, approximated by a neural network with parameters , and follows an -greedy policy with respect to the learnt Q-function. One limitation of DQN is that it can only by applied to discrete action environments. DDPG (Lillicrap et al., 2016 ###reference_b19###), on the other hand, can be applied to continuous-action environments by learning a deterministic policy which is trained to output the action which maximises the learnt Q-function at a given state.\nTampuu et al. (2015 ###reference_b37###) showed that in a multi-agent setting such as Pong, independent DQN agents can successfully be trained to cooperate. Similarly, independent DDPG agents have successfully been trained in multi-agent environments (Lowe et al., 2017 ###reference_b20###).\nTo train independent DDPG agents in a Dec-POMDP we instantiate a Q-function for each agent , which conditions on each agent\u2019s own observation history and action . 
In addition, we also instantiate a policy network for each agent which takes agent observations and maps them to actions .\nEach agent\u2019s Q-function is independently trained to minimise the temporal difference (TD) loss, , on transition tuples, , sampled from its experience replay buffer collected during training, with respect to parameters :\nwhere and are delayed copies of the Q-network and policy network respectively, commonly referred to as the target networks.\nThe policy network is trained to predict, given an observation , the action that maximises the Q-function, which can be achieved by minimising the following policy loss with respect to parameters :\nTo improve the performance of independent learners in a Dec-POMDP, agents usually benefit from having memory (Hausknecht & Stone, 2015 ###reference_b15###). Accordingly, we can condition the Q-networks and policies on observation histories instead of just individual observations . In practice, we use a recurrent layer in the neural networks. In addition, to further stabilize learning, we use eligibility traces (Sutton & Barto, 2018 ###reference_b36###) in the form of , from Peng & Williams (1994 ###reference_b27###)."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3",
|
| 31 |
+
"parent_section_id": null,
|
| 32 |
+
"section_name": "Related Work",
|
| 33 |
+
"text": "The concept of reusing computation for learning in some capacity is neither new, nor constrained to the domain of RL. We feel that topics such as transfer learning to new tasks (Bozinovski & Fulgosi, 1976 ###reference_b7###), fine-tuning (e.g. Sharif Razavian et al. (2014 ###reference_b32###)), and post-deployment model updates222See Updatable Machine Learning (UpML) workshop: https://upml2022.github.io/ ###reference_upml2022.github.io/### fit into this broad philosophy. In RL specifically, the concept has also existed for some time (e.g. Fern\u00e1ndez & Veloso (2006 ###reference_b9###)), and other RL researchers are currently pursuing similar aims with different nomenclature (e.g. using offline RL as a \u2018launchpad\u2019 for online RL333See Offline RL workshop: https://offline-rl-neurips.github.io/2022/ ###reference_###). Indeed, Agarwal et al. (2022 ###reference_b2###) accurately highlight that their conception of the field of reincarnation is a formalisation of that which already exists.\nIn MARL, too, there are extant works with the flavour of reincarnation. For example, both Kono et al. (2014 ###reference_b18###) and Gao et al. (2021 ###reference_b12###) explored the concept of \u2018knowledge reuse\u2019 in MARL. The idea of lifelong learning (Nekoei et al., 2021 ###reference_b23###) fits similarly into this paradigm. Authors have also used offline pre-training in the MARL setting (e.g. Meng et al. (2021 ###reference_b21###)). In a large-scale instance, Vinyals et al. (2019 ###reference_b40###) naturally reused computation for the training of their AlphaStar system. Specifically, it is also interesting to note their concept of using agents to help train other agents with a \u2018league\u2019 algorithm. 
In a sense, this approach is somewhat similar to one of the anticipated benefits of selective reincarnation, where good agents can assist by teaching bad agents.\nNonetheless, we believe there has not yet been a formalisation of the field of multi-agent reincarnation, akin to the efforts done by Agarwal et al. (2022 ###reference_b2###). Moreover, it seems that being selective in the agent reincarnation choice is also a novel specification."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "4",
|
| 37 |
+
"parent_section_id": null,
|
| 38 |
+
"section_name": "Definitions",
|
| 39 |
+
"text": "In a MARL system (see Section 2.1 ###reference_###) with the set of agents, an agent is said to be reincarnated (Agarwal et al., 2022 ###reference_b2###) if it has access to some artefact from previous computation to help speed up training from scratch. Typically such an agent is called a student and the artefact from previous computation is called a teacher. The set of teacher artefacts in the system is denoted . There are several types of artefacts which can be used as teachers, including (but not limited to): teacher policies or , offline teacher datasets , and teacher model weights or .\nA selectively reincarnated MARL system with agents is one where agents are trained from scratch (i.e. tabula rasa) and agents are reincarnated (Agarwal et al., 2022 ###reference_b2###). The sets of reincarnated and tabula rasa agents are denoted and respectively. A MARL system with is said to be fully tabula rasa, whereas a system with is said to be fully reincarnated."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "5",
|
| 43 |
+
"parent_section_id": null,
|
| 44 |
+
"section_name": "Case Study: Selectively-Reincarnated Policy-to-Value MARL",
|
| 45 |
+
"text": "Agarwal et al. (2022 ###reference_b2###) presented a case study in policy-to-value RL (PVRL), where the goal is to accelerate training of a student agent given access to a sub-optimal teacher policy and some data from it. Similarly, we now present a case study in multi-agent PVRL, focusing on one of the methods invoked by Agarwal et al. (2022 ###reference_b2###), called \u2018Rehearsal\u2019 (G\u00fcl\u00e7ehre et al., 2020 ###reference_b14###).\nWe set up our experiments as follows. We use an independent DDPG (Lillicrap et al., 2016 ###reference_b19###) configuration, with some minor modifications to enable it to leverage offline teacher data for reincarnation. Specifically, we make two changes. Firstly, we compose each mini-batch of training data from 50% offline teacher data and 50% student replay data, similar to G\u00fcl\u00e7ehre et al. (2020 ###reference_b14###). This technique should give the student the benefit of seeing potentially high-reward transitions from the teacher, while also getting to see the consequences of its own actions from its replay data. Secondly, we add layer-norm to the critic network, to mitigate extrapolation error due to out-of-distribution actions, as per Ball et al. (2023 ###reference_b3###).\nFor the sake of the current question of selective reincarnation, we use the HalfCheetah environment, first presented by Wawrzynski (2007 ###reference_b43###), and later brought into the MuJuCo physics engine (Todorov et al., 2012 ###reference_b38###). Specifically, we focus on the variant introduced by Peng et al. (2021 ###reference_b26###) with their Multi-Agent MuJoCo (MAMuJoCo) framework, where each of the six degrees-of-freedom is controlled by a separate agent. We denote these six agents as the following: the back ankle (BA), the back knee (BK), the back hip (BH), the front ankle (FA), the front knee (FK), and the front hip (FH). This ordering corresponds to the array indices in the MAMuJoCo environment, from to respectively. 
We illustrate the HalfCheetah setup in the appendix, in Figure A.1 ###reference_###.\nFor the set of proficient teacher policies, we initially train on the 6-agent HalfCheetah using tabula-rasa independent DDPG over 1 million training steps, and store the experiences using the OG-MARL framework (Formanek et al., 2023 ###reference_b10###) so that they can be used as the teacher datasets. We then enumerate all combinations of agents for reincarnation, a total of subsets. With each subset, we retrain the system on HalfCheetah, where that particular group of agents gains access to their teachers\u2019 offline data (i.e. they are reincarnated). For each combination, we train the system for timesteps, remove the teacher data, and then train for a further timesteps on student data alone. Each experiment is repeated over five seeds. For the \u2018maximum return\u2019 metric, we find the timestep at which the return, averaged over the five seeds, is highest. For the \u2018average return\u2019 metric, we average the return over all seeds and all timesteps. We use these metrics as proxies for performance and speed to convergence respectively."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "5.1",
|
| 49 |
+
"parent_section_id": "5",
|
| 50 |
+
"section_name": "Impact of Teacher Dataset Quality",
|
| 51 |
+
"text": "To begin with, we fully reincarnate the MARL system, giving all of the DDPG agents access to their teachers\u2019 datasets. Since the quality of the samples in the teacher\u2019s dataset likely has a marked impact on the learning process, we create two datasets for comparison: \u2018Good\u2019 and \u2018Good-Medium\u2019, where these names indicate the typical returns received across samples444\u2018Good\u2019 is created with roughly the last 20% of the various teachers\u2019 experiences from training, and \u2018Good-Medium\u2019 with the last 40%.. Figure A.2 ###reference_###, in the appendix, shows the distribution of the returns in these two datasets.\nWe run the fully reincarnated configuration with each of these datasets, along with a tabula rasa baseline. Figure 1 ###reference_### presents these results.\n###figure_1### Notice in Figure 1(a) ###reference_sf1### that providing access solely to \u2018Good\u2019 teacher data initially does not help speed up training and even seems to hamper it. It is only after around timesteps that we observe a dramatic peak in performance, thereafter significantly outperforming the tabula rasa system. In contrast, having additional \u2018Medium\u2019 samples enables higher returns from the beginning of training \u2013 converging faster than the solely \u2018Good\u2019 dataset.\nOne may be surprised by these results \u2013 that it takes the system some time to realise benefits from high-return teacher data. However, we postulate that when using the \u2018Good\u2019 dataset, the teacher data is narrowly focused around high-return strategies, yet the corresponding state and action distributions are likely very different to the students\u2019 own state and action distributions early in training. Consequently, the students struggle to leverage the teacher datasets until later in training, when the state-action distribution mismatch is minimised. 
This belief is evidenced by the results in Figure 1 ###reference_###, and further supports the notion that the quality of the teachers\u2019 datasets has an impact on the outcomes of reincarnation. We feel this research direction is itself a promising one for future works, which we discuss in more detail in our roadmap, in Section 6 ###reference_###. For the purposes of this investigation, focusing on selective reincarnation and not dataset quality, we simply report the remainder of our results using the \u2018Good-Medium\u2019 dataset. Nevertheless, for completeness, we run our experiments with both datasets, and provide these results publicly555Available at: https://api.wandb.ai/links/off-the-grid-marl-team/5yxrdt3q ###reference_rl-team/5yxrdt3q###."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "5.2",
|
| 55 |
+
"parent_section_id": "5",
|
| 56 |
+
"section_name": "Arbitrarily Selective Reincarnation",
|
| 57 |
+
"text": "We now focus on the core aspect of our investigation: selective reincarnation. Firstly, we approach the problem at a high level by reincarnating of the agents and aggregating across all combinations for that . That is, we do not study which agents are selectively reincarnated for a given . For example, for , we reincarnate all pairs of agents in separate runs: , and then average those results. As an important point, notice that the count of combinations depends on , calculated as \u2013 e.g. there is just one way to reincarnate all six of the agents, but there are twenty ways to reincarnate three of the six agents. Accordingly, we average over a different count of runs depending on , which affects the magnitude of the standard-error metrics. We highlight this detail to warn against comparing the confidence values across these runs. The essence of these results, instead, is to show the mean performance curve.\nThe returns from these runs, computed over five seeds times combinations, are given in Figure 2 ###reference_###, with both the graphical plot and the tabular values reported.\n###figure_2### In Figure 2(a) ###reference_sf1###, we notice firstly that reincarnation enables higher returns. We already saw in Figure 1 ###reference_### that full reincarnation yields higher returns than tabula rasa, but we now see that a selectively-reincarnated setup also yields benefits \u2013 e.g. reincarnating with just half of the agents provides an improvement over tabula rasa. We do see that reincarnating with just one agent is somewhat detrimental in this case, with a slightly lower maximum return over the training period, but not significantly."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "5.3",
|
| 61 |
+
"parent_section_id": "5",
|
| 62 |
+
"section_name": "Targeted Selective Reincarnation Matters",
|
| 63 |
+
"text": "Though the results from Figure 2 ###reference_### are interesting, we now present a vital consideration: in a multi-agent system, even in the simpler homogeneous case, agents can sometimes assume dissimilar roles (e.g. Wang et al. (2020 ###reference_b41###) show the emergence of roles in various tasks). In the HalfCheetah environment particularly, we feel there are likely unique requirements for the ankle, knee, and hip joints, and that these differ across the front and back legs, in order for the cheetah to walk.\nIt is thus important that we compare, for a given , the results across various combinations. That is, e.g., compare reincarnating with , etc. Though we run experiments over all possible combinations, plotting these can quickly become unwieldy and difficult to study. Instead, we show here only the best and worst combinations for each , as ranked by the average return achieved. These plots can be seen in Figure 3 ###reference_###, with values tabulated in Table 1 ###reference_###. We release results for all combinations online666Available at: https://api.wandb.ai/links/off-the-grid-marl-team/5yxrdt3q ###reference_rl-team/5yxrdt3q###.\n###figure_3### ###figure_4### ###figure_5### ###figure_6### ###figure_7### ###figure_8### ###figure_9### ###figure_10### ###figure_11### ###figure_12### ###figure_13### We see in these results that the choice of which agents to reincarnate plays a significant role in the experiment\u2019s outcome. For example, consider the choice of reincarnating three agents, shown in Figure 3(d) ###reference_sf4###: selecting instead of increases the maximum return by 33%, and almost doubles the average return. Similar improvements exist for other values of .\nWe also notice an interesting pattern in the best subsets selected for reincarnation (denote the best subset for as ): as increases, agents are strictly added to the subset. That is, , , and so on. 
Moreover, for these best subset choices, the maximum returns monotonically increase with , up to full reincarnation.\nFor average returns, indicating the time to convergence, we see a similar trend \u2013 where increasing the number of reincarnated agents results in faster convergence. However, the exception to this pattern is for , where higher average returns are achieved than for full reincarnation (see Table 1 ###reference_###). This result implies that it is possible for training to converge faster when selectively reincarnating instead of fully reincarnating \u2013 another potential benefit of the selective reincarnation framework.\nTo affirm these points, we use the MARL-eval tool from Gorsane et al. (2022 ###reference_b13###), built upon work by Agarwal et al. (2021 ###reference_b1###), to plot the associated performance profiles, probability of improvement graphs, and aggregate scores, in Figure 4 ###reference_###.\n###figure_14### ###figure_15### ###figure_16### We use these results as clear evidence of the following: selective reincarnation can yield benefits, with higher returns and faster convergence over tabula rasa and possibly even full reincarnation; but one must be very careful of which agents are selected, for a bad choice can lead to a sub-optimal outcome.\nNaturally, this diagnosis opens up many further questions. How can we know, ideally a priori, whether a given combination is a poor or excellent one? In this example of the HalfCheetah environment, we might try to reason about various combinations: e.g., from Figure 3(f) ###reference_sf6###, we see that reincarnating the back leg, front hip, and front knee is a significantly better choice than the back leg, the front hip, and the front ankle \u2013 does this result perhaps reveal something about the nature of how HalfCheetah learns? We show some other interesting groupings in the appendix, in Figure A.3 ###reference_###."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "6",
|
| 67 |
+
"parent_section_id": null,
|
| 68 |
+
"section_name": "Roadmap for Multi-Agent Reincarnation",
|
| 69 |
+
"text": "We now present a brief roadmap of some avenues to explore in this domain.\nSelective Reincarnation in MARL. There are many other conceivable methods for doing selective reincarnation in MARL which we did not explore. In this work we focused on a method similar to \u2018rehearsal\u2019 (G\u00fcl\u00e7ehre et al., 2020 ###reference_b14###), but future works could experiment with methods such as \u2018jump-starting\u2019 (Uchendu et al., 2022 ###reference_b39###), \u2018kick-starting\u2019 (Schmitt et al., 2018 ###reference_b30###) and offline pre-training. We find offline pre-training a particularly promising direction for selectively reincarnating systems of independent DDPG agents \u2013 e.g. one could apply a behaviour cloning regularisation term to the policy loss in DDPG, as per Fujimoto & Gu (2021 ###reference_b11###), and then to wean it off during training, as per Beeson & Montana (2022 ###reference_b4###). Another direction could be to develop bespoke selective reincarnation methods; for example, a method to enable agents to \u2018trust\u2019 those agents with a teacher more than they would otherwise. Additionally, there is a trove of work to be done in how to understand which agents have the highest impact when reincarnated, and perhaps to reason about this delineation a priori. Finally, we also encourage larger-scale selective-reincarnation experiments on a wider variety of environments, and perhaps even tests with real-world systems.\nBeyond Independent Reincarnation.\nIn this paper, we focused on using independent DDPG for learning in MARL, but we believe many valuable open-problems exist outside of such an approach. For example, how does one effectively reincarnate MARL algorithms that belong to the paradigm of Centralised Training Decentralised Execution (CTDE), such as MADDPG (Lowe et al., 2017 ###reference_b20###) and QMIX (Rashid et al., 2020 ###reference_b28###)? 
It is not clear how one might selectively reincarnate agents with a centralised critic. In general, outside of just selective reincarnation, we also showed evidence that the quality of the teacher policy and data can have a significant impact on the outcomes of reincarnation in RL. Exploring the benefits of, e.g., a curriculum-based, student-aware teacher could be a direction for future work. One could also explore ideas of curricula in the algorithm design itself \u2013 e.g. solely training the reincarnated agents\u2019 critics but freezing their policies, until the other agents \u2018catch up.\u2019 Another question about reincarnation in MARL is how teachers can help students learn to cooperate more quickly. Learning cooperative strategies in MARL can often take a lot of exploration and experience. Could reincarnating in MARL help reduce the computational burden of learning cooperative strategies from scratch? Many exciting avenues exist, and we envision the community exploring interesting open problems in this space."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "7",
|
| 73 |
+
"parent_section_id": null,
|
| 74 |
+
"section_name": "Conclusion",
|
| 75 |
+
"text": "In this paper, we explored the topic of reincarnation (Agarwal et al., 2022 ###reference_b2###), where prior computation is reused for future experiments, within the context of multi-agent reinforcement learning. Specifically, we proposed the idea of selective reincarnation for this domain, where not all the agents in the system are reincarnated. To motivate this idea, we presented a case study using the HalfCheetah environment, and found that selective reincarnation can result in higher returns than if all agents learned from scratch, and faster convergence than if all agents were reincarnated. However, we found that the choice of which agents to reincarnate played a significant role in the benefits observed, and we presented this point as the core takeaway. We used these results to argue that a fruitful field of work exists here, and finally listed some avenues that may be worth exploring as a next step."
|
| 76 |
+
}
|
| 77 |
+
],
|
| 78 |
+
"appendix": [
|
| 79 |
+
{
|
| 80 |
+
"section_id": "Appendix 1",
|
| 81 |
+
"parent_section_id": null,
|
| 82 |
+
"section_name": "Appendix A Appendix",
|
| 83 |
+
"text": "###figure_17### ###figure_18### ###figure_19### ###figure_20### ###figure_21### ###figure_22### ###figure_23### ###figure_24### ###figure_25### ###figure_26### ###figure_27###"
|
| 84 |
+
}
|
| 85 |
+
],
|
| 86 |
+
"tables": {
|
| 87 |
+
"1": {
|
| 88 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S5.T1\">\n<div class=\"ltx_inline-block ltx_align_center ltx_transformed_outer\" id=\"S5.T1.41\" style=\"width:337.9pt;height:156.2pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(-84.2pt,38.9pt) scale(0.667336606298763,0.667336606298763) ;\">\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S5.T1.41.41\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S5.T1.41.41.42.1\">\n<td class=\"ltx_td\" id=\"S5.T1.41.41.42.1.1\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td\" id=\"S5.T1.41.41.42.1.2\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S5.T1.41.41.42.1.3\" style=\"padding:4pt 15.0pt;\"><em class=\"ltx_emph ltx_font_italic\" id=\"S5.T1.41.41.42.1.3.1\">Configuration</em></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S5.T1.41.41.42.1.4\" style=\"padding:4pt 15.0pt;\"><em class=\"ltx_emph ltx_font_italic\" id=\"S5.T1.41.41.42.1.4.1\">Maximum Returns</em></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S5.T1.41.41.42.1.5\" style=\"padding:4pt 15.0pt;\"><em class=\"ltx_emph ltx_font_italic\" id=\"S5.T1.41.41.42.1.5.1\">Average Returns</em></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.3.3.3\">\n<td class=\"ltx_td ltx_border_tt\" id=\"S5.T1.3.3.3.4\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S5.T1.1.1.1.1\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_tt\" id=\"S5.T1.3.3.3.5\" style=\"padding:4pt 15.0pt;\">Tabula Rasa</td>\n<td class=\"ltx_td ltx_align_right ltx_border_tt\" id=\"S5.T1.2.2.2.2\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_right ltx_border_tt\" id=\"S5.T1.3.3.3.3\" style=\"padding:4pt 15.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.7.7.7\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T1.4.4.4.1\" rowspan=\"2\" style=\"padding:4pt 15.0pt;\"><span class=\"ltx_text\" 
id=\"S5.T1.4.4.4.1.1\"><img alt=\"[Uncaptioned image]\" class=\"ltx_graphics ltx_img_landscape\" height=\"61\" id=\"S5.T1.4.4.4.1.1.g1\" src=\"x9.png\" width=\"190\"/></span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T1.5.5.5.2\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S5.T1.7.7.7.5\" style=\"padding:4pt 15.0pt;\"><span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.7.7.7.5.1\">BH</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S5.T1.6.6.6.3\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S5.T1.7.7.7.4\" style=\"padding:4pt 15.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.10.10.10\">\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T1.8.8.8.1\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S5.T1.10.10.10.4\" style=\"padding:4pt 15.0pt;\"><span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.10.10.10.4.1\">FA</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S5.T1.9.9.9.2\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S5.T1.10.10.10.3\" style=\"padding:4pt 15.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.14.14.14\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T1.11.11.11.1\" rowspan=\"2\" style=\"padding:4pt 15.0pt;\"><span class=\"ltx_text\" id=\"S5.T1.11.11.11.1.1\"><img alt=\"[Uncaptioned image]\" class=\"ltx_graphics ltx_img_landscape\" height=\"61\" id=\"S5.T1.11.11.11.1.1.g1\" src=\"x10.png\" width=\"190\"/></span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T1.12.12.12.2\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S5.T1.14.14.14.5\" style=\"padding:4pt 15.0pt;\">\n<span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.14.14.14.5.1\">BH</span>, <span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.14.14.14.5.2\">FK</span>\n</td>\n<td class=\"ltx_td 
ltx_align_right ltx_border_t\" id=\"S5.T1.13.13.13.3\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S5.T1.14.14.14.4\" style=\"padding:4pt 15.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.17.17.17\">\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T1.15.15.15.1\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S5.T1.17.17.17.4\" style=\"padding:4pt 15.0pt;\">\n<span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.17.17.17.4.1\">FA</span>, <span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.17.17.17.4.2\">FH</span>\n</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S5.T1.16.16.16.2\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S5.T1.17.17.17.3\" style=\"padding:4pt 15.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.21.21.21\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T1.18.18.18.1\" rowspan=\"2\" style=\"padding:4pt 15.0pt;\"><span class=\"ltx_text\" id=\"S5.T1.18.18.18.1.1\"><img alt=\"[Uncaptioned image]\" class=\"ltx_graphics ltx_img_landscape\" height=\"61\" id=\"S5.T1.18.18.18.1.1.g1\" src=\"x11.png\" width=\"190\"/></span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T1.19.19.19.2\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S5.T1.21.21.21.5\" style=\"padding:4pt 15.0pt;\">\n<span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.21.21.21.5.1\">BH</span>, <span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.21.21.21.5.2\">FK</span>, <span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.21.21.21.5.3\">FH</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S5.T1.20.20.20.3\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S5.T1.21.21.21.4\" style=\"padding:4pt 15.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.24.24.24\">\n<td class=\"ltx_td ltx_align_center\" 
id=\"S5.T1.22.22.22.1\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S5.T1.24.24.24.4\" style=\"padding:4pt 15.0pt;\">\n<span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.24.24.24.4.1\">BA</span>, <span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.24.24.24.4.2\">BK</span>, <span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.24.24.24.4.3\">FK</span>\n</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S5.T1.23.23.23.2\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S5.T1.24.24.24.3\" style=\"padding:4pt 15.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.28.28.28\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T1.25.25.25.1\" rowspan=\"2\" style=\"padding:4pt 15.0pt;\"><span class=\"ltx_text\" id=\"S5.T1.25.25.25.1.1\"><img alt=\"[Uncaptioned image]\" class=\"ltx_graphics ltx_img_landscape\" height=\"61\" id=\"S5.T1.25.25.25.1.1.g1\" src=\"x12.png\" width=\"190\"/></span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T1.26.26.26.2\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S5.T1.28.28.28.5\" style=\"padding:4pt 15.0pt;\">\n<span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.28.28.28.5.1\">BH</span>, <span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.28.28.28.5.2\">FK</span>, <span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.28.28.28.5.3\">FH</span>, <span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.28.28.28.5.4\">BK</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S5.T1.27.27.27.3\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S5.T1.28.28.28.4\" style=\"padding:4pt 15.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.31.31.31\">\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T1.29.29.29.1\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S5.T1.31.31.31.4\" style=\"padding:4pt 
15.0pt;\">\n<span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.31.31.31.4.1\">BA</span>, <span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.31.31.31.4.2\">BK</span>, <span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.31.31.31.4.3\">FA</span>, <span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.31.31.31.4.4\">FH</span>\n</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S5.T1.30.30.30.2\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S5.T1.31.31.31.3\" style=\"padding:4pt 15.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.35.35.35\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T1.32.32.32.1\" rowspan=\"2\" style=\"padding:4pt 15.0pt;\"><span class=\"ltx_text\" id=\"S5.T1.32.32.32.1.1\"><img alt=\"[Uncaptioned image]\" class=\"ltx_graphics ltx_img_landscape\" height=\"61\" id=\"S5.T1.32.32.32.1.1.g1\" src=\"x13.png\" width=\"190\"/></span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T1.33.33.33.2\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S5.T1.35.35.35.5\" style=\"padding:4pt 15.0pt;\">\n<span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.35.35.35.5.1\">BH</span>, <span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.35.35.35.5.2\">FK</span>, <span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.35.35.35.5.3\">FH</span>, <span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.35.35.35.5.4\">BK</span>, <span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.35.35.35.5.5\">BA</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S5.T1.34.34.34.3\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S5.T1.35.35.35.4\" style=\"padding:4pt 15.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.38.38.38\">\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T1.36.36.36.1\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S5.T1.38.38.38.4\" 
style=\"padding:4pt 15.0pt;\">\n<span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.38.38.38.4.1\">BA</span>, <span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.38.38.38.4.2\">BK</span>, <span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.38.38.38.4.3\">FA</span>, <span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.38.38.38.4.4\">FH</span>, <span class=\"ltx_text ltx_font_typewriter\" id=\"S5.T1.38.38.38.4.5\">BH</span>\n</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S5.T1.37.37.37.2\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S5.T1.38.38.38.3\" style=\"padding:4pt 15.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.41.41.41\">\n<td class=\"ltx_td ltx_border_t\" id=\"S5.T1.41.41.41.4\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T1.39.39.39.1\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S5.T1.41.41.41.5\" style=\"padding:4pt 15.0pt;\">Fully Reincarnated</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S5.T1.40.40.40.2\" style=\"padding:4pt 15.0pt;\"></td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S5.T1.41.41.41.3\" style=\"padding:4pt 15.0pt;\"></td>\n</tr>\n</tbody>\n</table>\n</span></div>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>Return values for the best and worst runs for a given number of selectively reincarnated agents. An asterisk () indicates the highest value in each column. Values are given with one standard error.</figcaption>\n</figure>",
|
| 89 |
+
"capture": "Table 1: Return values for the best and worst runs for a given number of selectively reincarnated agents. An asterisk () indicates the highest value in each column. Values are given with one standard error."
|
| 90 |
+
}
|
| 91 |
+
},
|
| 92 |
+
"image_paths": {
|
| 93 |
+
"1(a)": {
|
| 94 |
+
"figure_path": "2304.00977v2_figure_1(a).png",
|
| 95 |
+
"caption": "(a) Training curves\nFigure 1: Performance using the two different teacher datasets. In the plot, a solid line indicates the mean value over the runs, and the shaded region indicates one standard error above and below the mean. In the table, values are given with one standard error.",
|
| 96 |
+
"url": "http://arxiv.org/html/2304.00977v2/x1.png"
|
| 97 |
+
},
|
| 98 |
+
"2(a)": {
|
| 99 |
+
"figure_path": "2304.00977v2_figure_2(a).png",
|
| 100 |
+
"caption": "(a) Plot over training period\nFigure 2: Selective reincarnation performance, aggregated over the number of agents reincarnated. In the plot, a solid line indicates the mean value over the runs, and the shaded region indicates one standard error above and below the mean. In the table, values are given with one standard error. A reminder: take caution when comparing the standard error metrics across values of x\ud835\udc65xitalic_x, since the number of runs depends on (6x)binomial6\ud835\udc65{{6}\\choose{x}}( binomial start_ARG 6 end_ARG start_ARG italic_x end_ARG ).",
|
| 101 |
+
"url": "http://arxiv.org/html/2304.00977v2/x2.png"
|
| 102 |
+
},
|
| 103 |
+
"3(a)": {
|
| 104 |
+
"figure_path": "2304.00977v2_figure_3(a).png",
|
| 105 |
+
"caption": "(a) Baselines\nFigure 3: Training curves for the best and worst combinations of reincarnated agents, decided by the average episode return achieved. A solid line indicates the mean value over five seeds, and the shaded region indicates one standard error above and below the mean. In Figures 3(b) to 3(f), the green and red lines indicate the maximum return achieved by the tabula rasa and fully-reincarnated approaches respectively.",
|
| 106 |
+
"url": "http://arxiv.org/html/2304.00977v2/x3.png"
|
| 107 |
+
},
|
| 108 |
+
"3(b)": {
|
| 109 |
+
"figure_path": "2304.00977v2_figure_3(b).png",
|
| 110 |
+
"caption": "(b) One reincarnated agent\nFigure 3: Training curves for the best and worst combinations of reincarnated agents, decided by the average episode return achieved. A solid line indicates the mean value over five seeds, and the shaded region indicates one standard error above and below the mean. In Figures 3(b) to 3(f), the green and red lines indicate the maximum return achieved by the tabula rasa and fully-reincarnated approaches respectively.",
|
| 111 |
+
"url": "http://arxiv.org/html/2304.00977v2/x4.png"
|
| 112 |
+
},
|
| 113 |
+
"3(c)": {
|
| 114 |
+
"figure_path": "2304.00977v2_figure_3(c).png",
|
| 115 |
+
"caption": "(c) Two reincarnated agents\nFigure 3: Training curves for the best and worst combinations of reincarnated agents, decided by the average episode return achieved. A solid line indicates the mean value over five seeds, and the shaded region indicates one standard error above and below the mean. In Figures 3(b) to 3(f), the green and red lines indicate the maximum return achieved by the tabula rasa and fully-reincarnated approaches respectively.",
|
| 116 |
+
"url": "http://arxiv.org/html/2304.00977v2/x5.png"
|
| 117 |
+
},
|
| 118 |
+
"3(d)": {
|
| 119 |
+
"figure_path": "2304.00977v2_figure_3(d).png",
|
| 120 |
+
"caption": "(d) Three reincarnated agents\nFigure 3: Training curves for the best and worst combinations of reincarnated agents, decided by the average episode return achieved. A solid line indicates the mean value over five seeds, and the shaded region indicates one standard error above and below the mean. In Figures 3(b) to 3(f), the green and red lines indicate the maximum return achieved by the tabula rasa and fully-reincarnated approaches respectively.",
|
| 121 |
+
"url": "http://arxiv.org/html/2304.00977v2/x6.png"
|
| 122 |
+
},
|
| 123 |
+
"3(e)": {
|
| 124 |
+
"figure_path": "2304.00977v2_figure_3(e).png",
|
| 125 |
+
"caption": "(e) Four reincarnated agents\nFigure 3: Training curves for the best and worst combinations of reincarnated agents, decided by the average episode return achieved. A solid line indicates the mean value over five seeds, and the shaded region indicates one standard error above and below the mean. In Figures 3(b) to 3(f), the green and red lines indicate the maximum return achieved by the tabula rasa and fully-reincarnated approaches respectively.",
|
| 126 |
+
"url": "http://arxiv.org/html/2304.00977v2/x7.png"
|
| 127 |
+
},
|
| 128 |
+
"3(f)": {
|
| 129 |
+
"figure_path": "2304.00977v2_figure_3(f).png",
|
| 130 |
+
"caption": "(f) Five reincarnated agents\nFigure 3: Training curves for the best and worst combinations of reincarnated agents, decided by the average episode return achieved. A solid line indicates the mean value over five seeds, and the shaded region indicates one standard error above and below the mean. In Figures 3(b) to 3(f), the green and red lines indicate the maximum return achieved by the tabula rasa and fully-reincarnated approaches respectively.",
|
| 131 |
+
"url": "http://arxiv.org/html/2304.00977v2/x8.png"
|
| 132 |
+
},
|
| 133 |
+
"4(a)": {
|
| 134 |
+
"figure_path": "2304.00977v2_figure_4(a).png",
|
| 135 |
+
"caption": "(a) Performance Profiles\nFigure 4: MARL-eval (Gorsane et al., 2022; Agarwal et al., 2021) plots comparing the best performing combination, based on final performance after 250\u2062k250\ud835\udc58250k250 italic_k training steps, of x\ud835\udc65xitalic_x reincarnated agents for each x\u2208[0,n]\ud835\udc650\ud835\udc5bx\\in[0,n]italic_x \u2208 [ 0 , italic_n ].",
|
| 136 |
+
"url": "http://arxiv.org/html/2304.00977v2/x14.png"
|
| 137 |
+
},
|
| 138 |
+
"4(b)": {
|
| 139 |
+
"figure_path": "2304.00977v2_figure_4(b).png",
|
| 140 |
+
"caption": "(b) Probability of Improvement\nFigure 4: MARL-eval (Gorsane et al., 2022; Agarwal et al., 2021) plots comparing the best performing combination, based on final performance after 250\u2062k250\ud835\udc58250k250 italic_k training steps, of x\ud835\udc65xitalic_x reincarnated agents for each x\u2208[0,n]\ud835\udc650\ud835\udc5bx\\in[0,n]italic_x \u2208 [ 0 , italic_n ].",
|
| 141 |
+
"url": "http://arxiv.org/html/2304.00977v2/x15.png"
|
| 142 |
+
},
|
| 143 |
+
"4(c)": {
|
| 144 |
+
"figure_path": "2304.00977v2_figure_4(c).png",
|
| 145 |
+
"caption": "(c) Aggregate Scores\nFigure 4: MARL-eval (Gorsane et al., 2022; Agarwal et al., 2021) plots comparing the best performing combination, based on final performance after 250\u2062k250\ud835\udc58250k250 italic_k training steps, of x\ud835\udc65xitalic_x reincarnated agents for each x\u2208[0,n]\ud835\udc650\ud835\udc5bx\\in[0,n]italic_x \u2208 [ 0 , italic_n ].",
|
| 146 |
+
"url": "http://arxiv.org/html/2304.00977v2/x16.png"
|
| 147 |
+
},
|
| 148 |
+
"5": {
|
| 149 |
+
"figure_path": "2304.00977v2_figure_5.png",
|
| 150 |
+
"caption": "Figure A.1: The HalfCheetah environment (Wawrzynski, 2007; Todorov et al., 2012) viewed from the perspective of six separate agents (Peng et al., 2021). The array indices from the MAMuJoCo environment are given in brackets. Note that this diagram is purely illustrative and is not drawn with the correct relative scale.",
|
| 151 |
+
"url": "http://arxiv.org/html/2304.00977v2/x17.png"
|
| 152 |
+
},
|
| 153 |
+
"6(a)": {
|
| 154 |
+
"figure_path": "2304.00977v2_figure_6(a).png",
|
| 155 |
+
"caption": "(a) \u2018Good\u2019 Teacher Dataset\nFigure A.2: Histograms of episode returns from the two different datasets.",
|
| 156 |
+
"url": "http://arxiv.org/html/2304.00977v2/extracted/5948770/figures/good_profile_results.png"
|
| 157 |
+
},
|
| 158 |
+
"6(b)": {
|
| 159 |
+
"figure_path": "2304.00977v2_figure_6(b).png",
|
| 160 |
+
"caption": "(b) \u2018Good-Medium\u2019 Teacher Dataset\nFigure A.2: Histograms of episode returns from the two different datasets.",
|
| 161 |
+
"url": "http://arxiv.org/html/2304.00977v2/extracted/5948770/figures/good_medium_profile_results.png"
|
| 162 |
+
},
|
| 163 |
+
"7(a)": {
|
| 164 |
+
"figure_path": "2304.00977v2_figure_7(a).png",
|
| 165 |
+
"caption": "(a)\nFigure A.3: Comparisons of some interesting selective reincarnation patterns in HalfCheetah. In the plots, a solid line indicates the mean value over the runs, and the shaded region indicates one standard error above and below the mean.",
|
| 166 |
+
"url": "http://arxiv.org/html/2304.00977v2/x19.png"
|
| 167 |
+
},
|
| 168 |
+
"7(b)": {
|
| 169 |
+
"figure_path": "2304.00977v2_figure_7(b).png",
|
| 170 |
+
"caption": "(b)\nFigure A.3: Comparisons of some interesting selective reincarnation patterns in HalfCheetah. In the plots, a solid line indicates the mean value over the runs, and the shaded region indicates one standard error above and below the mean.",
|
| 171 |
+
"url": "http://arxiv.org/html/2304.00977v2/x21.png"
|
| 172 |
+
},
|
| 173 |
+
"7(c)": {
|
| 174 |
+
"figure_path": "2304.00977v2_figure_7(c).png",
|
| 175 |
+
"caption": "(c)\nFigure A.3: Comparisons of some interesting selective reincarnation patterns in HalfCheetah. In the plots, a solid line indicates the mean value over the runs, and the shaded region indicates one standard error above and below the mean.",
|
| 176 |
+
"url": "http://arxiv.org/html/2304.00977v2/x23.png"
|
| 177 |
+
},
|
| 178 |
+
"7(d)": {
|
| 179 |
+
"figure_path": "2304.00977v2_figure_7(d).png",
|
| 180 |
+
"caption": "(d)\nFigure A.3: Comparisons of some interesting selective reincarnation patterns in HalfCheetah. In the plots, a solid line indicates the mean value over the runs, and the shaded region indicates one standard error above and below the mean.",
|
| 181 |
+
"url": "http://arxiv.org/html/2304.00977v2/x25.png"
|
| 182 |
+
}
|
| 183 |
+
},
|
| 184 |
+
"validation": true,
|
| 185 |
+
"references": [
|
| 186 |
+
{
|
| 187 |
+
"1": {
|
| 188 |
+
"title": "Deep reinforcement learning at the edge of the statistical precipice.",
|
| 189 |
+
"author": "Rishabh Agarwal, Max Schwarzer, Pablo Samuel Castro, Aaron Courville, and\nMarc G. Bellemare.",
|
| 190 |
+
"venue": "In Advances in Neural Information Processing Systems, 2021.",
|
| 191 |
+
"url": null
|
| 192 |
+
}
|
| 193 |
+
},
|
| 194 |
+
{
|
| 195 |
+
"2": {
|
| 196 |
+
"title": "Reincarnating reinforcement learning: Reusing prior computation to\naccelerate progress.",
|
| 197 |
+
"author": "Rishabh Agarwal, Max Schwarzer, Pablo Samuel Castro, Aaron Courville, and\nMarc G Bellemare.",
|
| 198 |
+
"venue": "In Advances in Neural Information Processing Systems, 2022.",
|
| 199 |
+
"url": null
|
| 200 |
+
}
|
| 201 |
+
},
|
| 202 |
+
{
|
| 203 |
+
"3": {
|
| 204 |
+
"title": "Efficient online reinforcement learning with offline data, 2023.",
|
| 205 |
+
"author": "Philip J. Ball, Laura Smith, Ilya Kostrikov, and Sergey Levine.",
|
| 206 |
+
"venue": "URL https://arxiv.org/abs/2302.02948.",
|
| 207 |
+
"url": null
|
| 208 |
+
}
|
| 209 |
+
},
|
| 210 |
+
{
|
| 211 |
+
"4": {
|
| 212 |
+
"title": "Improving td3-bc: Relaxed policy constraint for offline learning and\nstable online fine-tuning.",
|
| 213 |
+
"author": "Alex Beeson and Giovanni Montana.",
|
| 214 |
+
"venue": "In 3rd Offline Reinforcement Learning Workshop at Neural\nInformation Processing Systems: Offline RL as a\u201dLaunchpad\u201d, 2022.",
|
| 215 |
+
"url": null
|
| 216 |
+
}
|
| 217 |
+
},
|
| 218 |
+
{
|
| 219 |
+
"5": {
|
| 220 |
+
"title": "Dota 2 with large scale deep reinforcement learning.",
|
| 221 |
+
"author": "Christopher Berner, Greg Brockman, Brooke Chan, Vicki Cheung, Przemyslaw\nDebiak, Christy Dennison, David Farhi, Quirin Fischer, Shariq Hashme,\nChristopher Hesse, Rafal J\u00f3zefowicz, Scott Gray, Catherine Olsson,\nJakub Pachocki, Michael Petrov, Henrique Pond\u00e9 de Oliveira Pinto,\nJonathan Raiman, Tim Salimans, Jeremy Schlatter, Jonas Schneider, Szymon\nSidor, Ilya Sutskever, Jie Tang, Filip Wolski, and Susan Zhang.",
|
| 222 |
+
"venue": "CoRR, abs/1912.06680, 2019.",
|
| 223 |
+
"url": null
|
| 224 |
+
}
|
| 225 |
+
},
|
| 226 |
+
{
|
| 227 |
+
"6": {
|
| 228 |
+
"title": "The complexity of decentralized control of markov decision processes.",
|
| 229 |
+
"author": "Daniel S Bernstein, Robert Givan, Neil Immerman, and Shlomo Zilberstein.",
|
| 230 |
+
"venue": "Mathematics of operations research, 27(4):819\u2013840, 2002.",
|
| 231 |
+
"url": null
|
| 232 |
+
}
|
| 233 |
+
},
|
| 234 |
+
{
|
| 235 |
+
"7": {
|
| 236 |
+
"title": "The influence of pattern similarity and transfer learning upon\ntraining of a base perceptron b2.",
|
| 237 |
+
"author": "Stevo Bozinovski and Ante Fulgosi.",
|
| 238 |
+
"venue": "In Proceedings of Symposium Informatica, volume 3, pp. 121\u2013126, 1976.",
|
| 239 |
+
"url": null
|
| 240 |
+
}
|
| 241 |
+
},
|
| 242 |
+
{
|
| 243 |
+
"8": {
|
| 244 |
+
"title": "A survey on multi-agent deep reinforcement learning: from the\nperspective of challenges and applications.",
|
| 245 |
+
"author": "Wei Du and Shifei Ding.",
|
| 246 |
+
"venue": "Artificial Intelligence Review, 54:3215\u20133238, 2021.",
|
| 247 |
+
"url": null
|
| 248 |
+
}
|
| 249 |
+
},
|
| 250 |
+
{
|
| 251 |
+
"9": {
|
| 252 |
+
"title": "Probabilistic policy reuse in a reinforcement learning agent.",
|
| 253 |
+
"author": "Fernando Fern\u00e1ndez and Manuela Veloso.",
|
| 254 |
+
"venue": "In Proceedings of the fifth international joint conference on\nAutonomous agents and multiagent systems, pp. 720\u2013727, 2006.",
|
| 255 |
+
"url": null
|
| 256 |
+
}
|
| 257 |
+
},
|
| 258 |
+
{
|
| 259 |
+
"10": {
|
| 260 |
+
"title": "Off-the-grid marl: a framework for dataset generation with baselines\nfor cooperative offline multi-agent reinforcement learning, 2023.",
|
| 261 |
+
"author": "Claude Formanek, Asad Jeewa, Jonathan Shock, and Arnu Pretorius.",
|
| 262 |
+
"venue": "URL https://arxiv.org/abs/2302.00521.",
|
| 263 |
+
"url": null
|
| 264 |
+
}
|
| 265 |
+
},
|
| 266 |
+
{
|
| 267 |
+
"11": {
|
| 268 |
+
"title": "A minimalist approach to offline reinforcement learning.",
|
| 269 |
+
"author": "Scott Fujimoto and Shixiang Shane Gu.",
|
| 270 |
+
"venue": "Advances in neural information processing systems,\n34:20132\u201320145, 2021.",
|
| 271 |
+
"url": null
|
| 272 |
+
}
|
| 273 |
+
},
|
| 274 |
+
{
|
| 275 |
+
"12": {
|
| 276 |
+
"title": "Knowru: Knowledge reuse via knowledge distillation in multi-agent\nreinforcement learning.",
|
| 277 |
+
"author": "Zijian Gao, Kele Xu, Bo Ding, and Huaimin Wang.",
|
| 278 |
+
"venue": "Entropy, 23(8):1043, 2021.",
|
| 279 |
+
"url": null
|
| 280 |
+
}
|
| 281 |
+
},
|
| 282 |
+
{
|
| 283 |
+
"13": {
|
| 284 |
+
"title": "Towards a standardised performance evaluation protocol for\ncooperative marl.",
|
| 285 |
+
"author": "Rihab Gorsane, Omayma Mahjoub, Ruan John de Kock, Roland Dubb, Siddarth Singh,\nand Arnu Pretorius.",
|
| 286 |
+
"venue": "In Advances in Neural Information Processing Systems, 2022.",
|
| 287 |
+
"url": null
|
| 288 |
+
}
|
| 289 |
+
},
|
| 290 |
+
{
|
| 291 |
+
"14": {
|
| 292 |
+
"title": "Making efficient use of demonstrations to solve hard exploration\nproblems.",
|
| 293 |
+
"author": "\u00c7aglar G\u00fcl\u00e7ehre, Tom Le Paine, Bobak Shahriari, Misha Denil,\nMatt Hoffman, Hubert Soyer, Richard Tanburn, Steven Kapturowski, Neil C.\nRabinowitz, Duncan Williams, Gabriel Barth-Maron, Ziyu Wang, Nando\nde Freitas, and Worlds Team.",
|
| 294 |
+
"venue": "In 8th International Conference on Learning Representations,\nICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net, 2020.",
|
| 295 |
+
"url": null
|
| 296 |
+
}
|
| 297 |
+
},
|
| 298 |
+
{
|
| 299 |
+
"15": {
|
| 300 |
+
"title": "Deep recurrent q-learning for partially observable mdps.",
|
| 301 |
+
"author": "Matthew Hausknecht and Peter Stone.",
|
| 302 |
+
"venue": "In 2015 aaai fall symposium series, 2015.",
|
| 303 |
+
"url": null
|
| 304 |
+
}
|
| 305 |
+
},
|
| 306 |
+
{
|
| 307 |
+
"16": {
|
| 308 |
+
"title": "\u201cother-play\u201d for zero-shot coordination.",
|
| 309 |
+
"author": "Hengyuan Hu, Adam Lerer, Alex Peysakhovich, and Jakob Foerster.",
|
| 310 |
+
"venue": "In Hal Daum\u00e9 III and Aarti Singh (eds.), Proceedings of the\n37th International Conference on Machine Learning, volume 119 of\nProceedings of Machine Learning Research, pp. 4399\u20134410. PMLR,\n13\u201318 Jul 2020.",
|
| 311 |
+
"url": null
|
| 312 |
+
}
|
| 313 |
+
},
|
| 314 |
+
{
|
| 315 |
+
"17": {
|
| 316 |
+
"title": "Emergent bartering behaviour in multi-agent reinforcement learning.",
|
| 317 |
+
"author": "Michael Bradley Johanson, Edward Hughes, Finbarr Timbers, and Joel Z Leibo.",
|
| 318 |
+
"venue": "arXiv preprint arXiv:2205.06760, 2022.",
|
| 319 |
+
"url": null
|
| 320 |
+
}
|
| 321 |
+
},
|
| 322 |
+
{
|
| 323 |
+
"18": {
|
| 324 |
+
"title": "Transfer learning method using ontology for heterogeneous multi-agent\nreinforcement learning.",
|
| 325 |
+
"author": "Hitoshi Kono, Akiya Kamimura, Kohji Tomita, Yuta Murata, and Tsuyoshi Suzuki.",
|
| 326 |
+
"venue": "International Journal of Advanced Computer Science and\nApplications, 5(10), 2014.",
|
| 327 |
+
"url": null
|
| 328 |
+
}
|
| 329 |
+
},
|
| 330 |
+
{
|
| 331 |
+
"19": {
|
| 332 |
+
"title": "Continuous control with deep reinforcement learning.",
|
| 333 |
+
"author": "Timothy P. Lillicrap, Jonathan J. Hunt, Alexander Pritzel, Nicolas Heess, Tom\nErez, Yuval Tassa, David Silver, and Daan Wierstra.",
|
| 334 |
+
"venue": "In Yoshua Bengio and Yann LeCun (eds.), ICLR, 2016.",
|
| 335 |
+
"url": null
|
| 336 |
+
}
|
| 337 |
+
},
|
| 338 |
+
{
|
| 339 |
+
"20": {
|
| 340 |
+
"title": "Multi-agent actor-critic for mixed cooperative-competitive\nenvironments.",
|
| 341 |
+
"author": "Ryan Lowe, YI WU, Aviv Tamar, Jean Harb, OpenAI Pieter Abbeel, and Igor\nMordatch.",
|
| 342 |
+
"venue": "In I. Guyon, U. Von Luxburg, S. Bengio, H. Wallach, R. Fergus,\nS. Vishwanathan, and R. Garnett (eds.), Advances in Neural Information\nProcessing Systems, volume 30. Curran Associates, Inc., 2017.",
|
| 343 |
+
"url": null
|
| 344 |
+
}
|
| 345 |
+
},
|
| 346 |
+
{
|
| 347 |
+
"21": {
|
| 348 |
+
"title": "Offline pre-trained multi-agent decision transformer: One big\nsequence model conquers all starcraftii tasks.",
|
| 349 |
+
"author": "Linghui Meng, Muning Wen, Yaodong Yang, Chenyang Le, Xiyun Li, Weinan Zhang,\nYing Wen, Haifeng Zhang, Jun Wang, and Bo Xu.",
|
| 350 |
+
"venue": "arXiv preprint arXiv:2112.02845, 2021.",
|
| 351 |
+
"url": null
|
| 352 |
+
}
|
| 353 |
+
},
|
| 354 |
+
{
|
| 355 |
+
"22": {
|
| 356 |
+
"title": "Playing atari with deep reinforcement learning.",
|
| 357 |
+
"author": "Volodymyr Mnih, Koray Kavukcuoglu, David Silver, Alex Graves, Ioannis\nAntonoglou, Daan Wierstra, and Martin Riedmiller.",
|
| 358 |
+
"venue": "NIPS Deep Learning Workshop 2013, 2013.",
|
| 359 |
+
"url": null
|
| 360 |
+
}
|
| 361 |
+
},
|
| 362 |
+
{
|
| 363 |
+
"23": {
|
| 364 |
+
"title": "Continuous coordination as a realistic scenario for lifelong\nlearning.",
|
| 365 |
+
"author": "Hadi Nekoei, Akilesh Badrinaaraayanan, Aaron Courville, and Sarath Chandar.",
|
| 366 |
+
"venue": "In International Conference on Machine Learning, pp. 8016\u20138024. PMLR, 2021.",
|
| 367 |
+
"url": null
|
| 368 |
+
}
|
| 369 |
+
},
|
| 370 |
+
{
|
| 371 |
+
"24": {
|
| 372 |
+
"title": "Dealing with non-stationarity in multi-agent deep reinforcement\nlearning.",
|
| 373 |
+
"author": "Georgios Papoudakis, Filippos Christianos, Arrasy Rahman, and Stefano V.\nAlbrecht.",
|
| 374 |
+
"venue": "arXiv preprint arXiv:1906.04737, 2019.",
|
| 375 |
+
"url": null
|
| 376 |
+
}
|
| 377 |
+
},
|
| 378 |
+
{
|
| 379 |
+
"25": {
|
| 380 |
+
"title": "Agent modelling under partial observability for deep reinforcement\nlearning.",
|
| 381 |
+
"author": "Georgios Papoudakis, Filippos Christianos, and Stefano V. Albrecht.",
|
| 382 |
+
"venue": "In Proceedings of the Neural Information Processing Systems\n(NeurIPS), 2021.",
|
| 383 |
+
"url": null
|
| 384 |
+
}
|
| 385 |
+
},
|
| 386 |
+
{
|
| 387 |
+
"26": {
|
| 388 |
+
"title": "Facmac: Factored multi-agent centralised policy gradients.",
|
| 389 |
+
"author": "Bei Peng, Tabish Rashid, Christian Schroeder de Witt, Pierre-Alexandre\nKamienny, Philip Torr, Wendelin B\u00f6hmer, and Shimon Whiteson.",
|
| 390 |
+
"venue": "Advances in Neural Information Processing Systems,\n34:12208\u201312221, 2021.",
|
| 391 |
+
"url": null
|
| 392 |
+
}
|
| 393 |
+
},
|
| 394 |
+
{
|
| 395 |
+
"27": {
|
| 396 |
+
"title": "Incremental multi-step q-learning.",
|
| 397 |
+
"author": "Jing Peng and Ronald J. Williams.",
|
| 398 |
+
"venue": "Machine Learning, 22:283\u2013290, 1994.",
|
| 399 |
+
"url": null
|
| 400 |
+
}
|
| 401 |
+
},
|
| 402 |
+
{
|
| 403 |
+
"28": {
|
| 404 |
+
"title": "Monotonic value function factorisation for deep multi-agent\nreinforcement learning.",
|
| 405 |
+
"author": "Tabish Rashid, Mikayel Samvelyan, Christian Schroeder De Witt, Gregory\nFarquhar, Jakob Foerster, and Shimon Whiteson.",
|
| 406 |
+
"venue": "J. Mach. Learn. Res., 21(1), jun 2020.",
|
| 407 |
+
"url": null
|
| 408 |
+
}
|
| 409 |
+
},
|
| 410 |
+
{
|
| 411 |
+
"29": {
|
| 412 |
+
"title": "The starcraft multi-agent challenge.",
|
| 413 |
+
"author": "Mikayel Samvelyan, Tabish Rashid, Christian Schroeder de Witt, Gregory\nFarquhar, Nantas Nardelli, Tim GJ Rudner, Chia-Man Hung, Philip HS Torr,\nJakob Foerster, and Shimon Whiteson.",
|
| 414 |
+
"venue": "In Proceedings of the 18th International Conference on\nAutonomous Agents and MultiAgent Systems, pp. 2186\u20132188, 2019.",
|
| 415 |
+
"url": null
|
| 416 |
+
}
|
| 417 |
+
},
|
| 418 |
+
{
|
| 419 |
+
"30": {
|
| 420 |
+
"title": "Kickstarting deep reinforcement learning, 2018.",
|
| 421 |
+
"author": "Simon Schmitt, Jonathan J. Hudson, Augustin Zidek, Simon Osindero, Carl\nDoersch, Wojciech M. Czarnecki, Joel Z. Leibo, Heinrich Kuttler, Andrew\nZisserman, Karen Simonyan, and S. M. Ali Eslami.",
|
| 422 |
+
"venue": "URL https://arxiv.org/abs/1803.03835.",
|
| 423 |
+
"url": null
|
| 424 |
+
}
|
| 425 |
+
},
|
| 426 |
+
{
|
| 427 |
+
"31": {
|
| 428 |
+
"title": "Mastering atari, go, chess and shogi by planning with a learned\nmodel.",
|
| 429 |
+
"author": "Julian Schrittwieser, Ioannis Antonoglou, Thomas Hubert, Karen Simonyan,\nLaurent Sifre, Simon Schmitt, Arthur Guez, Edward Lockhart, Demis Hassabis,\nThore Graepel, et al.",
|
| 430 |
+
"venue": "Nature, 588(7839):604\u2013609, 2020.",
|
| 431 |
+
"url": null
|
| 432 |
+
}
|
| 433 |
+
},
|
| 434 |
+
{
|
| 435 |
+
"32": {
|
| 436 |
+
"title": "Cnn features off-the-shelf: an astounding baseline for recognition.",
|
| 437 |
+
"author": "Ali Sharif Razavian, Hossein Azizpour, Josephine Sullivan, and Stefan Carlsson.",
|
| 438 |
+
"venue": "In Proceedings of the IEEE conference on computer vision and\npattern recognition workshops, pp. 806\u2013813, 2014.",
|
| 439 |
+
"url": null
|
| 440 |
+
}
|
| 441 |
+
},
|
| 442 |
+
{
|
| 443 |
+
"33": {
|
| 444 |
+
"title": "Mastering the game of go with deep neural networks and tree search.",
|
| 445 |
+
"author": "David Silver, Aja Huang, Chris J. Maddison, Arthur Guez, Laurent Sifre, George\nvan den Driessche, Julian Schrittwieser, Ioannis Antonoglou, Veda\nPanneershelvam, Marc Lanctot, Sander Dieleman, Dominik Grewe, John Nham, Nal\nKalchbrenner, Ilya Sutskever, Timothy Lillicrap, Madeleine Leach, Koray\nKavukcuoglu, Thore Graepel, and Demis Hassabis.",
|
| 446 |
+
"venue": "Nature, 529(7587):484\u2013489, Jan 2016.",
|
| 447 |
+
"url": null
|
| 448 |
+
}
|
| 449 |
+
},
|
| 450 |
+
{
|
| 451 |
+
"34": {
|
| 452 |
+
"title": "Mastering the game of go without human knowledge.",
|
| 453 |
+
"author": "David Silver, Julian Schrittwieser, Karen Simonyan, Ioannis Antonoglou, Aja\nHuang, Arthur Guez, Thomas Hubert, Lucas Baker, Matthew Lai, Adrian Bolton,\net al.",
|
| 454 |
+
"venue": "Nature, 550(7676):354\u2013359, 2017.",
|
| 455 |
+
"url": null
|
| 456 |
+
}
|
| 457 |
+
},
|
| 458 |
+
{
|
| 459 |
+
"35": {
|
| 460 |
+
"title": "Ad hoc autonomous agent teams: Collaboration without\npre-coordination.",
|
| 461 |
+
"author": "Peter Stone, Gal Kaminka, Sarit Kraus, and Jeffrey Rosenschein.",
|
| 462 |
+
"venue": "In Proceedings of the AAAI Conference on Artificial\nIntelligence, volume 24.1, pp. 1504\u20131509, 2010.",
|
| 463 |
+
"url": null
|
| 464 |
+
}
|
| 465 |
+
},
|
| 466 |
+
{
|
| 467 |
+
"36": {
|
| 468 |
+
"title": "Reinforcement Learning: An Introduction.",
|
| 469 |
+
"author": "Richard S. Sutton and Andrew G. Barto.",
|
| 470 |
+
"venue": "A Bradford Book, Cambridge, MA, USA, 2018.",
|
| 471 |
+
"url": null
|
| 472 |
+
}
|
| 473 |
+
},
|
| 474 |
+
{
|
| 475 |
+
"37": {
|
| 476 |
+
"title": "Multiagent cooperation and competition with deep reinforcement\nlearning, 2015.",
|
| 477 |
+
"author": "Ardi Tampuu, Tambet Matiisen, Dorian Kodelja, Ilya Kuzovkin, Kristjan Korjus,\nJuhan Aru, Jaan Aru, and Raul Vicente.",
|
| 478 |
+
"venue": "URL https://arxiv.org/abs/1511.08779.",
|
| 479 |
+
"url": null
|
| 480 |
+
}
|
| 481 |
+
},
|
| 482 |
+
{
|
| 483 |
+
"38": {
|
| 484 |
+
"title": "Mujoco: A physics engine for model-based control.",
|
| 485 |
+
"author": "Emanuel Todorov, Tom Erez, and Yuval Tassa.",
|
| 486 |
+
"venue": "In 2012 IEEE/RSJ International Conference on Intelligent Robots\nand Systems, pp. 5026\u20135033, 2012.",
|
| 487 |
+
"url": null
|
| 488 |
+
}
|
| 489 |
+
},
|
| 490 |
+
{
|
| 491 |
+
"39": {
|
| 492 |
+
"title": "Jump-start reinforcement learning, 2022.",
|
| 493 |
+
"author": "Ikechukwu Uchendu, Ted Xiao, Yao Lu, Banghua Zhu, Mengyuan Yan, Jos\u00e9phine\nSimon, Matthew Bennice, Chuyuan Fu, Cong Ma, Jiantao Jiao, Sergey Levine, and\nKarol Hausman.",
|
| 494 |
+
"venue": "URL https://arxiv.org/abs/2204.02372.",
|
| 495 |
+
"url": null
|
| 496 |
+
}
|
| 497 |
+
},
|
| 498 |
+
{
|
| 499 |
+
"40": {
|
| 500 |
+
"title": "Grandmaster level in starcraft ii using multi-agent reinforcement\nlearning.",
|
| 501 |
+
"author": "Oriol Vinyals, Igor Babuschkin, Wojciech M Czarnecki, Micha\u00ebl Mathieu,\nAndrew Dudzik, Junyoung Chung, David H Choi, Richard Powell, Timo Ewalds,\nPetko Georgiev, et al.",
|
| 502 |
+
"venue": "Nature, 575(7782):350\u2013354, 2019.",
|
| 503 |
+
"url": null
|
| 504 |
+
}
|
| 505 |
+
},
|
| 506 |
+
{
|
| 507 |
+
"41": {
|
| 508 |
+
"title": "Roma: Multi-agent reinforcement learning with emergent roles.",
|
| 509 |
+
"author": "Tonghan Wang, Heng Dong, Victor Lesser, and Chongjie Zhang.",
|
| 510 |
+
"venue": "In International Conference on Machine Learning, pp. 9876\u20139886. PMLR, 2020.",
|
| 511 |
+
"url": null
|
| 512 |
+
}
|
| 513 |
+
},
|
| 514 |
+
{
|
| 515 |
+
"42": {
|
| 516 |
+
"title": "Learning from delayed rewards.",
|
| 517 |
+
"author": "Christopher John Cornish Hellaby Watkins.",
|
| 518 |
+
"venue": "PhD Thesis, 1989.",
|
| 519 |
+
"url": null
|
| 520 |
+
}
|
| 521 |
+
},
|
| 522 |
+
{
|
| 523 |
+
"43": {
|
| 524 |
+
"title": "Learning to control a 6-degree-of-freedom walking robot.",
|
| 525 |
+
"author": "Pawel Wawrzynski.",
|
| 526 |
+
"venue": "In EUROCON 2007-The International Conference on\" Computer as a\nTool\", pp. 698\u2013705. IEEE, 2007.",
|
| 527 |
+
"url": null
|
| 528 |
+
}
|
| 529 |
+
},
|
| 530 |
+
{
|
| 531 |
+
"44": {
|
| 532 |
+
"title": "Outracing champion gran turismo drivers with deep reinforcement\nlearning.",
|
| 533 |
+
"author": "Peter R Wurman, Samuel Barrett, Kenta Kawamoto, James MacGlashan, Kaushik\nSubramanian, Thomas J Walsh, Roberto Capobianco, Alisa Devlic, Franziska\nEckert, Florian Fuchs, et al.",
|
| 534 |
+
"venue": "Nature, 602(7896):223\u2013228, 2022.",
|
| 535 |
+
"url": null
|
| 536 |
+
}
|
| 537 |
+
},
|
| 538 |
+
{
|
| 539 |
+
"45": {
|
| 540 |
+
"title": "Transfer learning in deep reinforcement learning: A survey.",
|
| 541 |
+
"author": "Zhuangdi Zhu, Kaixiang Lin, and Jiayu Zhou.",
|
| 542 |
+
"venue": "arXiv preprint arXiv:2009.07888, 2020.",
|
| 543 |
+
"url": null
|
| 544 |
+
}
|
| 545 |
+
}
|
| 546 |
+
],
|
| 547 |
+
"url": "http://arxiv.org/html/2304.00977v2"
|
| 548 |
+
}
|
20241030/2305.12715v4.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2305.14434v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2307.08235v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2307.08925v3.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2307.10349v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2309.04459v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2309.12927v3.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2310.05185v3.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2310.07355v5.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2310.08975v3.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2310.14692v3.json
ADDED
|
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"title": "On Unsupervised Partial Shape Correspondence",
|
| 3 |
+
"abstract": "While dealing with matching shapes to their parts, we often apply a tool known as functional maps.\nThe idea is to translate the shape matching problem into \u201cconvenient\u201d spaces by which matching is performed algebraically by solving a least squares problem.\nHere, we argue that such formulations, though popular in this field, introduce errors in the estimated match when partiality is invoked.\nSuch errors are unavoidable even for advanced feature extraction networks, and they can be shown to escalate with increasing degrees of shape partiality, adversely affecting the learning capability of such systems.\nTo circumvent these limitations, we propose a novel approach for partial shape matching.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "In recent years, the emergence of deep learning allowed significant leap forwards in efficiently solving computational problems, like finding the point-to-point correspondence between two sampled surfaces.\nAnd yet, for unsupervised partial shape matching the contribution of deep learning was somewhat limited.\nMost state-of-the-art shape correspondence approaches rely on the functional maps framework [34 ###reference_b34###], which searches for the mapping between spectral representations of corresponding features on the two input shapes.\nOne popular framework uses a functional map layer [26 ###reference_b26###] as a differentiable component in a neural network architecture.\nWe will show that such a functional map layer introduces inherent errors in the case of partial shape matching.\nTheoretically, we could overcome such unavoidable errors of using functional maps as layers in a neural network.\nFor example, one could first detect the corresponding parts by matching the spectra of an appropriate Hamiltonian of the part with that of the Laplacian of the whole\n[36 ###reference_b36###, 5 ###reference_b5###].\nAfter detecting the matching parts,\nthe point-to-point correspondence can be solved by resorting to an algebraic problem defined by the relation of projected scalar feature functions onto spectral domains, aka functional maps.\nState-of-the-art shape correspondence approaches often involve three components [20 ###reference_b20###, 40 ###reference_b40###, 42 ###reference_b42###, 14 ###reference_b14###, 41 ###reference_b41###, 16 ###reference_b16###].\nInitially, a network computes refined pointwise features.\nNext, a functional map layer uses these features to estimate the correspondence between the shapes.\nFinally, loss functions based on the resulting functional map are applied during training to further adapt the features to the data at hand.\nIn this paper, we analyse the use of least squares for estimating the functional map in the functional 
map layer and focus on the resulting errors involved in partiality.\nWhile previous studies argue that it should be possible to use least squares in the functional map layer to obtain the ground-truth map without errors [4 ###reference_b4###], here, we prove that the projected errors cannot be avoided in such a setting.\nFurthermore, the magnitude of the error directly relates to the degree of partiality \u2013 the smaller the part of the shape the larger the error.\nThe reason for this error stems from the fact that the least squares method considers the inner product of the shape\u2019s basis functions and the feature descriptors.\nThat inner product involves integration over the whole shape.\nWith this observation in mind, we propose a novel approach for partial shape matching, surpassing all existing unsupervised techniques.\nThe proposed method deviates from the conventional process of generating a functional map from the extracted features prior to the correspondence.\nWe avoid the functional map formulation altogether and establish correspondence between partial and full shapes through direct feature matching.\nIn essence, we demonstrate that for partial shape matching, choosing an appropriate loss, like the Gromov Distance [11 ###reference_b11###, 13 ###reference_b13###, 32 ###reference_b32###], is as important as operating in convenient spaces, like the eigenspaces of the corresponding shape laplacians.\nMoreover, when treated as a tool implementing intrinsic smoothing [6 ###reference_b6###] rather than a functional map network layer [26 ###reference_b26###], one could harness the power of the Laplace-Beltrami operator (LBO) spectral representation for state-of-the-art solutions for shape matching.\nThe proposed network optimization process involves a two term loss.\nThe core, inspired by [20 ###reference_b20###, 11 ###reference_b11###, 33 ###reference_b33###, 9 ###reference_b9###, 10 ###reference_b10###],\nutilizes the near isometry property of surfaces 
in nature as a measure of correspondence that can serve as an unsupervised/semi-supervised way of training our network.\nThis loss measures the distortions of geodesic distance between corresponding points, when mapping one shape to the other.\nFor the second regularization-term we suggest two penalizing options, related to the area preserving mapping [40 ###reference_b40###], and an option to avoid the need to compute the functional map altogether.\nWe test the proposed framework on the SHREC\u201916 dataset [15 ###reference_b15###], containing two different types of shape partiality benchmarks, cuts and holes.\nWe also evaluate it on our new dataset, PFAUST, with two different densities of holes.\nWe show that the proposed method has the lowest error in comparison to previous unsupervised partial shape matching methods.\nContributions.\nWe prove that for partial shape matching a functional map layer introduces unavoidable errors into the resulting correspondence function.\nWe introduce PFAUST, a new benchmark that includes two datasets with different densities of holes, dedicated to partial-to-full shape correspondence.\nWe present a novel approach for partial shape matching avoiding the functional map layer altogether.\nThe proposed method reaches state-of-the-art results on the SHREC\u201916 and PFAUST by a large margin.\n###figure_1###"
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Related efforts",
|
| 15 |
+
"text": "Deep learning was first introduced for dense shape correspondence in [26 ###reference_b26###] proposing a supervised approach known as FM-Net, based on a differentiable functional map layer.\nUnsupervised versions of the FM-Net appeared in [20 ###reference_b20###, 40 ###reference_b40###].\nTo transform the supervised approach [26 ###reference_b26###] into an unsupervised one, [20 ###reference_b20###] suggested a loss based on the Gromov-Wasserstein Distance.\nThat distance was suggested as a measure of choice for defining the correspondence of surfaces in nature, see [18 ###reference_b18###, 31 ###reference_b31###, 11 ###reference_b11###, 8 ###reference_b8###, 35 ###reference_b35###, 19 ###reference_b19###, 17 ###reference_b17###].\nIn another example, it was used to align the eigenfunctions of the scale invariant LBO of two shapes, see [8 ###reference_b8###].\nHere, we use the Gromov-Wasserstein loss without resorting to the functional map layer.\nLater papers suggested new architectures [42 ###reference_b42###, 16 ###reference_b16###, 41 ###reference_b41###, 25 ###reference_b25###] improving the accuracy on networks trained in both supervised and unsupervised settings.\nIn [29 ###reference_b29###], it was argued that learning robust basis functions instead of the LBO eigenfunctions improves correspondence results.\nIn [24 ###reference_b24###], it was suggested to avoid the functional map framework for correspondence of full shapes, relying instead on a supervised contrastive loss.\nThe correspondence is done directly between the output network features, while a Dirichlet energy is minimized as part of the optimization to enforce smoothness provided by functional map methods.\nRecently, a method for shape correspondence [14 ###reference_b14###] incorporates a loss [3 ###reference_b3###], penalising the difference between the estimated functional map from the functional map layer, and the functional map extracted from soft correspondences calculated by 
cosine similarity between the output features of the network.\nIt was noticed in [4 ###reference_b4###] that when matching partial shapes the functional map framework requires some assistance.\nMotivated by this observation, a network was introduced with an added attention component that makes the feature extractor aware of features on the other shape.\nThe network outputs vanilla features like other methods, those are then fed to the attention layer that refines the features.\nTheoretically, features of points on the full shape that do not belong to the matched part can be zeroed out with this approach.\nAnd yet, in our experiments we noticed that this method is hard to train.\nIndeed, in [4 ###reference_b4###] only the non-refined features, without the attention part, were used for the functional map on the SHREC\u201916-CUTS benchmark.\nIn [39 ###reference_b39###], relations between eigenvalues, eigenfunctions, and the functional map matrix for partial shapes are studied.\nUnlike [39 ###reference_b39###], we explore the error introduced by shape partiality to the functional map matrix produced by the FM-layer, which is a least squares estimate of the matrix extracted from the ground truth correspondence.\nWhile the error in [39 ###reference_b39###] relates to the length of the cut, our analysis exhibits a relation to the missing area."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": " Functional maps and shape partiality",
|
| 21 |
+
"text": "In\nSec. 3.1 ###reference_###\nwe briefly provide the background for functional maps, and in Sec. 3.2 ###reference_### and in the supplementary we analyse its issues with partial shape matching."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "3.1",
|
| 25 |
+
"parent_section_id": "3",
|
| 26 |
+
"section_name": "Functional maps",
|
| 27 |
+
"text": "The notion of functional maps was introduced to non-rigid shape correspondence in [34 ###reference_b34###].\nGiven , a bijective mapping between two surfaces and , then induces a natural functional mapping between scalar functions defined on each surface.\nGiven two orthonormal bases and of and ,\nwe can decompose any function and its corresponding function in these bases.\nRemarkably, the coefficients and are related by a linear transformation depending only on the mapping and the choice of bases,\n,\nwhere the coefficients define the functional map.\nWhen is unknown, finding usually requires solving a least squares optimization problem, with constraints imposed by corresponding descriptor functions\n\ndefined on the surfaces and .\nDefining\n\nand\n,\nthe matrix can be computed by solving\nThere is no restriction of which basis to choose.\nHowever, it was shown in [1 ###reference_b1###] that selecting the leading eigenfunctions of the LBO, when ordered by their corresponding eigenvalues small to large, optimally and uniquely approximate the family of smooth functions with bounded Dirichlet energy.\nWhen mentioning basis functions, eigenfunctions, or eigenvectors in this paper we refer to these low pass subsets of LBO leading basis elements.\nThe least squares problem Eq. (1 ###reference_###)\nis solved by pseudo-inversion.\nIn deep learning frameworks, this operation is called the functional map layer (FM-layer) [26 ###reference_b26###], and\ndeep learning pipelines for shape correspondence often incorporate it\n[40 ###reference_b40###, 4 ###reference_b4###, 14 ###reference_b14###, 24 ###reference_b24###, 20 ###reference_b20###].\nThe FM-layer computes the solution to Eq. 
(1 ###reference_###) as\nHere, is a spectral mapping between and .\nIn the discrete setting,\ndenoting and the corresponding feature matrices on and ,\nthis becomes\nwhere \nis the matrix of basis functions on and\n is\nthe diagonal matrix of the areas about\nvertices\nof the triangulated surface .\nFor conciseness,\nwe\nwrite"
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3.2",
|
| 31 |
+
"parent_section_id": "3",
|
| 32 |
+
"section_name": "Shape partiality",
|
| 33 |
+
"text": "Most deep learning approaches for shape correspondence use the same three components.\nFirst, a neural network generates features at each point of the surface.\nThen, these features are fed to an FM-layer to compute the functional map between the surfaces.\nNote, that this is the first time that the features of the two shapes interact.\nFinally, the functional map is used to estimate a pointwise correspondence between the two shapes.\nWe prove in this subsection that in the case of partial shapes, the use of an FM-layer introduces unavoidable errors.\nWe begin our analysis with the continuous case.\nGiven two surfaces and , where .\nDenote , and\nlet and be subsets of orthonormal bases of and , respectively.\nLet and be the coefficients of the corresponding feature functions on the corresponding surfaces.\nThen, the functional map layer between and computes a functional map , where\nHere, the superscripts and indicate a matrix containing only information related to regions of that correspond to the partial surfaces or , respectively.\nA proof can be found in the supplementary.\nThe matrix is the functional map relating and when considering only descriptors which vanish on .\nWe show in the supplementary that it is the functional map deduced from a perfect mapping between to .\nAt the other end, is an error term that can be interpreted as the functional map relating and when considering only descriptors that vanish on .\nWe readily deduce the following corollary,\nIf , then, the functional map layer injects an error into the functional map.\nGenerally, , thus, the functional map layer unavoidably introduces a significant error to the estimated functional map.\nThe above theorem also applies to the discrete case, a proof can be found in the supplementary..\nGiven a sampled surface , like a triangle mesh, split into two subsurfaces and .\nThe functional map layer between and yields a functional map , where\nIf the feature dimensions\n, where and are the 
number of sampled vertices of surfaces and ,\nand if both and have rank, and if both111Where means orthogonality with respect to the metric matrix .\n and\n, then, .\nIn practice, and the other assumptions are satisfied.\nThis means that\nfor\npartial shape matching, the FM-layer unavoidably introduces an error into the estimated functional map.\nNevertheless, as claimed in [4 ###reference_b4###], it is possible for the error to vanish.\nUnfortunately, this can only happen when , that is, in the unreasonable case where the descriptor dimensionality is equal to the total number of sampled vertices.\nObviously, taking or\n\nwould eliminate this error; however, any general feature extractor would not be aware of the given shape partiality, that is, the matching region of in .\nAs mentioned, the features are extracted independently for each shape without any interaction before the FM-layer.\nAs such, we cannot have a general meaningful feature extractor that provides on .\nSee Fig. 2 ###reference_### for an example of the computed matrices and .\nMethods such as [39 ###reference_b39###] and those relying on [37 ###reference_b37###] deal with shape partiality by masking the matrix .\nOne could easily generalise the proposed analysis along this line of thought, by which one introduces an additive mask matrix to .\nWe explore in detail the error induced by the FM-layer in the supplementary.\nWe show that under simplifying assumptions the error of the FM-layer is proportional to the missing area.\n###figure_2### ###figure_3### ###figure_4###"
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "4",
|
| 37 |
+
"parent_section_id": null,
|
| 38 |
+
"section_name": "Method",
|
| 39 |
+
"text": "###figure_5### We have proven that the FM-layer involves an unavoidable error when applied to partial shape matching.\nUtilizing this knowledge, we propose to avoid an FM-layer and apply a simple and effective approach, see Figure 1 ###reference_###.\nDifferentiable softmax of point features\u2019 inner product in the context of shape matching was first introduced in\n[14 ###reference_b14###, 3 ###reference_b3###].\nWe follow a similar line of thought, and estimate a soft correspondence matrix directly from the feature similarity of the two shapes using the softmax operator.\nUnlike the methods suggested in [14 ###reference_b14###, 3 ###reference_b3###], we do not feed the features from the feature extractor to an FM-layer.\nThe correspondence is extracted using only feature similarity.\nThis correspondence matrix is an input to our loss functions.\nThe main component of the proposed loss was first used in [20 ###reference_b20###] in the context of learning surface matching.\nIt penalizes the distortion of surface inter-geodesic distances between corresponding pairs of points.\nFor regularization, we propose two options.\nThe first option [40 ###reference_b40###] is an orthogonality regularization of the estimated functional map extracted from the soft correspondence matrix,\nbut it does not involve an FM-layer.\nIt allows applying a low pass filter on the computed correspondence in the dual spectral domain.\nTo compute the functional map, we simply multiply the correspondence matrix by the leading LBO basis functions.\nWe also propose an alternative regularization option.\nIt is based on the assumption that the low frequency or leading eigenfunctions on restricted to the regions corresponding to could be well approximated by the low frequency leading eigenfunctions on .\nThis\noption avoids the need to compute the functional map altogether."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "5",
|
| 43 |
+
"parent_section_id": null,
|
| 44 |
+
"section_name": "Experiments",
|
| 45 |
+
"text": "Here, we compare our method to current partial shape matching techniques.222Code and data can be found at https://github.com/ABracha/DirectMatchNet ###reference_###\nand https://github.com/ABracha/PFAUST ###reference_github.com/ABracha/PFAUST###."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "6",
|
| 49 |
+
"parent_section_id": null,
|
| 50 |
+
"section_name": "Conclusion",
|
| 51 |
+
"text": "In this paper, we explored the problem of unsupervised partial shape matching.\nOur analysis unveiled a persistent obstacle when using spectral domains as part of neural network architectures rather than just as part of loss functions.\nOur study showed that using the functional maps framework via least-squares estimation inevitably introduces errors when shape partiality is involved, an issue that escalates with the increase in missing areas of the matched parts.\nAs a result of our findings, we proposed a novel approach that deviates from conventional schools of thought.\nRather than estimating the functional map on the computed features via least squares, and then extracting the correspondences from it, we directly compute the correspondence between partial and full shapes through feature similarity.\nThe proposed method achieves superior results compared to previous methods on the SHREC\u201916 and the new PFAUST datasets, significantly surpassing existing unsupervised methods in the field of partial shape matching of non-rigid shapes.\nThe compelling results endorse the efficacy of our method and prove that spectral representations can be suited for partial shape correspondence when applied properly.\nThe proposed approach obviously involves limitations.\nFirst, in its current form, it is not designed for part-to-part shape correspondence.\nIn that case, needs to be revisited, since, when defining we assume that each point in the partial shape has a corresponding point on the full shape.\nAnother issue is that geodesic distances can be significantly altered by cuts and holes, which affects the contribution of .\nAnd yet, it is far more stable than current unsupervised losses based on functional maps, as demonstrated in the ablation study.\nThis issue is revisited in a follow-up paper [7 ###reference_b7###] where we introduce the wormhole criterion that filters out potentially altered geodesic distances.\nAn additional limitation is that partial and full 
shapes should be of the same scale.\nThis is a common assumption in partial shape matching that is made by most methods, including those based on functional maps.\nThe missing parts also alter the LBO eigenfunctions, which affects DiffusionNet as it uses LBO eigendecomposition to learn heat diffusion on the shape.\nIn practice, DiffusionNet mitigates this by learning to shorten the time scale in the presence of missing parts.\nFinally, designing a scale-invariant approach, for example, by using a scale-invariant metric, or opting for anisotropic or asymmetric metrics [43 ###reference_b43###], is of great interest, but goes beyond the scope of this paper."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "7",
|
| 55 |
+
"parent_section_id": null,
|
| 56 |
+
"section_name": "Proof of Theorem 3.1",
|
| 57 |
+
"text": "By splitting\nintegrals on\n into\na\nsum of integrals over and ,\nwe have,\nThe FM-layer computes the functional map in Eq. (2 ###reference_###).\nWe can plug in into Eq. (2 ###reference_###) and obtain\n."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "8",
|
| 61 |
+
"parent_section_id": null,
|
| 62 |
+
"section_name": "Proof of Theorem 3.2",
|
| 63 |
+
"text": "Let , , and be the number of vertices of , , and , respectively.\nWithout loss of generality, we sort the vertices of such that those corresponding to the vertices in appear first and in the same order as those of .\nThereby, we can write,\nwhere and .\nDenote, by and the feature matrices of and , respectively.\nBased on our ordering, we have that,\nHere, are the features extracted from vertices on located on the subsurface ,\nand similarly for .\nWe can rewrite using Eqs. (26 ###reference_###) and (25 ###reference_###) as\nsince vertices on both and are associated to the same area on both shapes, meaning that , and likewise ,\nwhere and are the restrictions of to the matching part of and on , respectively.\nPlugging Eq. (27 ###reference_###) into Eq. (3 ###reference_###), is the sum of two terms,\nthe self-functional map of with its correspondence in with respect to the basis and ,\nand,\nthe unavoidable error injected into ."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "9",
|
| 67 |
+
"parent_section_id": null,
|
| 68 |
+
"section_name": "Error analysis of the FM-layer",
|
| 69 |
+
"text": "To better understand the two components that form , let us simplify the problem.\nAssume that is composed of two disconnected sub-surfaces and .\nAs the surfaces and are disconnected, the eigenfunctions of the LBO of consist of two disjoint sets of functions, assuming different modes.\nThese two disjoint sets are interleaved according to increasing eigenvalues, where one set contains functions that are the eigenfunctions of the LBO of extended to by taking zero values for every point on , and vice versa for the other set.\nAdditionally, we assume that we have a feature extractor which is robust to partial shape matching, that is,\nwhere is the exact correspondence matrix between and .\nMoreover, we assume that the extracted features have rank .\nWe now look for the functional map that would give us a perfect match between and .\nThat is,\nwhere, w.l.o.g. we choose and as the eigenfunctions of the LBO of surfaces and , respectively, and we sort the vertices of such that those corresponding to the vertices in appear first and in the same order as those of .\nWe have that where\n is the identity matrix until column and only zero columns afterwards.\nWithout loss of generality, we\nsort so that its leading eigenfunctions are those\nrelated to , and then those related to .\nThus, we have,\nUsing our previous assumptions in Eq. (6 ###reference_###) gives us,\nThis is the ideal functional map as it is defined by the given mapping between the surfaces and .\nTherefore, the second term is an error which is proportional to the area of ."
|
| 70 |
+
}
|
| 71 |
+
],
|
| 72 |
+
"appendix": [],
|
| 73 |
+
"tables": {
|
| 74 |
+
"1": {
|
| 75 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S5.T1\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S5.T1.38.1.1\" style=\"font-size:90%;\">Table 1</span>: </span><span class=\"ltx_text\" id=\"S5.T1.39.2\" style=\"font-size:90%;\">Quantitative results on SHREC\u201916.\nThe numbers correspond to the mean geodesic error (scaled by 100), and the result using post processing refinement.\nBest performance is marked in bold and second best is underlined.\nThe results demonstrate the superiority of the proposed method in the unsupervised arena.\nIt outperformed both supervised and unsupervised approaches on the HOLES benchmark, which contains extremely challenging scenarios of shape parts.\n</span></figcaption>\n<div class=\"ltx_inline-block ltx_align_center ltx_transformed_outer\" id=\"S5.T1.36\" style=\"width:433.6pt;height:210.5pt;vertical-align:-0.9pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(-25.2pt,12.2pt) scale(0.895804558977087,0.895804558977087) ;\">\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S5.T1.36.36\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S5.T1.36.36.37.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S5.T1.36.36.37.1.1\">Test-set</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" colspan=\"2\" id=\"S5.T1.36.36.37.1.2\"><span class=\"ltx_text\" id=\"S5.T1.36.36.37.1.2.1\">CUTS</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" colspan=\"2\" id=\"S5.T1.36.36.37.1.3\"><span class=\"ltx_text\" id=\"S5.T1.36.36.37.1.3.1\">HOLES</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.36.36.38.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S5.T1.36.36.38.2.1\">Training-set</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S5.T1.36.36.38.2.2\"><span class=\"ltx_text\" id=\"S5.T1.36.36.38.2.2.1\">CUTS</span></td>\n<td class=\"ltx_td 
ltx_align_center ltx_border_r ltx_border_tt\" id=\"S5.T1.36.36.38.2.3\"><span class=\"ltx_text\" id=\"S5.T1.36.36.38.2.3.1\">HOLES</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S5.T1.36.36.38.2.4\"><span class=\"ltx_text\" id=\"S5.T1.36.36.38.2.4.1\">CUTS</span></td>\n<td class=\"ltx_td ltx_nopad_r ltx_align_center ltx_border_tt\" id=\"S5.T1.36.36.38.2.5\"><span class=\"ltx_text\" id=\"S5.T1.36.36.38.2.5.1\">HOLES</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.36.36.39.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" colspan=\"5\" id=\"S5.T1.36.36.39.3.1\">Axiomatic methods</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.3.3.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T1.1.1.1.1\">PFM <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2310.14692v3#bib.bib39\" title=\"\">39</a>]</cite>Zoomout</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" colspan=\"2\" id=\"S5.T1.2.2.2.2\">9.7 9.0</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" colspan=\"2\" id=\"S5.T1.3.3.3.3\">23.2 22.4</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.6.6.6\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T1.4.4.4.1\">FSP <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2310.14692v3#bib.bib27\" title=\"\">27</a>]</cite> Zoomout</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" colspan=\"2\" id=\"S5.T1.5.5.5.2\">16.1 15.2</td>\n<td class=\"ltx_td ltx_align_center\" colspan=\"2\" id=\"S5.T1.6.6.6.3\">33.7 32.7</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.36.36.40.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" colspan=\"5\" id=\"S5.T1.36.36.40.4.1\">Supervised methods</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.11.11.11\">\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T1.7.7.7.1\">GeomFMaps <cite class=\"ltx_cite ltx_citemacro_cite\">[<a 
class=\"ltx_ref\" href=\"https://arxiv.org/html/2310.14692v3#bib.bib16\" title=\"\">16</a>]</cite> Zoomout</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T1.8.8.8.2\">12.8 10.4</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T1.9.9.9.3\">19.8 16.7</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T1.10.10.10.4\">20.6 17.4</td>\n<td class=\"ltx_td ltx_nopad_r ltx_align_center ltx_border_t\" id=\"S5.T1.11.11.11.5\">15.3 13.0</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.16.16.16\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T1.12.12.12.1\">DPFM <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2310.14692v3#bib.bib4\" title=\"\">4</a>]</cite> Zoomout</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T1.13.13.13.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T1.13.13.13.2.1\">3.2 1.8</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T1.14.14.14.3\">\n<span class=\"ltx_text ltx_font_bold\" id=\"S5.T1.14.14.14.3.1\">8.6</span> <span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"S5.T1.14.14.14.3.2\">7.4</span>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T1.15.15.15.4\">\n<span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"S5.T1.15.15.15.4.1\">15.8</span> 13.9</td>\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S5.T1.16.16.16.5\">13.1 11.9</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.36.36.41.5\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" colspan=\"5\" id=\"S5.T1.36.36.41.5.1\">Unsupervised methods</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.21.21.21\">\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T1.17.17.17.1\">Unsupervised-DPFM <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2310.14692v3#bib.bib4\" title=\"\">4</a>]</cite> Zoomout</td>\n<td 
class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T1.18.18.18.2\">11.8 12.8</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T1.19.19.19.3\">19.5 18.7</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T1.20.20.20.4\">19.1 18.3</td>\n<td class=\"ltx_td ltx_nopad_r ltx_align_center ltx_border_t\" id=\"S5.T1.21.21.21.5\">17.5 16.2</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.26.26.26\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T1.22.22.22.1\">RobustFMnet <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2310.14692v3#bib.bib14\" title=\"\">14</a>]</cite> Refinement</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T1.23.23.23.2\">16.9 10.6</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T1.24.24.24.3\">22.716.6</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T1.25.25.25.4\">18.7 16.2</td>\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S5.T1.26.26.26.5\">23.5 18.8</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.31.31.31\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T1.27.27.27.1\">Proposed Orthogonal Refinement</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T1.28.28.28.2\">\n<span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"S5.T1.28.28.28.2.1\">6.9</span> 5.6</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T1.29.29.29.3\">12.2 8.0</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T1.30.30.30.4\">\n<span class=\"ltx_text ltx_font_bold\" id=\"S5.T1.30.30.30.4.1\">14.2</span> <span class=\"ltx_text ltx_font_bold\" id=\"S5.T1.30.30.30.4.2\">10.2</span>\n</td>\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S5.T1.31.31.31.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T1.31.31.31.5.1\">11.4 7.9</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.36.36.36\">\n<td class=\"ltx_td ltx_align_center 
ltx_border_bb ltx_border_r\" id=\"S5.T1.32.32.32.1\">Proposed LPF Refinement</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_r\" id=\"S5.T1.33.33.33.2\">7.1 <span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"S5.T1.33.33.33.2.1\">4.7</span>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_r\" id=\"S5.T1.34.34.34.3\">\n<span class=\"ltx_text ltx_font_bold\" id=\"S5.T1.34.34.34.3.1\">8.6</span> <span class=\"ltx_text ltx_font_bold\" id=\"S5.T1.34.34.34.3.2\">5.5</span>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_r\" id=\"S5.T1.35.35.35.4\">16.4 <span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"S5.T1.35.35.35.4.1\">11.6</span>\n</td>\n<td class=\"ltx_td ltx_nopad_r ltx_align_center ltx_border_bb\" id=\"S5.T1.36.36.36.5\">\n<span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"S5.T1.36.36.36.5.1\">12.3</span> <span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"S5.T1.36.36.36.5.2\">8.6</span>\n</td>\n</tr>\n</tbody>\n</table>\n</span></div>\n</figure>",
|
| 76 |
+
"capture": "Table 1: Quantitative results on SHREC\u201916.\nThe numbers correspond to the mean geodesic error (scaled by 100), and the result using post processing refinement.\nBest performance is marked in bold and second best is underlined.\nThe results demonstrate the superiority of the proposed method in the unsupervised arena.\nIt outperformed both supervised and unsupervised approaches on the HOLES benchmark, which contains extremely challenging scenarios of shape parts.\n"
|
| 77 |
+
},
|
| 78 |
+
"2": {
|
| 79 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S5.T2\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S5.T2.2.1.1\" style=\"font-size:90%;\">Table 2</span>: </span><span class=\"ltx_text\" id=\"S5.T2.3.2\" style=\"font-size:90%;\">\nQuantitative results on the new PFAUST benchmark.\nThe numbers correspond to the mean geodesic error (scaled by 100) without post-processing refinement.\nBest performance is marked in bold.\nThese results are consistent with those obtained on SHREC\u201916.\nAs expected, the proposed method performs better than previous unsupervised ones and achieves on par results as the supervised one.\n</span></figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S5.T2.4\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S5.T2.4.1.1\">\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_r ltx_border_tt\" colspan=\"2\" id=\"S5.T2.4.1.1.1\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_tt\" id=\"S5.T2.4.1.1.2\"><span class=\"ltx_text\" id=\"S5.T2.4.1.1.2.1\">PFAUST-M</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S5.T2.4.1.1.3\"><span class=\"ltx_text\" id=\"S5.T2.4.1.1.3.1\">PFAUST-H</span></th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.4.2.2\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S5.T2.4.2.2.1\">Supervised</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S5.T2.4.2.2.2\">DPFM <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2310.14692v3#bib.bib4\" title=\"\">4</a>]</cite>\n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S5.T2.4.2.2.3\">3.0</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" 
id=\"S5.T2.4.2.2.4\">6.8</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S5.T2.4.3.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_r ltx_border_t\" id=\"S5.T2.4.3.1.1\" rowspan=\"3\"><span class=\"ltx_text\" id=\"S5.T2.4.3.1.1.1\">Unsupervised</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T2.4.3.1.2\">Unsupervised-DPFM <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2310.14692v3#bib.bib4\" title=\"\">4</a>]</cite>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T2.4.3.1.3\">9.3</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.4.3.1.4\">12.7</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.4.4.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.4.4.2.1\">RobustFMnet <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2310.14692v3#bib.bib14\" title=\"\">14</a>]</cite>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.4.4.2.2\">7.9</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.4.4.2.3\">12.4</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.4.5.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_r\" id=\"S5.T2.4.5.3.1\">Proposed method</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_r\" id=\"S5.T2.4.5.3.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.4.5.3.2.1\">5.1</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S5.T2.4.5.3.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.4.5.3.3.1\">7.9</span></td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 80 |
+
"capture": "Table 2: \nQuantitative results on the new PFAUST benchmark.\nThe numbers correspond to the mean geodesic error (scaled by 100) without post-processing refinement.\nBest performance is marked in bold.\nThese results are consistent with those obtained on SHREC\u201916.\nAs expected, the proposed method performs better than previous unsupervised ones and achieves on par results as the supervised one.\n"
|
| 81 |
+
},
|
| 82 |
+
"3": {
|
| 83 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S5.T3\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S5.T3.13.4.1\" style=\"font-size:90%;\">Table 3</span>: </span><span class=\"ltx_text\" id=\"S5.T3.6.3\" style=\"font-size:90%;\">\nLoss ablation study results.\nThe results demonstrate that the performance of our method mostly relies on , whereas and act mostly as regularisation.\n</span></figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S5.T3.11\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S5.T3.11.6.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_th_row ltx_border_r ltx_border_tt\" id=\"S5.T3.11.6.1.1\"><span class=\"ltx_text\" id=\"S5.T3.11.6.1.1.1\" style=\"font-size:80%;\">Loss functions</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_tt\" id=\"S5.T3.11.6.1.2\"><span class=\"ltx_text\" id=\"S5.T3.11.6.1.2.1\" style=\"font-size:80%;\">CUTS</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S5.T3.11.6.1.3\"><span class=\"ltx_text\" id=\"S5.T3.11.6.1.3.1\" style=\"font-size:80%;\">HOLES</span></th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S5.T3.7.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"S5.T3.7.1.1\"></th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T3.7.1.2\"><span class=\"ltx_text\" id=\"S5.T3.7.1.2.1\" style=\"font-size:80%;\">24</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T3.7.1.3\"><span class=\"ltx_text\" id=\"S5.T3.7.1.3.1\" style=\"font-size:80%;\">14</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.8.2\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r\" id=\"S5.T3.8.2.1\"></th>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.8.2.2\"><span 
class=\"ltx_text\" id=\"S5.T3.8.2.2.1\" style=\"font-size:80%;\">13</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.8.2.3\"><span class=\"ltx_text\" id=\"S5.T3.8.2.3.1\" style=\"font-size:80%;\">13</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.9.3\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r\" id=\"S5.T3.9.3.1\"></th>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.9.3.2\"><span class=\"ltx_text\" id=\"S5.T3.9.3.2.1\" style=\"font-size:80%;\">10</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.9.3.3\"><span class=\"ltx_text\" id=\"S5.T3.9.3.3.1\" style=\"font-size:80%;\">14</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.10.4\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r\" id=\"S5.T3.10.4.1\"></th>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.10.4.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T3.10.4.2.1\" style=\"font-size:80%;\">6.9</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.10.4.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T3.10.4.3.1\" style=\"font-size:80%;\">11.4</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.11.5\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_bb ltx_border_r\" id=\"S5.T3.11.5.1\"></th>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_r\" id=\"S5.T3.11.5.2\"><span class=\"ltx_text\" id=\"S5.T3.11.5.2.1\" style=\"font-size:80%;\">7.1</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S5.T3.11.5.3\"><span class=\"ltx_text\" id=\"S5.T3.11.5.3.1\" style=\"font-size:80%;\">12.3</span></td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 84 |
+
"capture": "Table 3: \nLoss ablation study results.\nThe results demonstrate that the performance of our method mostly relies on , whereas and act mostly as regularisation.\n"
|
| 85 |
+
},
|
| 86 |
+
"4": {
|
| 87 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S5.T4\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S5.T4.2.1.1\" style=\"font-size:90%;\">Table 4</span>: </span><span class=\"ltx_text\" id=\"S5.T4.3.2\" style=\"font-size:90%;\">\nPipeline ablation study results.\nThe correspondence matrix is either estimated from the functional map extracted from a regularized FM-layer or, as we propose, directly from the cosine similarity between features.\nThe results demonstrate the benefit of avoiding the use of an FM-layer in partial shape matching.\nThe effect is even more acute when considering more challenging scenarios, like the HOLES dataset.\n</span></figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S5.T4.4\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S5.T4.4.1.1\">\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_r ltx_border_tt\" id=\"S5.T4.4.1.1.1\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_tt\" id=\"S5.T4.4.1.1.2\"><span class=\"ltx_text\" id=\"S5.T4.4.1.1.2.1\">CUTS</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S5.T4.4.1.1.3\"><span class=\"ltx_text\" id=\"S5.T4.4.1.1.3.1\">HOLES</span></th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S5.T4.4.2.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T4.4.2.1.1\">Regularized FM</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T4.4.2.1.2\">8.5</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T4.4.2.1.3\">15.8</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T4.4.3.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_r\" id=\"S5.T4.4.3.2.1\">Proposed method</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_r\" id=\"S5.T4.4.3.2.2\"><span class=\"ltx_text ltx_font_bold\" 
id=\"S5.T4.4.3.2.2.1\">6.9</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S5.T4.4.3.2.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T4.4.3.2.3.1\">11.4</span></td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 88 |
+
"capture": "Table 4: \nPipeline ablation study results.\nThe correspondence matrix is either estimated from the functional map extracted from a regularized FM-layer or, as we propose, directly from the cosine similarity between features.\nThe results demonstrate the benefit of avoiding the use of an FM-layer in partial shape matching.\nThe effect is even more acute when considering more challenging scenarios, like the HOLES dataset.\n"
|
| 89 |
+
}
|
| 90 |
+
},
|
| 91 |
+
"image_paths": {
|
| 92 |
+
"1": {
|
| 93 |
+
"figure_path": "2310.14692v3_figure_1.png",
|
| 94 |
+
"caption": "Figure 1: Overview of the proposed pipeline.\nBasic features computed for the full and partial shapes are refined using a Siamese neural diffusion feature extractor.\nNext, cosine similarity is computed and fed into a softmax layer that produces a soft correspondence.\nWhile training, the loss functions are applied to the soft correspondence.\nAt inference, the soft correspondence matrix is binarized for sharp matching.",
|
| 95 |
+
"url": "http://arxiv.org/html/2310.14692v3/x1.png"
|
| 96 |
+
},
|
| 97 |
+
"2(a)": {
|
| 98 |
+
"figure_path": "2310.14692v3_figure_2(a).png",
|
| 99 |
+
"caption": "Figure 2: \nEstimating the functional map \ud835\udc6a^y\u2062xsubscript^\ud835\udc6a\ud835\udc66\ud835\udc65\\hat{\\boldsymbol{C}}_{yx}over^ start_ARG bold_italic_C end_ARG start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT between a full \ud835\udcb3\ud835\udcb3\\mathcal{X}caligraphic_X and its part \ud835\udcb4\ud835\udcb4\\mathcal{Y}caligraphic_Y using features independently extracted for each yields errors.\nRecall, \ud835\udc6a^y\u2062x=\ud835\udc6ay\u2062x+\ud835\udc6ay\u2062xEsubscript^\ud835\udc6a\ud835\udc66\ud835\udc65subscript\ud835\udc6a\ud835\udc66\ud835\udc65superscriptsubscript\ud835\udc6a\ud835\udc66\ud835\udc65\ud835\udc38\\hat{\\boldsymbol{C}}_{yx}=\\boldsymbol{C}_{yx}+\\boldsymbol{C}_{yx}^{E}over^ start_ARG bold_italic_C end_ARG start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT = bold_italic_C start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT + bold_italic_C start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_E end_POSTSUPERSCRIPT, where \ud835\udc6ay\u2062xsubscript\ud835\udc6a\ud835\udc66\ud835\udc65\\boldsymbol{C}_{yx}bold_italic_C start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT is the ideal functional map given the correct matching, and \ud835\udc6ay\u2062xEsuperscriptsubscript\ud835\udc6a\ud835\udc66\ud835\udc65\ud835\udc38\\boldsymbol{C}_{yx}^{E}bold_italic_C start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_E end_POSTSUPERSCRIPT is an error resulting from matching the part \ud835\udcb4\ud835\udcb4\\mathcal{Y}caligraphic_Y to its complementary part in \ud835\udcb3\ud835\udcb3\\mathcal{X}caligraphic_X.\nHere, we plot the magnitude of entries in |\ud835\udc6ay\u2062x|subscript\ud835\udc6a\ud835\udc66\ud835\udc65|\\boldsymbol{C}_{yx}|| bold_italic_C start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT | and |\ud835\udc6ay\u2062xE|superscriptsubscript\ud835\udc6a\ud835\udc66\ud835\udc65\ud835\udc38|\\boldsymbol{C}_{yx}^{E}|| 
bold_italic_C start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_E end_POSTSUPERSCRIPT |.\nIndeed, \ud835\udc6ay\u2062xsubscript\ud835\udc6a\ud835\udc66\ud835\udc65\\boldsymbol{C}_{yx}bold_italic_C start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT contains an informative structure, whereas \ud835\udc6ay\u2062xEsuperscriptsubscript\ud835\udc6a\ud835\udc66\ud835\udc65\ud835\udc38\\boldsymbol{C}_{yx}^{E}bold_italic_C start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_E end_POSTSUPERSCRIPT is a noise-like structure-less matrix.",
|
| 100 |
+
"url": "http://arxiv.org/html/2310.14692v3/x2.png"
|
| 101 |
+
},
|
| 102 |
+
"2(b)": {
|
| 103 |
+
"figure_path": "2310.14692v3_figure_2(b).png",
|
| 104 |
+
"caption": "Figure 2: \nEstimating the functional map \ud835\udc6a^y\u2062xsubscript^\ud835\udc6a\ud835\udc66\ud835\udc65\\hat{\\boldsymbol{C}}_{yx}over^ start_ARG bold_italic_C end_ARG start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT between a full \ud835\udcb3\ud835\udcb3\\mathcal{X}caligraphic_X and its part \ud835\udcb4\ud835\udcb4\\mathcal{Y}caligraphic_Y using features independently extracted for each yields errors.\nRecall, \ud835\udc6a^y\u2062x=\ud835\udc6ay\u2062x+\ud835\udc6ay\u2062xEsubscript^\ud835\udc6a\ud835\udc66\ud835\udc65subscript\ud835\udc6a\ud835\udc66\ud835\udc65superscriptsubscript\ud835\udc6a\ud835\udc66\ud835\udc65\ud835\udc38\\hat{\\boldsymbol{C}}_{yx}=\\boldsymbol{C}_{yx}+\\boldsymbol{C}_{yx}^{E}over^ start_ARG bold_italic_C end_ARG start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT = bold_italic_C start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT + bold_italic_C start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_E end_POSTSUPERSCRIPT, where \ud835\udc6ay\u2062xsubscript\ud835\udc6a\ud835\udc66\ud835\udc65\\boldsymbol{C}_{yx}bold_italic_C start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT is the ideal functional map given the correct matching, and \ud835\udc6ay\u2062xEsuperscriptsubscript\ud835\udc6a\ud835\udc66\ud835\udc65\ud835\udc38\\boldsymbol{C}_{yx}^{E}bold_italic_C start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_E end_POSTSUPERSCRIPT is an error resulting from matching the part \ud835\udcb4\ud835\udcb4\\mathcal{Y}caligraphic_Y to its complementary part in \ud835\udcb3\ud835\udcb3\\mathcal{X}caligraphic_X.\nHere, we plot the magnitude of entries in |\ud835\udc6ay\u2062x|subscript\ud835\udc6a\ud835\udc66\ud835\udc65|\\boldsymbol{C}_{yx}|| bold_italic_C start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT | and |\ud835\udc6ay\u2062xE|superscriptsubscript\ud835\udc6a\ud835\udc66\ud835\udc65\ud835\udc38|\\boldsymbol{C}_{yx}^{E}|| 
bold_italic_C start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_E end_POSTSUPERSCRIPT |.\nIndeed, \ud835\udc6ay\u2062xsubscript\ud835\udc6a\ud835\udc66\ud835\udc65\\boldsymbol{C}_{yx}bold_italic_C start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT contains an informative structure, whereas \ud835\udc6ay\u2062xEsuperscriptsubscript\ud835\udc6a\ud835\udc66\ud835\udc65\ud835\udc38\\boldsymbol{C}_{yx}^{E}bold_italic_C start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_E end_POSTSUPERSCRIPT is a noise-like structure-less matrix.",
|
| 105 |
+
"url": "http://arxiv.org/html/2310.14692v3/x3.png"
|
| 106 |
+
},
|
| 107 |
+
"2(c)": {
|
| 108 |
+
"figure_path": "2310.14692v3_figure_2(c).png",
|
| 109 |
+
"caption": "Figure 2: \nEstimating the functional map \ud835\udc6a^y\u2062xsubscript^\ud835\udc6a\ud835\udc66\ud835\udc65\\hat{\\boldsymbol{C}}_{yx}over^ start_ARG bold_italic_C end_ARG start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT between a full \ud835\udcb3\ud835\udcb3\\mathcal{X}caligraphic_X and its part \ud835\udcb4\ud835\udcb4\\mathcal{Y}caligraphic_Y using features independently extracted for each yields errors.\nRecall, \ud835\udc6a^y\u2062x=\ud835\udc6ay\u2062x+\ud835\udc6ay\u2062xEsubscript^\ud835\udc6a\ud835\udc66\ud835\udc65subscript\ud835\udc6a\ud835\udc66\ud835\udc65superscriptsubscript\ud835\udc6a\ud835\udc66\ud835\udc65\ud835\udc38\\hat{\\boldsymbol{C}}_{yx}=\\boldsymbol{C}_{yx}+\\boldsymbol{C}_{yx}^{E}over^ start_ARG bold_italic_C end_ARG start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT = bold_italic_C start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT + bold_italic_C start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_E end_POSTSUPERSCRIPT, where \ud835\udc6ay\u2062xsubscript\ud835\udc6a\ud835\udc66\ud835\udc65\\boldsymbol{C}_{yx}bold_italic_C start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT is the ideal functional map given the correct matching, and \ud835\udc6ay\u2062xEsuperscriptsubscript\ud835\udc6a\ud835\udc66\ud835\udc65\ud835\udc38\\boldsymbol{C}_{yx}^{E}bold_italic_C start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_E end_POSTSUPERSCRIPT is an error resulting from matching the part \ud835\udcb4\ud835\udcb4\\mathcal{Y}caligraphic_Y to its complementary part in \ud835\udcb3\ud835\udcb3\\mathcal{X}caligraphic_X.\nHere, we plot the magnitude of entries in |\ud835\udc6ay\u2062x|subscript\ud835\udc6a\ud835\udc66\ud835\udc65|\\boldsymbol{C}_{yx}|| bold_italic_C start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT | and |\ud835\udc6ay\u2062xE|superscriptsubscript\ud835\udc6a\ud835\udc66\ud835\udc65\ud835\udc38|\\boldsymbol{C}_{yx}^{E}|| 
bold_italic_C start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_E end_POSTSUPERSCRIPT |.\nIndeed, \ud835\udc6ay\u2062xsubscript\ud835\udc6a\ud835\udc66\ud835\udc65\\boldsymbol{C}_{yx}bold_italic_C start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT contains an informative structure, whereas \ud835\udc6ay\u2062xEsuperscriptsubscript\ud835\udc6a\ud835\udc66\ud835\udc65\ud835\udc38\\boldsymbol{C}_{yx}^{E}bold_italic_C start_POSTSUBSCRIPT italic_y italic_x end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_E end_POSTSUPERSCRIPT is a noise-like structure-less matrix.",
|
| 110 |
+
"url": "http://arxiv.org/html/2310.14692v3/x4.png"
|
| 111 |
+
},
|
| 112 |
+
"3": {
|
| 113 |
+
"figure_path": "2310.14692v3_figure_3.png",
|
| 114 |
+
"caption": "Figure 3: \nQualitative results on SHREC\u201916 CUTS (left) and HOLES (right).\nOn SHREC\u201916 CUTS, we obtain visually appealing results that outperform previous unsupervised methods.\nOn SHREC\u201916 HOLES, we obtain better matching results both visually and quantitatively than even the best supervised approach (DPFM).",
|
| 115 |
+
"url": "http://arxiv.org/html/2310.14692v3/extracted/5965754/section/PartialMatch12.png"
|
| 116 |
+
},
|
| 117 |
+
"4": {
|
| 118 |
+
"figure_path": "2310.14692v3_figure_4.png",
|
| 119 |
+
"caption": "Figure 4: \nExample of shapes existing in our new PFAUST benchmark. The left shape is from PFAUST-M, while the right one is from PFAUST-H, which are of medium and hard difficulty, respectively.",
|
| 120 |
+
"url": "http://arxiv.org/html/2310.14692v3/x5.png"
|
| 121 |
+
},
|
| 122 |
+
"5(a)": {
|
| 123 |
+
"figure_path": "2310.14692v3_figure_5(a).png",
|
| 124 |
+
"caption": "Figure 5: PCK curves of existing unsupervised methods and ours on the test sets of SHREC\u201916 CUTS (top left), SHREC\u201916 HOLES (top right), PFAUST-M (bottom left) and PFAUST-H (bottom right).\nOur method is systematically superior compared to competing unsupervised approaches.",
|
| 125 |
+
"url": "http://arxiv.org/html/2310.14692v3/extracted/5965754/section/pck_curve_shrec_cuts.png"
|
| 126 |
+
},
|
| 127 |
+
"5(b)": {
|
| 128 |
+
"figure_path": "2310.14692v3_figure_5(b).png",
|
| 129 |
+
"caption": "Figure 5: PCK curves of existing unsupervised methods and ours on the test sets of SHREC\u201916 CUTS (top left), SHREC\u201916 HOLES (top right), PFAUST-M (bottom left) and PFAUST-H (bottom right).\nOur method is systematically superior compared to competing unsupervised approaches.",
|
| 130 |
+
"url": "http://arxiv.org/html/2310.14692v3/extracted/5965754/section/pck_curve_shrec_holes.png"
|
| 131 |
+
},
|
| 132 |
+
"5(c)": {
|
| 133 |
+
"figure_path": "2310.14692v3_figure_5(c).png",
|
| 134 |
+
"caption": "Figure 5: PCK curves of existing unsupervised methods and ours on the test sets of SHREC\u201916 CUTS (top left), SHREC\u201916 HOLES (top right), PFAUST-M (bottom left) and PFAUST-H (bottom right).\nOur method is systematically superior compared to competing unsupervised approaches.",
|
| 135 |
+
"url": "http://arxiv.org/html/2310.14692v3/extracted/5965754/section/pck_curve_faust_holes_m.png"
|
| 136 |
+
},
|
| 137 |
+
"5(d)": {
|
| 138 |
+
"figure_path": "2310.14692v3_figure_5(d).png",
|
| 139 |
+
"caption": "Figure 5: PCK curves of existing unsupervised methods and ours on the test sets of SHREC\u201916 CUTS (top left), SHREC\u201916 HOLES (top right), PFAUST-M (bottom left) and PFAUST-H (bottom right).\nOur method is systematically superior compared to competing unsupervised approaches.",
|
| 140 |
+
"url": "http://arxiv.org/html/2310.14692v3/extracted/5965754/section/pck_curve_faust_holes_h.png"
|
| 141 |
+
},
|
| 142 |
+
"6(a)": {
|
| 143 |
+
"figure_path": "2310.14692v3_figure_6(a).png",
|
| 144 |
+
"caption": "Figure 6: Additional qualitative results on the SHREC\u201916 CUTS dataset. Zoom in for a better view.",
|
| 145 |
+
"url": "http://arxiv.org/html/2310.14692v3/extracted/5965754/section/sup_cut_shape_qult_result.001.jpeg"
|
| 146 |
+
},
|
| 147 |
+
"6(b)": {
|
| 148 |
+
"figure_path": "2310.14692v3_figure_6(b).png",
|
| 149 |
+
"caption": "Figure 6: Additional qualitative results on the SHREC\u201916 CUTS dataset. Zoom in for a better view.",
|
| 150 |
+
"url": "http://arxiv.org/html/2310.14692v3/extracted/5965754/section/sup_cut_shape_qult_result.002.jpeg"
|
| 151 |
+
},
|
| 152 |
+
"7(a)": {
|
| 153 |
+
"figure_path": "2310.14692v3_figure_7(a).png",
|
| 154 |
+
"caption": "Figure 7: Additional qualitative results on the SHREC\u201916 HOLES dataset. Zoom in for a better view.",
|
| 155 |
+
"url": "http://arxiv.org/html/2310.14692v3/extracted/5965754/section/sup_cut_shape_qult_result.003.jpeg"
|
| 156 |
+
},
|
| 157 |
+
"7(b)": {
|
| 158 |
+
"figure_path": "2310.14692v3_figure_7(b).png",
|
| 159 |
+
"caption": "Figure 7: Additional qualitative results on the SHREC\u201916 HOLES dataset. Zoom in for a better view.",
|
| 160 |
+
"url": "http://arxiv.org/html/2310.14692v3/extracted/5965754/section/sup_cut_shape_qult_result.004.jpeg"
|
| 161 |
+
},
|
| 162 |
+
"8": {
|
| 163 |
+
"figure_path": "2310.14692v3_figure_8.png",
|
| 164 |
+
"caption": "Figure 8: Qualitative results on PFAUST-M of our method and RobustFMnet [14] and UnsupDPFM [4], zoom in for a better view.\nWe obtain visually appealing results that outperform previous unsupervised methods. This figure also presents the shape partiality present in PFAUST-M, which mostly consists in body parts removal due to the size of the holes created on the original shapes.",
|
| 165 |
+
"url": "http://arxiv.org/html/2310.14692v3/x6.png"
|
| 166 |
+
},
|
| 167 |
+
"9": {
|
| 168 |
+
"figure_path": "2310.14692v3_figure_9.png",
|
| 169 |
+
"caption": "Figure 9: Qualitative results on PFAUST-H of our method and RobustFMnet [14] and UnsupDPFM [4]. Zoom in for a better view.\nWe obtain visually appealing results that outperform previous unsupervised methods. This figure also presents the shape partiality present in PFAUST-H, which involves extremely challenging topology with 13131313 holes.",
|
| 170 |
+
"url": "http://arxiv.org/html/2310.14692v3/x7.png"
|
| 171 |
+
}
|
| 172 |
+
},
|
| 173 |
+
"validation": true,
|
| 174 |
+
"references": [],
|
| 175 |
+
"url": "http://arxiv.org/html/2310.14692v3"
|
| 176 |
+
}
|
20241030/2310.19453v4.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2311.00277v3.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2311.03857v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2311.08110v3.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2311.08593v2.json
ADDED
|
@@ -0,0 +1,290 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"title": "Summarization-Based Document IDs for Generative Retrieval with Language Models",
|
| 3 |
+
"abstract": "Generative retrieval (Wang et al., 2022; Tay et al., 2022) is a popular approach for end-to-end document retrieval that directly generates document identifiers given an input query. We introduce summarization-based document IDs, in which each document\u2019s ID is composed of an extractive summary or abstractive keyphrases generated by a language model, rather than an integer ID sequence or bags of n-grams as proposed in past work. We find that abstractive, content-based IDs (ACID) and an ID based on the first 30 tokens are very effective in direct comparisons with previous approaches to ID creation. We show that using ACID improves top-10 and top-20 recall by 15.6% and 14.4% (relative) respectively versus the cluster-based integer ID baseline on the MSMARCO 100k retrieval task, and 9.8% and 9.9% respectively on the Wikipedia-based NQ 100k retrieval task. Our results demonstrate the effectiveness of human-readable, natural-language IDs created through summarization for generative retrieval. We also observed that extractive IDs outperformed abstractive IDs on Wikipedia articles in NQ but not the snippets in MSMARCO, which suggests that document characteristics affect generative retrieval performance.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Wikipedia-based corpora have long been an important part of NLP research and form a natural benchmark for studying new techniques in text-based recommender and information retrieval systems. In this work, we examine how generative retrieval behaves on short-form and long-form documents drawn from Wikipedia and non-Wikipedia sources. We also propose a new type of document ID for generative retrieval based on document summarization, which demonstrably improves retrieval performance across the tasks that we examined.\nLarge language models (LMs) are now widely used across many NLP tasks, and extensions of generative models to document retrieval tasks have recently been proposed (Wang et al., 2022 ###reference_b16###; Tay et al., 2022 ###reference_b15###), in contrast to vector-based approaches like dense passage retrieval (DPR; Karpukhin et al., 2020 ###reference_b7###). DPR is a widely-used technique for training document retrieval models, where queries and documents are mapped to dense vector representations with a transformer encoder (e.g., BERT; Devlin et al., 2019 ###reference_b5###). By increasing the cosine similarity between positive query-document pairs and decreasing it between negative pairs, DPR performs metric learning over the space of queries and the set of documents to be indexed.\nGenerative alternatives to document retrieval address certain limitations of dense, vector-based approaches to retrieval. For example, query and document representations are constructed separately in DPR, which precludes complex query-document interactions. Using a single dense vector to represent an entire document limits the amount of information that can be stored; indeed, Tay et al. (2022 ###reference_b15###) observed that increasing the number of parameters in the encoder does not significantly enhance DPR performance. Furthermore, the rich sequence generation capabilities of language models (LMs) cannot be used directly in dense retrieval. Tay et al. 
(2022 ###reference_b15###) and Wang et al. (2022 ###reference_b16###) therefore proposed a new direction called generative retrieval, where LMs learn to directly map queries to an identifier that is unique to each document. We illustrate the differences in Figure 1 ###reference_###.\n###figure_1### Instead of retrieving documents based on cosine similarity, generative retrieval uses an LM to produce a sequence of tokens encoding the relevant document\u2019s ID, conditional on the query. Decoding constraints are applied to ensure that only document IDs that exist in the corpus are generated. Tay et al. (2022 ###reference_b15###) and Wang et al. (2022 ###reference_b16###) showed that generative retrieval outperformed DPR on information retrieval benchmarks like Natural Questions (Kwiatkowski et al., 2019 ###reference_b8###) and TriviaQA (Joshi et al., 2017 ###reference_b6###), and subsequent publications have corroborated their findings on other retrieval tasks like multilingual retrieval (Zhuang et al., 2023 ###reference_b19###).\nState-of-the-art generative retrieval models rely on document clustering to create document IDs, following the work of both Wang et al. (2022 ###reference_b16###) and Tay et al. (2022 ###reference_b15###), and the resulting document ID is an integer sequence corresponding to the clusters that the document belongs to. However, generating arbitrary sequences of integers is very different from what LMs are designed to do, since LMs are pretrained to generate natural language. In addition to negatively impacting LM generation performance, cluster-based integer IDs are not human-readable and require re-clustering if a substantial number of new documents are added to the index.\nTo address the issues with cluster-based IDs, we consider summarization-based document IDs, which are human-readable, natural-language document IDs. 
We propose ACID, an Abstractive, Content-based ID assignment method for documents, alongside simpler IDs based on extractive summarization. ACID uses a language model (GPT-3.5 in our experiments) to generate a short sequence of abstractive keyphrases from the document\u2019s contents to serve as the document ID, rather than a hierarchical clustering ID or an arbitrary integer sequence.\nWe also consider creating content-based IDs extractively: taking the first 30 tokens of each document as its ID or choosing the top-30 keywords with respect to BM25 scores.\nWe find that ACID generally outperforms the cluster-based IDs for generative retrieval (as well as the extractive methods) in direct comparisons on standard retrieval benchmarks. We also observe that longer extractive document IDs are helpful for retrieving long documents, such as the Wikipedia articles in the NQ benchmark, versus the shorter document fragments from the MSMARCO dataset.\nFinally, we examine the effect of hyperparameters like model size and beam width on retrieval performance, and compare how cluster-based IDs and summarization-based IDs behave under different settings.\nThe code for reproducing our results and the keyword-augmented datasets can be found at https://github.com/lihaoxin2020/Summarization-Based-Document-IDs-for-Generative-Retrieval ###reference_ion-Based-Document-IDs-for-Generative-Retrieval###, and the data can be found at https://huggingface.co/datasets/lihaoxin2020/abstractive-content-based-IDs ###reference_20/abstractive-content-based-IDs###."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "IDs for Generative Retrieval",
|
| 15 |
+
"text": "Since generative retrieval is a comparatively new approach for document retrieval, there is significant variation in the literature on how language models are trained to map queries to document IDs. Tay et al. (2022 ###reference_b15###) distinguish between the \u2018indexing\u2019 step (where the LM is trained to link spans from the training, development, and test documents to their document IDs) and the \u2018finetuning\u2019 step (where the training query-document pairs are used to finetune the LM for retrieval). Note that generative retrieval models must index all documents, including the development and test documents, in order for the language model to be aware of their document IDs at inference time. Additionally, Wang et al. (2022 ###reference_b16###) and Zhuang et al. (2023 ###reference_b19###) perform data augmentation in the indexing and finetuning steps by introducing \u2018synthetic\u2019 queries, where a query generation model (Nogueira et al., 2019 ###reference_b11###) based on T5 (Raffel et al., 2020 ###reference_b13###) generates additional queries for each document.\nIn the three subsections that follow, we elaborate on each of the steps for generative retrieval. Figure 2 ###reference_### depicts the steps needed to create our summarization-based document IDs, perform data augmentation, index the documents with the LM, and finetune the LM for generative retrieval."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "2.1",
|
| 19 |
+
"parent_section_id": "2",
|
| 20 |
+
"section_name": "Document ID Creation",
|
| 21 |
+
"text": "In Table 1 ###reference_###, we provide an example of a document about engineering sub-disciplines and the cluster-based and content-based IDs that would be derived from it. From the example, it is clear why we would expect ACID to outperform cluster IDs, since it is straightforward for LMs to generate the keyphrase sequence given an engineering-related query. The cluster ID, on the other hand, resembles an integer hash of the document (with some semantic information carried over from the clustering).\nAbstractive, Content-based IDs. We create natural language IDs for every document to be indexed by generating keyphrases. Tokens from the document (up to the maximum context size of 4000 tokens) are used as part of a prompt to an LM to generate 5 keyphrases. The keyphrases are a brief abstractive summary of the topics in the document. The keyphrases are concatenated together to form the ACID for each document. We create IDs for every document in the training, development, and test sets.\nWe chose the GPT-3.5 API provided by OpenAI to generate keyphrases, though any reasonable pretrained LM can be used instead. The prompt that we used was:\n{adjustwidth}0.3cm0.3cm\nGenerate no more than 5 key phrases describing the topics in this document. Do not include things like the Wikipedia terms and conditions, licenses, or references section in the list: (document body here)\nExtractive Summary IDs. We consider two types of extractive summary IDs: a bag of unigrams selected based on BM25 scores, and the first tokens of the document. For many types of documents (e.g., news articles, Wikipedia articles, scientific papers), the first few sentences would generally provide an overview of the contents of the document, which motivates our choice of the first tokens as a kind of extractive document ID.\nCluster-based IDs. By way of comparison with our proposed IDs, cluster-based IDs are integer sequences. 
An encoder creates an embedding vector for each document in the dataset, and the document embeddings are clustered using the -means algorithm. If the number of documents in a cluster exceeds a predefined maximum, then subclusters are created recursively, until all subclusters contain fewer documents than the maximum. Each document\u2019s ID is a sequence of integers, corresponding to the path to the document through the tree of hierarchical clusters. The number of clusters at each level and the maximum number of documents in each cluster are hyperparameters. (For example, the values reported by Wang et al., 2022 ###reference_b16###, were 10 and 100 respectively, which we also use in our experiments.)\n###figure_2###"
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2.2",
|
| 25 |
+
"parent_section_id": "2",
|
| 26 |
+
"section_name": "Document Indexing and Supervised Finetuning",
|
| 27 |
+
"text": "We first index all of the documents in the training, development, and test sets. For indexing purposes, we consider input/output pairs of the form\n(synthetic query, document ID).\nIn other words, the LM is trained to generate the relevant document ID, given a randomly selected document span or a synthetic query, as part of the indexing task. We use a T5-based query generation model to provide synthetic queries given the body of each document, which serves as a form of data augmentation independent of the queries in the training data. Note that, in our experiments, only synthetic queries are used during the indexing step. Although random document spans are used in other generative retrieval papers, we did not observe an improvement by doing so.\nAfter document indexing, we finetune the model on the retrieval training data:\n(user-generated query, document ID)\nIn other words, the LM is trained to generate the document ID, given a real, user-generated query."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "2.3",
|
| 31 |
+
"parent_section_id": "2",
|
| 32 |
+
"section_name": "Retrieving Documents",
|
| 33 |
+
"text": "At inference time, the LM generates a document ID via beam search, given a user-generated query from the test set. We use a constrained decoder at inference time, which is constrained by a prefix tree such that it can only generate document IDs that exist in the corpus. Since each document ID maps to a unique document, it is straightforward to compute the proportion of queries for which the model retrieved the correct document. Model performance is measured based on the recall of relevant documents retrieved within the top-1, top-10, and top-20 results in our experiments."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "3",
|
| 37 |
+
"parent_section_id": null,
|
| 38 |
+
"section_name": "Experiments",
|
| 39 |
+
"text": "In the experiments below, we demonstrate that summarization-based IDs outperform cluster-based IDs on the NQ and MSMARCO retrieval benchmarks. Simple extractive IDs, like using the first 30 tokens of the document or BM25-based keyword selection, can outperform the cluster-based approach in most cases. We also compare our IDs with another keyword-based document ID method that constructs IDs using learned relevance scores (Zhang et al., 2024 ###reference_b18###). We then show that summarization-based IDs work well across a range of language model sizes (as measured by the total number of parameters). Finally, we show that widening the beam improves retrieval performance meaningfully for ACID, whereas cluster-based IDs benefit from beam width to a lesser degree (or not at all, in the case of the widest beam widths).\nThe BM25-based IDs were created by ranking all of the unique terms in each document by their BM25 scores, and taking the top 30 terms as the document ID. We used Anserini (Yang et al., 2017 ###reference_b17###) to compute BM25 scores for the documents in each corpus. To avoid selecting very rare terms as part of each document\u2019s BM25-based document ID, we required that each term either appear at least 2 times in the document itself, or appear at least 5 times in the corpus.\nWe use the Natural Questions (NQ; Kwiatkowski et al., 2019 ###reference_b8###) and MSMARCO (Bajaj et al., 2016 ###reference_b1###) datasets. For each dataset, we finetune a pretrained language model for retrieval on 1k, 10k, and 100k random samples of the training split. Note that MSMARCO and NQ do not disclose their test sets publicly, and our results are reported on the provided development sets. Since we did not use the entirety of the training data that was available for NQ and MSMARCO, we created separate development sets for them by taking a random sample of each dataset\u2019s training data. We provide the details of each corpus in Table 2 ###reference_###. 
Document length is highly variable, and we truncate all documents after 4k tokens.\nWe use the Pythia LMs (Biderman et al., 2023 ###reference_b3###) to initialize the retrieval model in our experiments. All of our models are trained on AWS g5 instances equipped with Nvidia A10G GPUs. Models are optimized using AdamW (Loshchilov and Hutter, 2017 ###reference_b9###). We provide the model hyperparameters that were used in the Appendix. The beam width for all experiments is 20, unless stated otherwise.\nIn Table 2 ###reference_###, we provide the basic statistics for the NQ and MSMARCO datasets that we used. We deduplicate documents based on the first 512 tokens of each document, and documents with 95% token overlap are considered duplicates.\nNote that there is a substantial difference in the average document length between NQ and MSMARCO datasets. While NQ and MSMARCO have queries of similar lengths, their document lengths are very different, since NQ documents are complete Wikipedia articles while MSMARCO passages are a few sentences long, excerpted from a longer document."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "4",
|
| 43 |
+
"parent_section_id": null,
|
| 44 |
+
"section_name": "Results",
|
| 45 |
+
"text": "There is substantial variation in the reported results on the NQ dataset among papers that use cluster-based IDs for generative retrieval. In Tay et al. (2022 ###reference_b15###) and Wang et al. (2022 ###reference_b16###), the top-1 recall with the NQ 320k dataset were 27.4% and 65.86% respectively, despite both groups using the same T5-Base model initialization and cluster-based ID approach. There are many possible explanations for the discrepancy (e.g., use of synthetic queries, computational budget, etc.), but at the time of writing, neither paper has made the code or processed data publicly available, which makes replication difficult. For this reason, we focus on internal comparisons rather than external ones, where we control the relevant experimental settings to ensure that the comparisons are fair and the differences in results are meaningful."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "4.1",
|
| 49 |
+
"parent_section_id": "4",
|
| 50 |
+
"section_name": "MSMARCO",
|
| 51 |
+
"text": "MSMARCO 1k\nMSMARCO 10k\nMSMARCO 100k\n\n\nRec@1\n@10\n@20\nRec@1\n@10\n@20\nRec@1\n@10\n@20\n\nBaseline\n\n\n\n\n\n\n\n\n\n\nCluster Integer IDs\n41.1\n59.5\n64.2\n42.4\n62.3\n67.1\n46.8\n68.8\n73.4\n\nExtractive Summarization IDs\n\n\n\n\n\n\n\n\n\n\nBM25 Top-30\n48.7\n74.3\n79.4\n49.1\n75.7\n80.1\n52.0\n79.2\n82.9\n\nFirst 30 Tokens\n49.0\n73.0\n77.8\n48.7\n72.8\n77.9\n51.8\n76.0\n79.6\n\nAbstractive Summarization IDs\n\n\n\n\n\n\n\n\n\n\nACID\n49.1\n74.3\n80.1\n50.4\n76.3\n80.4\n52.9\n79.5\n84.0\nWe begin by examining the performance of our implementations of various types of document IDs on the MSMARCO task. We present the results in Table 3 ###reference_###, and all results are based on a 160M-parameter pretrained Pythia LM. Across all training set sizes, the ACIDs offer better retrieval performance compared to the other ID generation techniques, and summarization-based IDs clearly outperform the cluster integer IDs."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "4.2",
|
| 55 |
+
"parent_section_id": "4",
|
| 56 |
+
"section_name": "Natural Questions",
|
| 57 |
+
"text": "NQ 1k\nNQ 10k\nNQ 100k\n\n\nRec@1\n@10\n@20\nRec@1\n@10\n@20\nRec@1\n@10\n@20\n\nBaselines\n\n\n\n\n\n\n\n\n\n\nBM25\n20.9\n53.8\n62.7\n20.9\n53.8\n62.7\n20.9\n53.8\n62.7\n\nDense Passage Retrieval\n25.8\n62.6\n70.9\n32.8\n74.9\n82.6\n35.5\n78.7\n86.1\n\nCluster Integer IDs\n38.4\n64.2\n69.4\n40.2\n67.5\n72.7\n40.8\n68.2\n73.0\n\nTSGen (Zhang et al., 2024 ###reference_b18###)\n28.8\n67.1\n73.6\n29.2\n67.6\n74.4\n30.3\n71.8\n78.3\n\nSummarization-based IDs\n\n\n\n\n\n\n\n\n\n\nBM25 Top-30\n36.5\n66.1\n70.9\n36.8\n66.1\n71.1\n37.0\n68.2\n72.8\n\nFirst 30 Tokens\n41.9\n66.0\n69.9\n43.3\n67.6\n71.6\n47.7\n71.2\n74.4\n\nACID\n39.2\n69.2\n74.0\n40.5\n70.7\n75.2\n40.9\n74.9\n80.2\n\nSummarization-based IDs with Joint Decoding\n\n\n\n\n\n\n\n\n\n\nFirst 30 Tokens w/ Joint Dec\n49.1\n78.7\n82.6\n49.7\n79.2\n83.1\n55.3\n83.0\n86.4\n\nACID w/ Joint Dec\n41.3\n77.3\n82.5\n41.3\n77.0\n82.9\n42.3\n78.0\n84.0\nIn Table 4 ###reference_###, we compare sparse and dense retrieval techniques against generative retrieval on the NQ dataset. We used the 160M-parameter Pythia LM as our base model to obtain the results in the table. Across the NQ 1k, 10k, and 100k tasks, summarization-based document IDs generally outperform cluster-based integer IDs and TSGen (Zhang et al., 2024 ###reference_b18###). (TSGen learns a scoring function that identifies relevant terms from the document to use as the ID.) As we saw with MSMARCO, the simple approach of using the first 30 tokens from each document to create IDs also outperforms the cluster-based approach.\nWe further improve the performance of the finetuned 160M-parameter model by performing joint decoding with the 12-billion parameter Pythia LM. We provide 8 query-document ID pairs from the training data to the 12B Pythia model for in-context learning. For a given query, we use both the small model and the large model (with the in-context examples) to generate the relevant document ID. 
The output probabilities from the small and large models are combined using a mixture weight of on the small model.\nWhen we applied joint decoding, the extractive summarization-based document ID that uses the first 30 tokens outperformed all of the other techniques that we examined.\nWe emphasize that this is one of the major advantages of using generative retrieval with natural-language IDs: we can use a pretrained LLM with in-context learning to significantly boost the performance of a smaller finetuned LM. In contrast, generative retrieval that uses integer IDs does not benefit from joint decoding with an LLM, since the integer ID sequences are far from the pretraining distribution and in-context learning provides no benefit.\nWe observed that the top-1 recall with the first 30 tokens as the ID is quite high. This may be due to the structure of the NQ documents, which are Wikipedia articles. The first tokens of every document are the title of the Wikipedia page, and so the first 30 tokens represent a very effective ID for retrieval purposes. Nonetheless, without joint decoding, ACID outperforms the first 30 token IDs at top-10 and top-20 recall."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "4.3",
|
| 61 |
+
"parent_section_id": "4",
|
| 62 |
+
"section_name": "Model Size",
|
| 63 |
+
"text": "###figure_3### We examine whether the relative outperformance of ACIDs versus cluster integer IDs on MSMARCO is affected by the number of parameters in the generative model. Our default experiments in the previous sections used 160M-parameter Pythia models, and in Figure 3 ###reference_### we conduct experiments going up to 2.8B-parameter models.\nWe observe that ACIDs continue to outperform cluster integer IDs, even as we vary the model size. In general, increasing the size of the model leads to an improvement in retrieval performance, regardless of the ID type."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "4.4",
|
| 67 |
+
"parent_section_id": "4",
|
| 68 |
+
"section_name": "Beam Width",
|
| 69 |
+
"text": "From Table 5 ###reference_###, we see that larger beam widths generally improve recall on MSMARCO, though with rapidly diminishing returns. The top-1 recall does not benefit past a beam width of 8, and the recall rapidly plateaus as beam width increases from 1 to 16. This is true for both cluster integer IDs and ACID, though ACID does benefit more in absolute terms than cluster IDs from a wider beam (when comparing a beam width of 1 to a beam width of 16).\nIn the same table, we also examine the effect of very wide beams on recall at 10 and 20 for the MSMARCO dataset. Some benefit is observed when ACID is the document ID, but no improvement is observed for cluster IDs.\nAs discussed previously, the cluster integer ID is typically restricted to a small number of clusters per level (the digits 0 through 9, for example), and so a wide beam in excess of that number doesn\u2019t yield any improvements, whereas ACID does benefit from wider beams, since it is a natural-language ID with access to the full vocabulary of the LM."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "4.5",
|
| 73 |
+
"parent_section_id": "4",
|
| 74 |
+
"section_name": "ID Length",
|
| 75 |
+
"text": "In Table 6 ###reference_###, we present the change in recall on the NQ and MSMARCO tasks depending on the length of the document ID. We use the extractive document ID based on the first 10, 20, 30, and 40 tokens. On MSMARCO 100k, we observe very little change in top-k recall. On NQ 100k, we saw a larger benefit with longer IDs, with the highest recall corresponding to the longest document ID. We speculate that the differences in document length between MSMARCO and NQ ( tokens versus k tokens per document) means that longer IDs tend to benefit the NQ retrieval task more."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "5",
|
| 79 |
+
"parent_section_id": null,
|
| 80 |
+
"section_name": "Related Work",
|
| 81 |
+
"text": "Tay et al. (2022 ###reference_b15###) explore a number of techniques for creating document IDs for generative retrieval, including atomic document IDs, randomly assigned integer IDs, and semantic IDs based on hierarchical clustering. The last technique was found to be the most effective, where the document IDs were formed via hierarchical k-means clustering on BERT-based document vectors. The main difference between that approach and ours is that, during finetuning, their approach requires learning the \u201csemantics\u201d of the cluster IDs, while ours uses natural language phrases that are already in some sense familiar to the pretrained model. Wang et al. (2022 ###reference_b16###) also used IDs based on hierarchical clustering with BERT embeddings and proposed the prefix-aware weight-adaptor (PAWA) modification, where a separate decoder was trained to produce level-specific linear projections to modify the ID decoder\u2019s outputs at each timestep. The authors also incorporated synthetic queries from a doc2query model to augment the user-generated queries in the dataset. Pradeep et al. (2023 ###reference_b12###) scale the cluster ID-based approach to generative retrieval to millions of documents, and explore the impact of adding synthetic queries for documents that do not have a query sourced from a user.\nThe aforementioned papers used IDs that were not optimized for the retrieval task, but other work has explored creating document IDs in a retrieval-aware manner. In Sun et al. (2024 ###reference_b14###), the document IDs are treated as a sequence of fixed-length latent discrete variables which are learned via a document reconstruction loss and the generative retrieval loss. However, the authors reported that this method does experience collisions, as some documents are assigned to the same latent integer ID sequence, though the collision rate was not reported.\nBevilacqua et al. 
(2022 ###reference_b2###) proposed a model that, given a query, generates the n-grams that should appear in the relevant documents. All documents that contain the generated n-grams are then retrieved and reranked to produce the final search results. (This is in contrast to our approach, which seeks to associate a unique ID to each document for generative retrieval.) The authors propose several methods for reranking based on n-gram scores produced by the LM. However, the n-gram generation and reranking approach does not always outperform the dense retrieval baseline. Zhang et al. (2024 ###reference_b18###) create document IDs by selecting terms from the document based on relevance scores that are learned using a contrastive loss and BERT embeddings.\nIn addition, there is a substantial body of work that involves model-generated text and retrieval. De Cao et al. (2020 ###reference_b4###) generate the text representation of entities autoregressively instead of treating entities as atomic labels in a (potentially very large) vocabulary. Nogueira et al. (2019 ###reference_b11###) use an encoder-decoder model to generate synthetic queries for each document in the index and concatenate them together to improve retrieval performance. The expanded documents are indexed using Anserini and BM25. Synthetic queries from these \u2018doc2query\u2019 models are also used for data augmentation in generative retrieval. Mao et al. (2020 ###reference_b10###) use pretrained language models to expand queries with relevant contexts (e.g., appending the title of a relevant passage to the query, etc.) for retrieval and open-domain question answering."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "6",
|
| 85 |
+
"parent_section_id": null,
|
| 86 |
+
"section_name": "Conclusion",
|
| 87 |
+
"text": "We have demonstrated that summarization-based document IDs are highly effective for generative retrieval. Our results show a clear improvement in retrieval performance on the Natural Questions and MSMARCO datasets versus both cluster-based integer IDs and other keyword-based document IDs. In direct comparisons, abstractive keyphrases work well versus other types of IDs. Surprisingly, we found that the first 30 tokens of a document also works very well among the IDs we tried, but we have not seen this fact documented in the generative retrieval literature. The choice of ID is clearly a major factor in retrieval performance, and we expect that future work will explore other possibilities for creating effective natural-language document IDs.\nWe also observed that the extractive summarization approach (i.e., first-30 tokens as ID) outperforms the abstractive ACID approach for the long Wikipedia articles in the NQ dataset but not for the shorter snippets in the MSMARCO dataset. Clearly, the characteristics of the documents that are indexed affects generative retrieval, and in the case of Wikipedia documents, the initial sentences tend to be an overview of the rest of the article. As the field of generative retrieval continues to evolve, optimizing document ID generation for specific use cases and document collections may become an important area of study."
|
| 88 |
+
}
|
| 89 |
+
],
|
| 90 |
+
"appendix": [],
|
| 91 |
+
"tables": {
|
| 92 |
+
"1": {
|
| 93 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S2.T1\">\n<div class=\"ltx_flex_figure ltx_flex_table\">\n<div class=\"ltx_flex_cell ltx_flex_size_1\">\n<table class=\"ltx_tabular ltx_centering ltx_figure_panel ltx_guessed_headers ltx_align_middle\" id=\"S2.T1.8\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S2.T1.8.1.1\">\n<th class=\"ltx_td ltx_nopad_r ltx_align_justify ltx_th ltx_th_column ltx_border_tt\" id=\"S2.T1.8.1.1.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.8.1.1.1.1\">\n<span class=\"ltx_p\" id=\"S2.T1.8.1.1.1.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.8.1.1.1.1.1.1\">Document Text</span></span>\n</span>\n</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S2.T1.8.2.1\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_justify ltx_border_t\" id=\"S2.T1.8.2.1.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.8.2.1.1.1\">\n<span class=\"ltx_p\" id=\"S2.T1.8.2.1.1.1.1\">List of engineering branches Engineering is the discipline and profession that applies scientific theories , mathematical methods , and empirical evidence to design , create , and analyze technological solutions cognizant of safety , human factors , physical laws , regulations , practicality , and cost . 
In the contemporary era , engineering is generally considered to consist of the major primary branches of chemical engineering , civil engineering , electrical engineering , and mechanical engineering\u2026</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.8.3.2\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_justify ltx_border_t\" id=\"S2.T1.8.3.2.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.8.3.2.1.1\">\n<span class=\"ltx_p\" id=\"S2.T1.8.3.2.1.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.8.3.2.1.1.1.1\">Cluster-based Document ID</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.8.4.3\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_justify ltx_border_b ltx_border_t\" id=\"S2.T1.8.4.3.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.8.4.3.1.1\">\n<span class=\"ltx_p\" id=\"S2.T1.8.4.3.1.1.1\">9, 5, 1, 9, 6, 1, 0, 4, 8, 1, 3, 1, 2, 9, 0</span>\n</span>\n</td>\n</tr>\n</tbody>\n</table>\n</div>\n<div class=\"ltx_flex_break\"></div>\n<div class=\"ltx_flex_cell ltx_flex_size_1\">\n<table class=\"ltx_tabular ltx_centering ltx_figure_panel ltx_guessed_headers ltx_align_middle\" id=\"S2.T1.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S2.T1.1.2.1\">\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_column\" colspan=\"3\" id=\"S2.T1.1.2.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.1.2.1.1.1\">Summarization-based Document IDs</span></th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S2.T1.1.1\">\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S2.T1.1.1.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.1.1.1.1\">\n<span class=\"ltx_p\" id=\"S2.T1.1.1.1.1.1\"><em class=\"ltx_emph ltx_font_italic\" id=\"S2.T1.1.1.1.1.1.1\">First Tokens</em></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S2.T1.1.1.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.1.1.2.1\">\n<span class=\"ltx_p\" 
id=\"S2.T1.1.1.2.1.1\"><em class=\"ltx_emph ltx_font_italic\" id=\"S2.T1.1.1.2.1.1.1\">BM25 Scoring</em></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_nopad_r ltx_align_justify ltx_border_t\" id=\"S2.T1.1.1.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.1.1.3.1\">\n<span class=\"ltx_p\" id=\"S2.T1.1.1.3.1.1\"><em class=\"ltx_emph ltx_font_italic\" id=\"S2.T1.1.1.3.1.1.1\">ACID</em></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.1.3.1\">\n<td class=\"ltx_td ltx_align_justify ltx_border_bb ltx_border_t\" id=\"S2.T1.1.3.1.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.1.3.1.1.1\">\n<span class=\"ltx_p\" id=\"S2.T1.1.3.1.1.1.1\">List of engineering branches Engineering is the discipline and profession that applies scientific theories , mathematical\u2026</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_bb ltx_border_t\" id=\"S2.T1.1.3.1.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.1.3.1.2.1\">\n<span class=\"ltx_p\" id=\"S2.T1.1.3.1.2.1.1\">teletraffic optomechanical nanoengineering subdiscipline eegs biotechnical bioprocess mechatronics metallics crazing\u2026</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_nopad_r ltx_align_justify ltx_border_bb ltx_border_t\" id=\"S2.T1.1.3.1.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.1.3.1.3.1\">\n<span class=\"ltx_p\" id=\"S2.T1.1.3.1.3.1.1\">(1) Major engineering branches: chemical, civil, electrical, mechanical (2) Chemical engineering: conversion of raw materials with varied specialties (3) Civil engineering: design\u2026</span>\n</span>\n</td>\n</tr>\n</tbody>\n</table>\n</div>\n</div>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>An example of a document, its cluster-based ID (where each level of the clustering has 10 clusters), and its associated natural language, content-based IDs. \u2018First tokens\u2019 sets the ID to be the document\u2019s first tokens. 
BM25 scoring uses the top- highest-scoring tokens from the document as the ID, where scores are based on Okapi BM25. ACID uses an LM (e.g., GPT-3.5) to generate 5 keyphrases as the ID.</figcaption>\n</figure>",
|
| 94 |
+
"capture": "Table 1: An example of a document, its cluster-based ID (where each level of the clustering has 10 clusters), and its associated natural language, content-based IDs. \u2018First tokens\u2019 sets the ID to be the document\u2019s first tokens. BM25 scoring uses the top- highest-scoring tokens from the document as the ID, where scores are based on Okapi BM25. ACID uses an LM (e.g., GPT-3.5) to generate 5 keyphrases as the ID."
|
| 95 |
+
},
|
| 96 |
+
"2": {
|
| 97 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S3.T2\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S3.T2.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S3.T2.1.1.1\">\n<th class=\"ltx_td ltx_th ltx_th_row ltx_border_tt\" id=\"S3.T2.1.1.1.1\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_tt\" id=\"S3.T2.1.1.1.2\"></th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column ltx_border_tt\" id=\"S3.T2.1.1.1.3\">Ave. Query</th>\n<th class=\"ltx_td ltx_nopad_r ltx_align_right ltx_th ltx_th_column ltx_border_tt\" id=\"S3.T2.1.1.1.4\">Ave. Doc.</th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.1.2.2\">\n<th class=\"ltx_td ltx_th ltx_th_row\" id=\"S3.T2.1.2.2.1\"></th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column\" id=\"S3.T2.1.2.2.2\"># Pairs</th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column\" id=\"S3.T2.1.2.2.3\">Length</th>\n<th class=\"ltx_td ltx_nopad_r ltx_align_right ltx_th ltx_th_column\" id=\"S3.T2.1.2.2.4\">Length</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S3.T2.1.3.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S3.T2.1.3.1.1\">NQ-100k</th>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.1.3.1.2\">100,000</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.1.3.1.3\" rowspan=\"3\"><span class=\"ltx_text\" id=\"S3.T2.1.3.1.3.1\">49.2</span></td>\n<td class=\"ltx_td ltx_nopad_r ltx_align_right ltx_border_t\" id=\"S3.T2.1.3.1.4\" rowspan=\"3\"><span class=\"ltx_text\" id=\"S3.T2.1.3.1.4.1\">36,379.4</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.1.4.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S3.T2.1.4.2.1\">NQ-Dev</th>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.1.4.2.2\">1,968</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.1.5.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S3.T2.1.5.3.1\">NQ-Test</th>\n<td class=\"ltx_td 
ltx_align_right\" id=\"S3.T2.1.5.3.2\">7,830</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.1.6.4\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S3.T2.1.6.4.1\">MSMARCO-100k</th>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.1.6.4.2\">100,000</td>\n<td class=\"ltx_td ltx_align_right ltx_border_bb ltx_border_t\" id=\"S3.T2.1.6.4.3\" rowspan=\"3\"><span class=\"ltx_text\" id=\"S3.T2.1.6.4.3.1\">32.8</span></td>\n<td class=\"ltx_td ltx_nopad_r ltx_align_right ltx_border_bb ltx_border_t\" id=\"S3.T2.1.6.4.4\" rowspan=\"3\"><span class=\"ltx_text\" id=\"S3.T2.1.6.4.4.1\">334.4</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.1.7.5\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S3.T2.1.7.5.1\">MSMARCO-Dev</th>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.1.7.5.2\">2,000</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.1.8.6\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_bb\" id=\"S3.T2.1.8.6.1\">MSMARCO-Test</th>\n<td class=\"ltx_td ltx_align_right ltx_border_bb\" id=\"S3.T2.1.8.6.2\">6,980</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 2: </span>Dataset characteristics. \u2018# Pairs\u2019 refers to the number of query-document pairs. Average lengths refer to the average length in characters.</figcaption>\n</figure>",
|
| 98 |
+
"capture": "Table 2: Dataset characteristics. \u2018# Pairs\u2019 refers to the number of query-document pairs. Average lengths refer to the average length in characters."
|
| 99 |
+
},
|
| 100 |
+
"3": {
|
| 101 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T3\">\n<p class=\"ltx_p ltx_align_center\" id=\"S4.T3.1\"><span class=\"ltx_text ltx_inline-block\" id=\"S4.T3.1.1\" style=\"width:433.6pt;\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T3.1.1.1\">\n<span class=\"ltx_tbody\">\n<span class=\"ltx_tr\" id=\"S4.T3.1.1.1.1.1\">\n<span class=\"ltx_td ltx_border_tt\" id=\"S4.T3.1.1.1.1.1.1\"></span>\n<span class=\"ltx_td ltx_align_center ltx_border_tt ltx_colspan ltx_colspan_3\" id=\"S4.T3.1.1.1.1.1.2\">MSMARCO 1k</span>\n<span class=\"ltx_td ltx_align_center ltx_border_tt ltx_colspan ltx_colspan_3\" id=\"S4.T3.1.1.1.1.1.3\">MSMARCO 10k</span>\n<span class=\"ltx_td ltx_align_center ltx_border_tt ltx_colspan ltx_colspan_3\" id=\"S4.T3.1.1.1.1.1.4\">MSMARCO 100k</span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.1.1.1.2.2\">\n<span class=\"ltx_td\" id=\"S4.T3.1.1.1.2.2.1\"></span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T3.1.1.1.2.2.2\">Rec@1</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T3.1.1.1.2.2.3\">@10</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T3.1.1.1.2.2.4\">@20</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T3.1.1.1.2.2.5\">Rec@1</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T3.1.1.1.2.2.6\">@10</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T3.1.1.1.2.2.7\">@20</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T3.1.1.1.2.2.8\">Rec@1</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T3.1.1.1.2.2.9\">@10</span>\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.1.1.1.2.2.10\">@20</span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.1.1.1.3.3\">\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T3.1.1.1.3.3.1\"><em class=\"ltx_emph ltx_font_italic\" id=\"S4.T3.1.1.1.3.3.1.1\">Baseline</em></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T3.1.1.1.3.3.2\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T3.1.1.1.3.3.3\"></span>\n<span class=\"ltx_td 
ltx_border_t\" id=\"S4.T3.1.1.1.3.3.4\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T3.1.1.1.3.3.5\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T3.1.1.1.3.3.6\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T3.1.1.1.3.3.7\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T3.1.1.1.3.3.8\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T3.1.1.1.3.3.9\"></span>\n<span class=\"ltx_td ltx_nopad_r ltx_border_t\" id=\"S4.T3.1.1.1.3.3.10\"></span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.1.1.1.4.4\">\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T3.1.1.1.4.4.1\">Cluster Integer IDs</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.1.1.1.4.4.2\">41.1</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.1.1.1.4.4.3\">59.5</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.1.1.1.4.4.4\">64.2</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.1.1.1.4.4.5\">42.4</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.1.1.1.4.4.6\">62.3</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.1.1.1.4.4.7\">67.1</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.1.1.1.4.4.8\">46.8</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.1.1.1.4.4.9\">68.8</span>\n<span class=\"ltx_td ltx_nopad_r ltx_align_center ltx_border_t\" id=\"S4.T3.1.1.1.4.4.10\">73.4</span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.1.1.1.5.5\">\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T3.1.1.1.5.5.1\"><em class=\"ltx_emph ltx_font_italic\" id=\"S4.T3.1.1.1.5.5.1.1\">Extractive Summarization IDs</em></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T3.1.1.1.5.5.2\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T3.1.1.1.5.5.3\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T3.1.1.1.5.5.4\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T3.1.1.1.5.5.5\"></span>\n<span 
class=\"ltx_td ltx_border_t\" id=\"S4.T3.1.1.1.5.5.6\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T3.1.1.1.5.5.7\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T3.1.1.1.5.5.8\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T3.1.1.1.5.5.9\"></span>\n<span class=\"ltx_td ltx_nopad_r ltx_border_t\" id=\"S4.T3.1.1.1.5.5.10\"></span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.1.1.1.6.6\">\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T3.1.1.1.6.6.1\">BM25 Top-30</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.1.1.1.6.6.2\">48.7</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.1.1.1.6.6.3\">74.3</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.1.1.1.6.6.4\">79.4</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.1.1.1.6.6.5\">49.1</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.1.1.1.6.6.6\">75.7</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.1.1.1.6.6.7\">80.1</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.1.1.1.6.6.8\">52.0</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.1.1.1.6.6.9\">79.2</span>\n<span class=\"ltx_td ltx_nopad_r ltx_align_center ltx_border_t\" id=\"S4.T3.1.1.1.6.6.10\">82.9</span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.1.1.1.7.7\">\n<span class=\"ltx_td ltx_align_left\" id=\"S4.T3.1.1.1.7.7.1\">First 30 Tokens</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T3.1.1.1.7.7.2\">49.0</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T3.1.1.1.7.7.3\">73.0</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T3.1.1.1.7.7.4\">77.8</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T3.1.1.1.7.7.5\">48.7</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T3.1.1.1.7.7.6\">72.8</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T3.1.1.1.7.7.7\">77.9</span>\n<span class=\"ltx_td ltx_align_center\" 
id=\"S4.T3.1.1.1.7.7.8\">51.8</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T3.1.1.1.7.7.9\">76.0</span>\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.1.1.1.7.7.10\">79.6</span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.1.1.1.8.8\">\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T3.1.1.1.8.8.1\"><em class=\"ltx_emph ltx_font_italic\" id=\"S4.T3.1.1.1.8.8.1.1\">Abstractive Summarization IDs</em></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T3.1.1.1.8.8.2\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T3.1.1.1.8.8.3\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T3.1.1.1.8.8.4\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T3.1.1.1.8.8.5\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T3.1.1.1.8.8.6\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T3.1.1.1.8.8.7\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T3.1.1.1.8.8.8\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T3.1.1.1.8.8.9\"></span>\n<span class=\"ltx_td ltx_nopad_r ltx_border_t\" id=\"S4.T3.1.1.1.8.8.10\"></span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.1.1.1.9.9\">\n<span class=\"ltx_td ltx_align_left ltx_border_bb ltx_border_t\" id=\"S4.T3.1.1.1.9.9.1\">ACID</span>\n<span class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T3.1.1.1.9.9.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.1.1.1.9.9.2.1\">49.1</span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T3.1.1.1.9.9.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.1.1.1.9.9.3.1\">74.3</span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T3.1.1.1.9.9.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.1.1.1.9.9.4.1\">80.1</span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T3.1.1.1.9.9.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.1.1.1.9.9.5.1\">50.4</span></span>\n<span class=\"ltx_td ltx_align_center 
ltx_border_bb ltx_border_t\" id=\"S4.T3.1.1.1.9.9.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.1.1.1.9.9.6.1\">76.3</span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T3.1.1.1.9.9.7\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.1.1.1.9.9.7.1\">80.4</span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T3.1.1.1.9.9.8\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.1.1.1.9.9.8.1\">52.9</span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T3.1.1.1.9.9.9\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.1.1.1.9.9.9.1\">79.5</span></span>\n<span class=\"ltx_td ltx_nopad_r ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T3.1.1.1.9.9.10\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.1.1.1.9.9.10.1\">84.0</span></span></span>\n</span>\n</span></span></p>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 3: </span>Recall for MSMARCO. Recall refers to the percentage of queries in the evaluation set for which the ground-truth document ID was produced in the top-1, top-10, and top-20 candidates from constrained beam search decoding. MSMARCO 1k, 10k, and 100k refer to the number of training query-document pairs used to finetune the LM.</figcaption>\n</figure>",
|
| 102 |
+
"capture": "Table 3: Recall for MSMARCO. Recall refers to the percentage of queries in the evaluation set for which the ground-truth document ID was produced in the top-1, top-10, and top-20 candidates from constrained beam search decoding. MSMARCO 1k, 10k, and 100k refer to the number of training query-document pairs used to finetune the LM."
|
| 103 |
+
},
|
| 104 |
+
"4": {
|
| 105 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T4\">\n<p class=\"ltx_p ltx_align_center\" id=\"S4.T4.1\"><span class=\"ltx_text ltx_inline-block\" id=\"S4.T4.1.1\" style=\"width:433.6pt;\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T4.1.1.1\">\n<span class=\"ltx_tbody\">\n<span class=\"ltx_tr\" id=\"S4.T4.1.1.1.1.1\">\n<span class=\"ltx_td ltx_border_tt\" id=\"S4.T4.1.1.1.1.1.1\"></span>\n<span class=\"ltx_td ltx_align_center ltx_border_tt ltx_colspan ltx_colspan_3\" id=\"S4.T4.1.1.1.1.1.2\">NQ 1k</span>\n<span class=\"ltx_td ltx_align_center ltx_border_tt ltx_colspan ltx_colspan_3\" id=\"S4.T4.1.1.1.1.1.3\">NQ 10k</span>\n<span class=\"ltx_td ltx_align_center ltx_border_tt ltx_colspan ltx_colspan_3\" id=\"S4.T4.1.1.1.1.1.4\">NQ 100k</span></span>\n<span class=\"ltx_tr\" id=\"S4.T4.1.1.1.2.2\">\n<span class=\"ltx_td\" id=\"S4.T4.1.1.1.2.2.1\"></span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.2.2.2\">Rec@1</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.2.2.3\">@10</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.2.2.4\">@20</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.2.2.5\">Rec@1</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.2.2.6\">@10</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.2.2.7\">@20</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.2.2.8\">Rec@1</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.2.2.9\">@10</span>\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T4.1.1.1.2.2.10\">@20</span></span>\n<span class=\"ltx_tr\" id=\"S4.T4.1.1.1.3.3\">\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T4.1.1.1.3.3.1\"><em class=\"ltx_emph ltx_font_italic\" id=\"S4.T4.1.1.1.3.3.1.1\">Baselines</em></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T4.1.1.1.3.3.2\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T4.1.1.1.3.3.3\"></span>\n<span class=\"ltx_td ltx_border_t\" 
id=\"S4.T4.1.1.1.3.3.4\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T4.1.1.1.3.3.5\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T4.1.1.1.3.3.6\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T4.1.1.1.3.3.7\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T4.1.1.1.3.3.8\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T4.1.1.1.3.3.9\"></span>\n<span class=\"ltx_td ltx_nopad_r ltx_border_t\" id=\"S4.T4.1.1.1.3.3.10\"></span></span>\n<span class=\"ltx_tr\" id=\"S4.T4.1.1.1.4.4\">\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T4.1.1.1.4.4.1\">BM25</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.4.4.2\">20.9</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.4.4.3\">53.8</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.4.4.4\">62.7</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.4.4.5\">20.9</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.4.4.6\">53.8</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.4.4.7\">62.7</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.4.4.8\">20.9</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.4.4.9\">53.8</span>\n<span class=\"ltx_td ltx_nopad_r ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.4.4.10\">62.7</span></span>\n<span class=\"ltx_tr\" id=\"S4.T4.1.1.1.5.5\">\n<span class=\"ltx_td ltx_align_left\" id=\"S4.T4.1.1.1.5.5.1\">Dense Passage Retrieval</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.5.5.2\">25.8</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.5.5.3\">62.6</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.5.5.4\">70.9</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.5.5.5\">32.8</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.5.5.6\">74.9</span>\n<span 
class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.5.5.7\">82.6</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.5.5.8\">35.5</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.5.5.9\">78.7</span>\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T4.1.1.1.5.5.10\">86.1</span></span>\n<span class=\"ltx_tr\" id=\"S4.T4.1.1.1.6.6\">\n<span class=\"ltx_td ltx_align_left\" id=\"S4.T4.1.1.1.6.6.1\">Cluster Integer IDs</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.6.6.2\">38.4</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.6.6.3\">64.2</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.6.6.4\">69.4</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.6.6.5\">40.2</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.6.6.6\">67.5</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.6.6.7\">72.7</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.6.6.8\">40.8</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.6.6.9\">68.2</span>\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T4.1.1.1.6.6.10\">73.0</span></span>\n<span class=\"ltx_tr\" id=\"S4.T4.1.1.1.7.7\">\n<span class=\"ltx_td ltx_align_left\" id=\"S4.T4.1.1.1.7.7.1\">TSGen <cite class=\"ltx_cite ltx_citemacro_citep\">(Zhang et\u00a0al., <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2311.08593v2#bib.bib18\" title=\"\">2024 ###reference_b18###</a>)</cite></span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.7.7.2\">28.8</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.7.7.3\">67.1</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.7.7.4\">73.6</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.7.7.5\">29.2</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.7.7.6\">67.6</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.7.7.7\">74.4</span>\n<span class=\"ltx_td 
ltx_align_center\" id=\"S4.T4.1.1.1.7.7.8\">30.3</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.7.7.9\">71.8</span>\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T4.1.1.1.7.7.10\">78.3</span></span>\n<span class=\"ltx_tr\" id=\"S4.T4.1.1.1.8.8\">\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T4.1.1.1.8.8.1\"><em class=\"ltx_emph ltx_font_italic\" id=\"S4.T4.1.1.1.8.8.1.1\">Summarization-based IDs</em></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T4.1.1.1.8.8.2\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T4.1.1.1.8.8.3\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T4.1.1.1.8.8.4\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T4.1.1.1.8.8.5\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T4.1.1.1.8.8.6\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T4.1.1.1.8.8.7\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T4.1.1.1.8.8.8\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T4.1.1.1.8.8.9\"></span>\n<span class=\"ltx_td ltx_nopad_r ltx_border_t\" id=\"S4.T4.1.1.1.8.8.10\"></span></span>\n<span class=\"ltx_tr\" id=\"S4.T4.1.1.1.9.9\">\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T4.1.1.1.9.9.1\">BM25 Top-30</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.9.9.2\">36.5</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.9.9.3\">66.1</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.9.9.4\">70.9</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.9.9.5\">36.8</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.9.9.6\">66.1</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.9.9.7\">71.1</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.9.9.8\">37.0</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.9.9.9\">68.2</span>\n<span 
class=\"ltx_td ltx_nopad_r ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.9.9.10\">72.8</span></span>\n<span class=\"ltx_tr\" id=\"S4.T4.1.1.1.10.10\">\n<span class=\"ltx_td ltx_align_left\" id=\"S4.T4.1.1.1.10.10.1\">First 30 Tokens</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.10.10.2\">41.9</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.10.10.3\">66.0</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.10.10.4\">69.9</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.10.10.5\">43.3</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.10.10.6\">67.6</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.10.10.7\">71.6</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.10.10.8\">47.7</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.10.10.9\">71.2</span>\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T4.1.1.1.10.10.10\">74.4</span></span>\n<span class=\"ltx_tr\" id=\"S4.T4.1.1.1.11.11\">\n<span class=\"ltx_td ltx_align_left\" id=\"S4.T4.1.1.1.11.11.1\">ACID</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.11.11.2\">39.2</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.11.11.3\">69.2</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.11.11.4\">74.0</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.11.11.5\">40.5</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.11.11.6\">70.7</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.11.11.7\">75.2</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.11.11.8\">40.9</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1.11.11.9\">74.9</span>\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T4.1.1.1.11.11.10\">80.2</span></span>\n<span class=\"ltx_tr\" id=\"S4.T4.1.1.1.12.12\">\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T4.1.1.1.12.12.1\"><em 
class=\"ltx_emph ltx_font_italic\" id=\"S4.T4.1.1.1.12.12.1.1\">Summarization-based IDs with Joint Decoding</em></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T4.1.1.1.12.12.2\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T4.1.1.1.12.12.3\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T4.1.1.1.12.12.4\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T4.1.1.1.12.12.5\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T4.1.1.1.12.12.6\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T4.1.1.1.12.12.7\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T4.1.1.1.12.12.8\"></span>\n<span class=\"ltx_td ltx_border_t\" id=\"S4.T4.1.1.1.12.12.9\"></span>\n<span class=\"ltx_td ltx_nopad_r ltx_border_t\" id=\"S4.T4.1.1.1.12.12.10\"></span></span>\n<span class=\"ltx_tr\" id=\"S4.T4.1.1.1.13.13\">\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T4.1.1.1.13.13.1\">First 30 Tokens w/ Joint Dec</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.13.13.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.1.1.1.13.13.2.1\">49.1</span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.13.13.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.1.1.1.13.13.3.1\">78.7</span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.13.13.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.1.1.1.13.13.4.1\">82.6</span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.13.13.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.1.1.1.13.13.5.1\">49.7</span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.13.13.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.1.1.1.13.13.6.1\">79.2</span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.13.13.7\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.1.1.1.13.13.7.1\">83.1</span></span>\n<span class=\"ltx_td ltx_align_center 
ltx_border_t\" id=\"S4.T4.1.1.1.13.13.8\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.1.1.1.13.13.8.1\">55.3</span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.13.13.9\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.1.1.1.13.13.9.1\">83.0</span></span>\n<span class=\"ltx_td ltx_nopad_r ltx_align_center ltx_border_t\" id=\"S4.T4.1.1.1.13.13.10\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.1.1.1.13.13.10.1\">86.4</span></span></span>\n<span class=\"ltx_tr\" id=\"S4.T4.1.1.1.14.14\">\n<span class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"S4.T4.1.1.1.14.14.1\">ACID w/ Joint Dec</span>\n<span class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T4.1.1.1.14.14.2\">41.3</span>\n<span class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T4.1.1.1.14.14.3\">77.3</span>\n<span class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T4.1.1.1.14.14.4\">82.5</span>\n<span class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T4.1.1.1.14.14.5\">41.3</span>\n<span class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T4.1.1.1.14.14.6\">77.0</span>\n<span class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T4.1.1.1.14.14.7\">82.9</span>\n<span class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T4.1.1.1.14.14.8\">42.3</span>\n<span class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T4.1.1.1.14.14.9\">78.0</span>\n<span class=\"ltx_td ltx_nopad_r ltx_align_center ltx_border_bb\" id=\"S4.T4.1.1.1.14.14.10\">84.0</span></span>\n</span>\n</span></span></p>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 4: </span>Recall for Natural Questions. Recall refers to the percentage of queries in the evaluation set for which the ground-truth document ID was produced in the top-1, top-10, and top-20 candidates from constrained beam search decoding. NQ 1k, 10k, and 100k refer to the number of training query-document pairs used to finetune the LM. 
\u2018Joint Dec\u2019 refers to joint decoding with the small, task-specific 160M parameter LM and a large 12B parameter LM with in-context learning.</figcaption>\n</figure>",
|
| 106 |
+
"capture": "Table 4: Recall for Natural Questions. Recall refers to the percentage of queries in the evaluation set for which the ground-truth document ID was produced in the top-1, top-10, and top-20 candidates from constrained beam search decoding. NQ 1k, 10k, and 100k refer to the number of training query-document pairs used to finetune the LM. \u2018Joint Dec\u2019 refers to joint decoding with the small, task-specific 160M parameter LM and a large 12B parameter LM with in-context learning."
|
| 107 |
+
},
|
| 108 |
+
"5": {
|
| 109 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T5\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.T5.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T5.1.1.1\">\n<th class=\"ltx_td ltx_th ltx_th_row ltx_border_tt\" id=\"S4.T5.1.1.1.1\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" colspan=\"3\" id=\"S4.T5.1.1.1.2\">Cluster IDs</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" colspan=\"3\" id=\"S4.T5.1.1.1.3\">ACIDs</th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.2.2\">\n<th class=\"ltx_td ltx_th ltx_th_row\" id=\"S4.T5.1.2.2.1\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S4.T5.1.2.2.2\">Rec@1</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S4.T5.1.2.2.3\">@10</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S4.T5.1.2.2.4\">@20</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S4.T5.1.2.2.5\">Rec@1</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S4.T5.1.2.2.6\">@10</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S4.T5.1.2.2.7\">@20</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T5.1.3.1\">\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_row ltx_border_t\" id=\"S4.T5.1.3.1.1\">Beam width 1</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T5.1.3.1.2\">47.6</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T5.1.3.1.3\">\u2013</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T5.1.3.1.4\">\u2013</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T5.1.3.1.5\">54.0</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T5.1.3.1.6\">\u2013</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T5.1.3.1.7\">\u2013</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.4.2\">\n<th class=\"ltx_td ltx_align_right 
ltx_th ltx_th_row\" id=\"S4.T5.1.4.2.1\">2</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.4.2.2\">48.7</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.4.2.3\">\u2013</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.4.2.4\">\u2013</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.4.2.5\">56.0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.4.2.6\">\u2013</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.4.2.7\">\u2013</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.5.3\">\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_row\" id=\"S4.T5.1.5.3.1\">4</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.5.3.2\">49.0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.5.3.3\">\u2013</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.5.3.4\">\u2013</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.5.3.5\">55.7</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.5.3.6\">\u2013</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.5.3.7\">\u2013</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.6.4\">\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_row\" id=\"S4.T5.1.6.4.1\">8</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.6.4.2\">48.8</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.6.4.3\">\u2013</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.6.4.4\">\u2013</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.6.4.5\">56.5</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.6.4.6\">\u2013</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.6.4.7\">\u2013</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.7.5\">\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_row\" id=\"S4.T5.1.7.5.1\">16</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.7.5.2\">49.0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.7.5.3\">71.0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.7.5.4\">\u2013</td>\n<td class=\"ltx_td ltx_align_center\" 
id=\"S4.T5.1.7.5.5\">56.6</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.7.5.6\">84.1</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.7.5.7\">\u2013</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.8.6\">\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_row\" id=\"S4.T5.1.8.6.1\">20</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.8.6.2\">49.0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.8.6.3\">71.1</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.8.6.4\">75.6</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.8.6.5\">55.0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.8.6.6\">83.4</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.8.6.7\">87.2</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.9.7\">\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_row\" id=\"S4.T5.1.9.7.1\">30</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.9.7.2\">49.0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.9.7.3\">70.9</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.9.7.4\">75.6</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.9.7.5\">56.4</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.9.7.6\">84.1</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.9.7.7\">88.3</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.10.8\">\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_row\" id=\"S4.T5.1.10.8.1\">40</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.10.8.2\">49.0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.10.8.3\">70.9</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.10.8.4\">75.6</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.10.8.5\">56.5</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.10.8.6\">84.1</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.10.8.7\">88.3</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.11.9\">\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_row ltx_border_bb\" id=\"S4.T5.1.11.9.1\">50</th>\n<td 
class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T5.1.11.9.2\">49.0</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T5.1.11.9.3\">70.9</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T5.1.11.9.4\">75.6</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T5.1.11.9.5\">56.5</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T5.1.11.9.6\">84.1</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T5.1.11.9.7\">88.4</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 5: </span>Recall of the 1B-parameter model versus beam width on the MSMARCO 100k dataset.</figcaption>\n</figure>",
|
| 110 |
+
"capture": "Table 5: Recall of the 1B-parameter model versus beam width on the MSMARCO 100k dataset."
|
| 111 |
+
},
|
| 112 |
+
"6": {
|
| 113 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T6\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.T6.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T6.1.1.1\">\n<th class=\"ltx_td ltx_th ltx_th_row ltx_border_tt\" id=\"S4.T6.1.1.1.1\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" colspan=\"3\" id=\"S4.T6.1.1.1.2\">MSMARCO 100k</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" colspan=\"3\" id=\"S4.T6.1.1.1.3\">NQ 100k</th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T6.1.2.2\">\n<th class=\"ltx_td ltx_th ltx_th_row\" id=\"S4.T6.1.2.2.1\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S4.T6.1.2.2.2\">Rec@1</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S4.T6.1.2.2.3\">@10</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S4.T6.1.2.2.4\">@20</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S4.T6.1.2.2.5\">Rec@1</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S4.T6.1.2.2.6\">@10</th>\n<th class=\"ltx_td ltx_nopad_r ltx_align_center ltx_th ltx_th_column\" id=\"S4.T6.1.2.2.7\">@20</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T6.1.3.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.T6.1.3.1.1\">First 10</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T6.1.3.1.2\">51.1</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T6.1.3.1.3\">75.8</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T6.1.3.1.4\">79.8</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T6.1.3.1.5\">46.9</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T6.1.3.1.6\">65.0</td>\n<td class=\"ltx_td ltx_nopad_r ltx_align_center ltx_border_t\" id=\"S4.T6.1.3.1.7\">67.6</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T6.1.4.2\">\n<th class=\"ltx_td 
ltx_align_left ltx_th ltx_th_row\" id=\"S4.T6.1.4.2.1\">First 20</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T6.1.4.2.2\">50.0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T6.1.4.2.3\">77.0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T6.1.4.2.4\">80.4</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T6.1.4.2.5\">46.9</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T6.1.4.2.6\">69.1</td>\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T6.1.4.2.7\">72.3</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T6.1.5.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T6.1.5.3.1\">First 30</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T6.1.5.3.2\">51.8</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T6.1.5.3.3\">76.0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T6.1.5.3.4\">79.6</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T6.1.5.3.5\">47.7</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T6.1.5.3.6\">71.2</td>\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T6.1.5.3.7\">74.4</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T6.1.6.4\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_bb\" id=\"S4.T6.1.6.4.1\">First 40</th>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T6.1.6.4.2\">49.8</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T6.1.6.4.3\">75.8</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T6.1.6.4.4\">79.2</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T6.1.6.4.5\">49.9</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T6.1.6.4.6\">72.3</td>\n<td class=\"ltx_td ltx_nopad_r ltx_align_center ltx_border_bb\" id=\"S4.T6.1.6.4.7\">75.4</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 6: </span>Recall on MSMARCO and NQ 100k versus the length of the document ID. 
Here, we use the extractive summarization ID based on the first 10, 20, 30, or 40 tokens of each document.</figcaption>\n</figure>",
|
| 114 |
+
"capture": "Table 6: Recall on MSMARCO and NQ 100k versus the length of the document ID. Here, we use the extractive summarization ID based on the first 10, 20, 30, or 40 tokens of each document."
|
| 115 |
+
}
|
| 116 |
+
},
|
| 117 |
+
"image_paths": {
|
| 118 |
+
"1": {
|
| 119 |
+
"figure_path": "2311.08593v2_figure_1.png",
|
| 120 |
+
"caption": "Figure 1: \nGenerative retrieval vs. dense retrieval.\nIn dense retrieval (right), both the query and the documents are encoded into dense vectors (i.e., embeddings).\nNearest-neighbor search is then applied to find the most relevant documents.\nGenerative retrieval (left) trains a language model to generate the relevant document ID conditional on the query. The ID is tied to a unique document, allowing for direct lookup.\nWe propose summarization-based document IDs like ACID, which uses GPT-3.5 to create a sequence of abstractive keyphrases to serve as the document ID.",
|
| 121 |
+
"url": "http://arxiv.org/html/2311.08593v2/x1.png"
|
| 122 |
+
},
|
| 123 |
+
"2": {
|
| 124 |
+
"figure_path": "2311.08593v2_figure_2.png",
|
| 125 |
+
"caption": "Figure 2: Data processing and model training. (a) Each document-query pair from the training corpus will be converted into inputs and outputs for finetuning the pretrained transformer decoder, which serves as the generative retrieval model. (b) GPT-3.5 is used to generate a sequence of keyphrases, which is used as the document ID. (c) Given a user query or a synthetic query, the generative retrieval model learns to generate the ID of the relevant document. We use a doc2query model to generate synthetic queries as additional inputs.\nRandomly sampled spans of 64 tokens can also be used as inputs to ensure that the model associates the contents of each document with its ID.",
|
| 126 |
+
"url": "http://arxiv.org/html/2311.08593v2/x2.png"
|
| 127 |
+
},
|
| 128 |
+
"3": {
|
| 129 |
+
"figure_path": "2311.08593v2_figure_3.png",
|
| 130 |
+
"caption": "Figure 3: Recall versus the number of parameters in the LM on the MSMARCO 100k dataset.",
|
| 131 |
+
"url": "http://arxiv.org/html/2311.08593v2/extracted/5964030/figures/model_size.png"
|
| 132 |
+
}
|
| 133 |
+
},
|
| 134 |
+
"validation": true,
|
| 135 |
+
"references": [
|
| 136 |
+
{
|
| 137 |
+
"1": {
|
| 138 |
+
"title": "MS MARCO: A human generated machine reading comprehension dataset.",
|
| 139 |
+
"author": "Payal Bajaj, Daniel Campos, Nick Craswell, Li Deng, Jianfeng Gao, Xiaodong Liu, Rangan Majumder, Andrew McNamara, Bhaskar Mitra, Tri Nguyen, et al. 2016.",
|
| 140 |
+
"venue": "In Proc. of CoCo.",
|
| 141 |
+
"url": "https://arxiv.org/abs/1611.09268"
|
| 142 |
+
}
|
| 143 |
+
},
|
| 144 |
+
{
|
| 145 |
+
"2": {
|
| 146 |
+
"title": "Autoregressive search engines: Generating substrings as document identifiers.",
|
| 147 |
+
"author": "Michele Bevilacqua, Giuseppe Ottaviano, Patrick Lewis, Scott Yih, Sebastian Riedel, and Fabio Petroni. 2022.",
|
| 148 |
+
"venue": "Advances in Neural Information Processing Systems, 35:31668\u201331683.",
|
| 149 |
+
"url": null
|
| 150 |
+
}
|
| 151 |
+
},
|
| 152 |
+
{
|
| 153 |
+
"3": {
|
| 154 |
+
"title": "Pythia: A suite for analyzing large language models across training and scaling.",
|
| 155 |
+
"author": "Stella Biderman, Hailey Schoelkopf, Quentin Anthony, Herbie Bradley, Kyle O\u2019Brien, Eric Hallahan, Mohammad Aflah Khan, Shivanshu Purohit, USVSN Sai Prashanth, Edward Raff, et al. 2023.",
|
| 156 |
+
"venue": "arXiv preprint arXiv:2304.01373.",
|
| 157 |
+
"url": null
|
| 158 |
+
}
|
| 159 |
+
},
|
| 160 |
+
{
|
| 161 |
+
"4": {
|
| 162 |
+
"title": "Autoregressive entity retrieval.",
|
| 163 |
+
"author": "Nicola De Cao, Gautier Izacard, Sebastian Riedel, and Fabio Petroni. 2020.",
|
| 164 |
+
"venue": "arXiv preprint arXiv:2010.00904.",
|
| 165 |
+
"url": null
|
| 166 |
+
}
|
| 167 |
+
},
|
| 168 |
+
{
|
| 169 |
+
"5": {
|
| 170 |
+
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding.",
|
| 171 |
+
"author": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019.",
|
| 172 |
+
"venue": "In Proc. of NAACL.",
|
| 173 |
+
"url": "https://arxiv.org/abs/1810.04805"
|
| 174 |
+
}
|
| 175 |
+
},
|
| 176 |
+
{
|
| 177 |
+
"6": {
|
| 178 |
+
"title": "Triviaqa: A large scale distantly supervised challenge dataset for reading comprehension.",
|
| 179 |
+
"author": "Mandar Joshi, Eunsol Choi, Daniel S Weld, and Luke Zettlemoyer. 2017.",
|
| 180 |
+
"venue": "arXiv preprint arXiv:1705.03551.",
|
| 181 |
+
"url": null
|
| 182 |
+
}
|
| 183 |
+
},
|
| 184 |
+
{
|
| 185 |
+
"7": {
|
| 186 |
+
"title": "Dense passage retrieval for open-domain question answering.",
|
| 187 |
+
"author": "Vladimir Karpukhin, Barlas Oguz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 2020.",
|
| 188 |
+
"venue": "In Proc. of EMNLP.",
|
| 189 |
+
"url": "https://arxiv.org/abs/2004.04906"
|
| 190 |
+
}
|
| 191 |
+
},
|
| 192 |
+
{
|
| 193 |
+
"8": {
|
| 194 |
+
"title": "Natural questions: a benchmark for question answering research.",
|
| 195 |
+
"author": "Tom Kwiatkowski, Jennimaria Palomaki, Olivia Redfield, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Jacob Devlin, Kenton Lee, et al. 2019.",
|
| 196 |
+
"venue": "Transactions of the Association for Computational Linguistics, 7:453\u2013466.",
|
| 197 |
+
"url": null
|
| 198 |
+
}
|
| 199 |
+
},
|
| 200 |
+
{
|
| 201 |
+
"9": {
|
| 202 |
+
"title": "Decoupled weight decay regularization.",
|
| 203 |
+
"author": "Ilya Loshchilov and Frank Hutter. 2017.",
|
| 204 |
+
"venue": "arXiv preprint arXiv:1711.05101.",
|
| 205 |
+
"url": null
|
| 206 |
+
}
|
| 207 |
+
},
|
| 208 |
+
{
|
| 209 |
+
"10": {
|
| 210 |
+
"title": "Generation-augmented retrieval for open-domain question answering.",
|
| 211 |
+
"author": "Yuning Mao, Pengcheng He, Xiaodong Liu, Yelong Shen, Jianfeng Gao, Jiawei Han, and Weizhu Chen. 2020.",
|
| 212 |
+
"venue": "arXiv preprint arXiv:2009.08553.",
|
| 213 |
+
"url": null
|
| 214 |
+
}
|
| 215 |
+
},
|
| 216 |
+
{
|
| 217 |
+
"11": {
|
| 218 |
+
"title": "Document expansion by query prediction.",
|
| 219 |
+
"author": "Rodrigo Nogueira, Wei Yang, Jimmy Lin, and Kyunghyun Cho. 2019.",
|
| 220 |
+
"venue": "arXiv preprint arXiv:1904.08375.",
|
| 221 |
+
"url": null
|
| 222 |
+
}
|
| 223 |
+
},
|
| 224 |
+
{
|
| 225 |
+
"12": {
|
| 226 |
+
"title": "How does generative retrieval scale to millions of passages?",
|
| 227 |
+
"author": "Ronak Pradeep, Kai Hui, Jai Gupta, Adam D Lelkes, Honglei Zhuang, Jimmy Lin, Donald Metzler, and Vinh Q Tran. 2023.",
|
| 228 |
+
"venue": "arXiv preprint arXiv:2305.11841.",
|
| 229 |
+
"url": null
|
| 230 |
+
}
|
| 231 |
+
},
|
| 232 |
+
{
|
| 233 |
+
"13": {
|
| 234 |
+
"title": "Exploring the limits of transfer learning with a unified text-to-text transformer.",
|
| 235 |
+
"author": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2020.",
|
| 236 |
+
"venue": "JMLR.",
|
| 237 |
+
"url": "http://jmlr.org/papers/v21/20-074.html"
|
| 238 |
+
}
|
| 239 |
+
},
|
| 240 |
+
{
|
| 241 |
+
"14": {
|
| 242 |
+
"title": "Learning to tokenize for generative retrieval.",
|
| 243 |
+
"author": "Weiwei Sun, Lingyong Yan, Zheng Chen, Shuaiqiang Wang, Haichao Zhu, Pengjie Ren, Zhumin Chen, Dawei Yin, Maarten Rijke, and Zhaochun Ren. 2024.",
|
| 244 |
+
"venue": "Advances in Neural Information Processing Systems, 36.",
|
| 245 |
+
"url": null
|
| 246 |
+
}
|
| 247 |
+
},
|
| 248 |
+
{
|
| 249 |
+
"15": {
|
| 250 |
+
"title": "Transformer memory as a differentiable search index.",
|
| 251 |
+
"author": "Yi Tay, Vinh Tran, Mostafa Dehghani, Jianmo Ni, Dara Bahri, Harsh Mehta, Zhen Qin, Kai Hui, Zhe Zhao, Jai Gupta, et al. 2022.",
|
| 252 |
+
"venue": "Advances in Neural Information Processing Systems, 35:21831\u201321843.",
|
| 253 |
+
"url": null
|
| 254 |
+
}
|
| 255 |
+
},
|
| 256 |
+
{
|
| 257 |
+
"16": {
|
| 258 |
+
"title": "A neural corpus indexer for document retrieval.",
|
| 259 |
+
"author": "Yujing Wang, Yingyan Hou, Haonan Wang, Ziming Miao, Shibin Wu, Qi Chen, Yuqing Xia, Chengmin Chi, Guoshuai Zhao, Zheng Liu, et al. 2022.",
|
| 260 |
+
"venue": "Advances in Neural Information Processing Systems, 35:25600\u201325614.",
|
| 261 |
+
"url": null
|
| 262 |
+
}
|
| 263 |
+
},
|
| 264 |
+
{
|
| 265 |
+
"17": {
|
| 266 |
+
"title": "Anserini: Enabling the use of lucene for information retrieval research.",
|
| 267 |
+
"author": "Peilin Yang, Hui Fang, and Jimmy Lin. 2017.",
|
| 268 |
+
"venue": "In Proceedings of the 40th international ACM SIGIR conference on research and development in information retrieval, pages 1253\u20131256.",
|
| 269 |
+
"url": null
|
| 270 |
+
}
|
| 271 |
+
},
|
| 272 |
+
{
|
| 273 |
+
"18": {
|
| 274 |
+
"title": "Generative retrieval via term set generation.",
|
| 275 |
+
"author": "Peitian Zhang, Zheng Liu, Yujia Zhou, Zhicheng Dou, Fangchao Liu, and Zhao Cao. 2024.",
|
| 276 |
+
"venue": "arXiv preprint arXiv:2305.13859.",
|
| 277 |
+
"url": null
|
| 278 |
+
}
|
| 279 |
+
},
|
| 280 |
+
{
|
| 281 |
+
"19": {
|
| 282 |
+
"title": "Bridging the gap between indexing and retrieval for differentiable search index with query generation.",
|
| 283 |
+
"author": "Shengyao Zhuang, Houxing Ren, Linjun Shou, Jian Pei, Ming Gong, Guido Zuccon, and Daxin Jiang. 2023.",
|
| 284 |
+
"venue": "The First Workshop on Generative Information Retrieval at SIGIR.",
|
| 285 |
+
"url": null
|
| 286 |
+
}
|
| 287 |
+
}
|
| 288 |
+
],
|
| 289 |
+
"url": "http://arxiv.org/html/2311.08593v2"
|
| 290 |
+
}
|
20241030/2312.01847v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2312.05439v3.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2312.10336v2.json
ADDED
|
@@ -0,0 +1,667 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"title": "Certified Minimax Unlearning with Generalization Rates and Deletion Capacity",
|
| 3 |
+
"abstract": "We study the problem of -certified machine unlearning for minimax models. Most of the existing works focus on unlearning from standard statistical learning models that have a single variable and their unlearning steps hinge on the direct Hessian-based conventional Newton update. We develop a new -certified machine unlearning algorithm for minimax models. It proposes a minimax unlearning step consisting of a total Hessian-based complete Newton update and the Gaussian mechanism borrowed from differential privacy. To obtain the unlearning certification, our method injects calibrated Gaussian noises by carefully analyzing the \u201csensitivity\u201d of the minimax unlearning step (i.e., the closeness between the minimax unlearning variables and the retraining-from-scratch variables). We derive the generalization rates in terms of population strong and weak primal-dual risk for three different cases of loss functions, i.e., (strongly-)convex-(strongly-)concave losses. We also provide the deletion capacity to guarantee that a desired population risk can be maintained as long as the number of deleted samples does not exceed the derived amount. With training samples and model dimension , it yields the order , which shows a strict gap over the baseline method of differentially private minimax learning that has . In addition, our rates of generalization and deletion capacity match the state-of-the-art rates derived previously for standard statistical learning models.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Minimax models have been widely applied in a variety of machine learning applications, including generative adversarial networks (Goodfellow et al., 2014 ###reference_b25###; Arjovsky et al., 2017 ###reference_b1###), adversarially robust learning (Madry et al., 2018 ###reference_b36###; Sinha et al., 2018 ###reference_b49###), and reinforcement learning (Du et al., 2017 ###reference_b16###; Dai et al., 2018 ###reference_b14###). This is largely credited to the two-variable (i.e., primal and dual variables) model structure of minimax models, which is versatile enough to accommodate such diverse instantiations. As is common in machine learning practice, training a successful minimax model relies crucially on a potentially large corpus of training samples that are contributed by users. This raises privacy concerns for minimax models. Unlike standard statistical learning (STL) models, the privacy studies for minimax models are relatively newer. Most of the existing studies focus on privacy protection during the training phase under the differential privacy (DP) notion (Dwork et al., 2006 ###reference_b17###) and federated minimax learning settings (Sharma et al., 2022 ###reference_b47###). 
Recent works in this direction have successfully achieved several optimal generalization performances measured in terms of the population primal-dual (PD) risk for DP minimax models specifically (Yang et al., 2022 ###reference_b63###; Zhang et al., 2022a ###reference_b66###; Bassily et al., 2023 ###reference_b3###; Boob and Guzm\u00e1n, 2023 ###reference_b4###).\n###table_1### Machine unlearning is an emerging privacy-respecting problem concerning already-trained models (i.e., during the post-training phase) (Cao and Yang, 2015 ###reference_b7###; Guo et al., 2020 ###reference_b27###; Sekhari et al., 2021 ###reference_b46###; Graves et al., 2021 ###reference_b26###; Bourtoule et al., 2021 ###reference_b5###; Li et al., 2021 ###reference_b31###; Shibata et al., 2021 ###reference_b48###; Wu et al., 2022 ###reference_b58###; Cheng et al., 2023 ###reference_b11###; Chen et al., 2023 ###reference_b10###; Tarun et al., 2023 ###reference_b51###; Wu et al., 2023 ###reference_b60###; Ghazi et al., 2023 ###reference_b20###; Wang et al., 2023b ###reference_b56###). That is, it removes certain training samples from the trained model upon their users\u2019 data deletion requests. It is driven by the right to be forgotten, which is mandated by a growing number of user data protection legislations enacted in recent years. Prominent examples include the European Union\u2019s General Data Protection Regulation (GDPR) (Mantelero, 2013 ###reference_b38###), the California Consumer Privacy Act (CCPA), and Canada\u2019s proposed Consumer Privacy Protection Act (CPPA). Machine unlearning comes with several desiderata. Besides sufficiently removing the influence of the data being deleted, it should be efficient and avoid the prohibitive computational cost of the baseline method to fully retrain the model on the remaining dataset from scratch. 
To guarantee the sufficiency of data removal, there are exact machine unlearning methods (Cao and Yang, 2015 ###reference_b7###; Ginart et al., 2019 ###reference_b21###; Brophy and Lowd, 2021 ###reference_b6###; Bourtoule et al., 2021 ###reference_b5###; Ullah et al., 2021 ###reference_b53###; Schelter et al., 2021 ###reference_b45###; Chen et al., 2022b ###reference_b9###, a ###reference_b8###; Yan et al., 2022 ###reference_b62###; Di et al., 2023 ###reference_b15###; Xia et al., 2023 ###reference_b61###) and approximate machine unlearning methods (Golatkar et al., 2020a ###reference_b22###; Wu et al., 2020 ###reference_b59###; Golatkar et al., 2020b ###reference_b23###; Nguyen et al., 2020 ###reference_b42###; Neel et al., 2021 ###reference_b40###; Peste et al., 2021 ###reference_b44###; Golatkar et al., 2021 ###reference_b24###; Warnecke et al., 2023 ###reference_b57###; Izzo et al., 2021 ###reference_b28###; Mahadevan and Mathioudakis, 2021 ###reference_b37###; Mehta et al., 2022 ###reference_b39###; Zhang et al., 2022c ###reference_b69###; Wang et al., 2023a ###reference_b55###; Chien et al., 2023a ###reference_b12###; Lin et al., 2023 ###reference_b32###) (some can offer the rigorous -certification (Guo et al., 2020 ###reference_b27###; Sekhari et al., 2021 ###reference_b46###; Suriyakumar and Wilson, 2022 ###reference_b50###; Chien et al., 2023b ###reference_b13###) inspired by differential privacy). In addition, recent studies also point out the importance of understanding the relationship between the generalization performance and the amount of deleted samples (Sekhari et al., 2021 ###reference_b46###; Suriyakumar and Wilson, 2022 ###reference_b50###). In particular, they introduce the definition of deletion capacity to formally quantify the number of samples that can be deleted for the after-unlearning model to maintain a designated population risk. 
However, most existing works so far have focused on machine unlearning for standard statistical learning models with one variable, which leaves it unknown how to design a machine unlearning method to meet all the desiderata above.\nMachine unlearning for minimax models becomes a pressing problem because the trained minimax models also have a heavy reliance on the training data, while the users contributing data are granted the right to be forgotten. In this paper, we study the machine unlearning problem for minimax models under the -certified machine unlearning framework. We collect in Table 1 ###reference_### the results in this paper and comparisons with baseline methods that are adapted from previous papers to -certified machine unlearning.\nOur main contributions can be summarized as follows.\nCertified minimax unlearning algorithm: We develop -certified minimax unlearning algorithm under the setting of the strongly-convex-strongly-concave loss function. To sufficiently remove the data influence, the algorithm introduces the total Hessian consisting of both direct Hessian and indirect Hessian, where the latter is crucial to account for the inter-dependence between the primal and dual variables in minimax models. It leads to the complete Newton-based minimax unlearning update. 
Subsequently, we introduce the Gaussian mechanism from DP to achieve the -minimax unlearning certification, which requires careful analysis for the closeness between the complete Newton updated variables and the retraining-from-scratch variables.\nGeneralization: We provide generalization results for our certified minimax unlearning algorithm in terms of the population weak and strong primal-dual risk, which is a common generalization measure for minimax models.\nDeletion capacity: We establish the deletion capacity result, which guarantees that our unlearning algorithm can retain the generalization rates for up to deleted samples.\nIt matches the state-of-the-art result under the standard statistical unlearning setting that can be regarded as a special case of our minimax setting.\nExtension to more general losses: We extend the certified minimax unlearning to more general loss functions, including convex-concave, strongly-convex-concave, and convex-strongly-concave losses, and provide the corresponding -certification, population primal-dual risk, and deletion capacity results.\nExtension with better efficiency: We develop a more computationally efficient extension, which can also support successive and online deletion requests. It saves the re-computation of the total Hessian matrix during the unlearning phase, where the minimax unlearning update can be regarded as a total Hessian-based infinitesimal jackknife. It also comes with slightly smaller population primal-dual risk though the overall rates of the risk and deletion capacity remain the same."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Related work",
|
| 15 |
+
"text": "Machine unlearning receives increasing research attention in recent years, mainly due to the growing concerns about the privacy of user data that are utilized for machine learning model training. Since the earliest work by Cao and Yang (2015 ###reference_b7###), a variety of machine unlearning methods have been proposed, which can be roughly divided into two categories: exact unlearning and approximate unlearning.\nExact machine unlearning. Methods for exact machine unlearning aim to produce models that perform identically to the models retrained from scratch.\nSome exact unlearning methods are designed for specific machine learning models like k-means clustering (Ginart et al., 2019 ###reference_b21###) and random forests (Brophy and Lowd, 2021 ###reference_b6###).\nSISA (Bourtoule et al., 2021 ###reference_b5###) proposes a general exact unlearning framework based on sharding and slicing the training data into multiple non-overlapping shards and training independently on each shard. During unlearning, SISA retrains only on the shards containing the data to be removed.\nGraphEraser (Chen et al., 2022b ###reference_b9###) and RecEraser (Chen et al., 2022a ###reference_b8###) further extend SISA to unlearning for graph neural networks and recommendation systems, respectively.\nApproximate machine unlearning.\nApproximate machine unlearning methods propose to make a tradeoff between the exactness in data removal and computational/memory efficiency. Prior works propose diverse ways to update the model parameter and offer different types of unlearning certification. 
When it comes to the unlearning update, many existing works consider the Newton update-related unlearning step where the Hessian matrix of the loss function plays a key role (Guo et al., 2020 ###reference_b27###; Golatkar et al., 2020a ###reference_b22###; Peste et al., 2021 ###reference_b44###; Sekhari et al., 2021 ###reference_b46###; Golatkar et al., 2021 ###reference_b24###; Mahadevan and Mathioudakis, 2021 ###reference_b37###; Suriyakumar and Wilson, 2022 ###reference_b50###; Mehta et al., 2022 ###reference_b39###; Chien et al., 2023b ###reference_b13###). This unlearning update is motivated by influence functions (Koh and Liang, 2017 ###reference_b29###). In order to alleviate the computation of the Hessian, Golatkar et al. (2020a ###reference_b22###) and Peste et al. (2021 ###reference_b44###) utilize Fisher Information Matrix to approximate the Hessian, mitigating its expensive computation and inversion.\nMehta et al. (2022 ###reference_b39###) provide a variant of conditional independence coefficient to select sufficient sets for unlearning, avoiding the need to invert the entire Hessian matrix.\nML-forgetting (Golatkar et al., 2021 ###reference_b24###) trains a linear weights set on the core dataset which would not change by standard training and a linear weights set on the user dataset containing data to be forgotten. They use an optimization problem to approximate the forgetting Newton update. Suriyakumar and Wilson (2022 ###reference_b50###) leverage the proximal infinitesimal jackknife as the unlearning step in order to be applied to nonsmooth loss functions. In addition, they can achieve better computational efficiency and are capable of dealing with online delete requests. 
There are also many other designs achieving different degrees of speedup (Wu et al., 2020 ###reference_b59###; Nguyen et al., 2020 ###reference_b42###; Neel et al., 2021 ###reference_b40###; Zhang et al., 2022c ###reference_b69###).\nApart from the various designs for the unlearning update, there are also different definitions of certified machine unlearning.\nEarly works like Guo et al. (2020 ###reference_b27###) introduce a certified data-removal mechanism that adds random perturbations to the loss function at training time. Golatkar et al. (2020a ###reference_b22###) introduce an information-theoretic-based certified unlearning notion and also add random noise to ensure the certification, which is specific to the Fisher Information Matrix and not general enough.\nMore recently, Sekhari et al. (2021 ###reference_b46###) propose the -certified machine unlearning definition that does not require introducing additional randomization during training.\nMore essentially, Sekhari et al. (2021 ###reference_b46###) points out the importance of providing the generalization performance after machine unlearning. Sekhari et al. (2021 ###reference_b46###); Suriyakumar and Wilson (2022 ###reference_b50###) establish the generalization result in terms of the population risk and derive the deletion capacity guarantee.\nHowever, most existing works only consider machine unlearning for STL models that minimize a single variable. None of the prior works provide certified machine unlearning pertaining to minimax models, for which the generalization and deletion capacity guarantees are still unknown."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "Preliminaries and Baseline Solution",
|
| 21 |
+
"text": ""
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "3.1",
|
| 25 |
+
"parent_section_id": "3",
|
| 26 |
+
"section_name": "Minimax Learning",
|
| 27 |
+
"text": "The goal of minimax learning is to optimize the population loss , given by\nwhere is the loss function, is a data instance from the distribution , and are closed convex domains with regard to primal and dual variables, respectively.\nSince the data distribution is unknown in practice, minimax learning turns to optimize the empirical loss , given by,\nwhere is the training dataset with .\nWe will consider -Lipschitz, -smooth and -strongly-convex--strongly-concave loss functions, which are described in Assumption 1 ###reference_umption1###&2 ###reference_umption2### below and more details can be found in Appendix A ###reference_###.\nFor any , the function is -Lipschitz and with -Lipschitz gradients and -Lipschitz Hessians on the closed convex domain . Moreover, is convex on for any and concave on for any .\nFor any , the function satisfies Assumption 1 ###reference_umption1### and is -strongly convex on for any and -strongly concave on for any .\nDenote a randomized minimax learning algorithm by and its trained variables by . The generalization performance is a top concern of the trained model variables (Thekumparampil et al., 2019 ###reference_b52###; Zhang et al., 2020 ###reference_b64###; Lei et al., 2021 ###reference_b30###; Farnia and Ozdaglar, 2021 ###reference_b19###; Zhang et al., 2021 ###reference_b65###, 2022b ###reference_b68###; Ozdaglar et al., 2022 ###reference_b43###), which can be measured by population weak primal-dual (PD) risk or population strong PD risk, as formalized below.\nThe population weak PD risk of , and the population strong PD risk of , are defined as\nNotations.\nWe introduce the following notations that will be used in the sequel.\nFor a twice differentiable function with the arguments and , we use and to denote the direct gradient of w.r.t. and , respectively\nand denote its Jacobian matrix as .\nWe use , , , to denote the second order partial derivatives w.r.t. 
and , correspondingly\nand denote its Hessian matrix as .\nWe define the total Hessian of the function w.r.t. and : and\n,\nwhere and are the shorthand of and , respectively, when and are invertible.\nWe also use the shorthand notation ."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3.2",
|
| 31 |
+
"parent_section_id": "3",
|
| 32 |
+
"section_name": "-Certified Machine Unlearning",
|
| 33 |
+
"text": "An unlearning algorithm for minimax models receives the output of a minimax learning algorithm , the set of delete requests and some additional memory variables as input and returns an updated model , aiming to remove the influence of . For the memory variables in , it will not contain the entire training set, but instead its size is independent of the training data size . The mapping of an unlearning algorithm can be formulated as . We now give the notion of -certified unlearning introduced by Sekhari et al. (2021 ###reference_b46###), which is inspired by the definition of differential privacy (Dwork et al., 2006 ###reference_b17###).\nLet be the domain of . For all of size , set of delete requests such that , the pair of learning algorithm and unlearning algorithm is -certified unlearning, if and , the following two conditions are satisfied:\nwhere denotes the empty set and denotes the memory variables available to .\nThe above definition ensures the indistinguishability between the output distribution of (i) the model trained on the set and then unlearned with delete requests and (ii) the model trained on the set and then unlearned with an empty set. Specifically, the unlearning algorithm simply adds perturbations to the output of when the set of delete requests is empty.\nDeletion Capacity. Under the definition of certified unlearning, Sekhari et al. (2021 ###reference_b46###) introduce the definition of deletion capacity, which formalizes how many samples can be deleted while still maintaining good guarantees on test loss. Here, we utilize the population primal-dual risk defined in Definition 1 ###reference_inition1### instead of the excess population risk utilized for STL models.\nLet and be a dataset of size drawn i.i.d from the data distribution . Let be a minimax model and be the set of deletion requests. 
For a pair of minimax learning algorithm and minimax unlearning algorithm that satisfies -unlearning, the deletion capacity is defined as the maximum number of samples that can be unlearned while still ensuring the population primal-dual (weak PD or strong PD) risk is at most . Let the expectation take over and the outputs of the algorithms and . Let denotes the dimension of domain and denotes the dimension of domain , specifically,\nwhere the outputs and of the minimax unlearning algorithm refer to parameter and , respectively. could be the population weak PD risk or population strong PD risk of .\nWe set (or any other small arbitrary constant) throughout the paper."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "3.3",
|
| 37 |
+
"parent_section_id": "3",
|
| 38 |
+
"section_name": "Baseline Solution: Certified Minimax Unlearning via Differential Privacy",
|
| 39 |
+
"text": "Since Definition 2 ###reference_inition2### is motivated by differential privacy (DP), it is a natural way to use tools from DP for machine unlearning. For a differentially private learning algorithm with edit distance in neighboring datasets, the unlearning algorithm simply returns its output without any changes and is independent of the delete requests as well as the memory variables , i.e., .\nA number of differentially private minimax learning algorithms can be applied, e.g., Zhang et al. (2022a ###reference_b66###); Yang et al. (2022 ###reference_b63###); Bassily et al. (2023 ###reference_b3###). For instance, we can obtain the output by calling Algorithm 3 in Zhang et al. (2022a ###reference_b66###). Under Assumption 1 ###reference_umption1###&2 ###reference_umption2###, we then get the population strong PD risk based on (Zhang et al., 2022a ###reference_b66###, Theorem 4.3) and the group privacy property of DP (Vadhan, 2017 ###reference_b54###, Lemma 7.2.2), as follows,\nwhere we let , , , and be the edit distance between datasets (i.e., the original dataset and the remaining dataset after removing samples to be forgotten).\nThe algorithm satisfies -DP for any set of size , that is,\nSince we have and , the above privacy guarantee can be converted to the minimax unlearning guarantee in Definition 2 ###reference_inition2###, implying that the pair is -certified minimax unlearning. According to Definition 3 ###reference_inition3###, the population strong PD risk in eq.(7 ###reference_###) yields the following bound on deletion capacity.\nDenote . 
There exists a polynomial time learning algorithm and unlearning algorithm for minimax problem of the form such that the deletion capacity is:\nwhere the constant in -notation depends on the properties of the loss function (e.g., strongly convexity and strongly concavity parameters, Lipchitz continuity and smoothness parameters).\nHowever, this DP minimax learning baseline approach provides an inferior deletion capacity. In the following sections, we show that the in the denominator of eq.(8 ###reference_###) can be further reduced to ."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "4",
|
| 43 |
+
"parent_section_id": null,
|
| 44 |
+
"section_name": "Certified Minimax Unlearning",
|
| 45 |
+
"text": "In this section, we focus on the setting of the strongly-convex-strongly-concave loss function. We first provide the intuition for the design of the minimax unlearning step in Sec.4.1 ###reference_###, then provide the formal algorithm in Sec.4.2 ###reference_### and a more efficient extension in Sec.4.3 ###reference_### with analysis of minimax unlearning certification, generalization result, and deletion capacity in Sec.4.4 ###reference_###. We will provide extensions to more general loss settings in Sec.5 ###reference_###. The proofs for the theorems presented in this and the next sections can be found in Appendix B ###reference_### and C ###reference_###, respectively."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "4.1",
|
| 49 |
+
"parent_section_id": "4",
|
| 50 |
+
"section_name": "Intuition for Minimax Unlearning Update",
|
| 51 |
+
"text": "To begin with, we provide an informal derivation for minimax unlearning update to illustrate its design intuition. Given the training set of size and the deletion subset of size , the aim is to approximate the optimal solution of the loss on the remaining dataset , given by,\nMeanwhile, we have the optimal solution to the original loss after minimax learning. Taking unlearning for instance, by using a first-order Taylor expansion for around , we have\nSince is a minimizer of , from the first-order optimality condition, we can get . Now given an auxiliary function (more best response auxiliary functions are introduced in Appendix A ###reference_###, Definition 8 ###reference_inition8###), we have . We further get\nwhere the approximate equation leaving out the term which is bounded in Appendix A ###reference_###, Lemma 2 ###reference_ma2###, and does not affect the overall unlearning guarantee. The approximate equation is the linear approximation step and is the response Jacobian of the auxiliary function . And the approximate equation is due to the implicit function theorem. This gives that\nwhich implies the following approximation of :\nThe above informal derivation indicates that the minimax unlearning update relies on the total Hessian to sufficiently remove the data influence Liu et al. (2023 ###reference_b34###); Zhang et al. (2023 ###reference_b67###), rather than the conventional Hessian that appears in standard statistical unlearning (Guo et al., 2020 ###reference_b27###; Sekhari et al., 2021 ###reference_b46###; Suriyakumar and Wilson, 2022 ###reference_b50###; Mehta et al., 2022 ###reference_b39###). The update in eq.(13 ###reference_###) has a close relation to the complete Newton step in the second-order minimax optimization literature Zhang et al. (2020 ###reference_b64###), which motivates the complete Newton-based minimax unlearning. 
However, due to the various approximations in the above informal derivation, we cannot have a certified minimax unlearning guarantee. Below, we will formally derive the upper bound for these approximations in the closeness upper bound analysis. Based on the closeness upper bound, we will introduce the Gaussian mechanism to yield distribution indistinguishably result in the sense of -certified minimax unlearning."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "4.2",
|
| 55 |
+
"parent_section_id": "4",
|
| 56 |
+
"section_name": "Proposed Certified Minimax Unlearning",
|
| 57 |
+
"text": "We first provide algorithms under the setting of the smooth and strongly-convex-strongly-concave (SC-SC) loss function as described in Assumptions 1 ###reference_umption1###&2 ###reference_umption2###.\nWe denote our learning algorithm by and the pseudocode is shown in Algorithm 1 ###reference_###. Given a dataset of size drawn independently from some distribution , algorithm computes the optimal solution to the empirical risk . then outputs the point as well as the additional memory variables , which computes and stores the total Hessian of at .\nMinimax Unlearning Algorithm.\nWe denote the proposed certified minimax unlearning algorithm by and present its pseudocode in Algorithm 2 ###reference_###. Algorithm takes the following inputs: the set of delete requests of size , the trained minimax model , and the memory variables . To have the certified minimax unlearning for , eq.(15 ###reference_###) computes the total Hessian of by , where the former term can be retrieved from the memory set and the latter is computed on the samples to be deleted; eq.(17 ###reference_###) computes the intermediate by the complete Newton step based on the total Hessian ; Line 3 injects calibrated Gaussian noise to ensure -certified minimax unlearning. The certified minimax unlearning for is symmetric. We provide detailed analysis for Algorithm 2 ###reference_### including minimax unlearning certification, generalization results,\nand deletion capacity in Appendix B.1 ###reference_###."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "4.3",
|
| 61 |
+
"parent_section_id": "4",
|
| 62 |
+
"section_name": "Certified Minimax Unlearning without Total Hessian Re-computation",
|
| 63 |
+
"text": "We extend Algorithm 2 ###reference_### and propose Algorithm 3 ###reference_### to reduce the computational cost of Algorithm 2 ###reference_###. The complete Newton steps in eq.(19 ###reference_###) and eq.(20 ###reference_###) utilize the total Hessian and that are directly retrieved from the memory, rather than the updated total Hessian and used in Algorithm 2 ###reference_###. The form in eq.(19 ###reference_###) and eq.(20 ###reference_###) can also be regarded as the total Hessian extension of the infinitesimal jackknife. In this way, it gets rid of the computationally demanding part of re-evaluating the total Hessian for samples to be deleted, which significantly reduces the computational cost. It turns out to be the same computational complexity as the state-of-the-art certified unlearning method developed for STL models (Suriyakumar and Wilson, 2022 ###reference_b50###). Moreover, Algorithm 3 ###reference_### can be more appealing for the successive data deletion setting."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "4.4",
|
| 67 |
+
"parent_section_id": "4",
|
| 68 |
+
"section_name": "Analysis for Algorithm 3",
|
| 69 |
+
"text": "-Certificated Unlearning Guarantee.\nThe intermediate variables are distinguishable in distribution from the retraining-from-scratch variables because they are deterministic and the Taylor expansion introduces a certain amount of approximation. The following lemma quantifies the closeness between and , which can be regarded as the \u201csensitivity\u201d when applying the Gaussian mechanism.\nSuppose the loss function satisfies Assumption 1 ###reference_umption1### and 2 ###reference_umption2###, and . Let .\nThen, we have the closeness bound between in Line 1 of Algorithm 3 ###reference_### and in eq.(9 ###reference_###):\nEquipped with Lemma 10 ###reference_ma10###, we have the following certified unlearning guarantee by adding Gaussian noise calibrated according to the above closeness result. Due to the minimax structure, our analysis is more involved than the STL case (Sekhari et al., 2021 ###reference_b46###; Suriyakumar and Wilson, 2022 ###reference_b50###).\nUnder the same settings of Lemma 1 ###reference_ma1###, our minimax learning algorithm and unlearning algorithm is -certified minimax unlearning if we choose\nGeneralization Guarantee.\nTheorem 3 ###reference_orem3### below provides the generalization result in terms of the population PD risk for the minimax unlearning algorithm .\nUnder the same settings of Lemma 1 ###reference_ma1### and denote ,\nthe population weak and strong PD risk for the certified minimax unlearning variables returned by Algorithm 3 ###reference_### are\nDeletion Capacity.\nThe population weak and strong PD risk given in Theorem 3 ###reference_orem3### for the output of unlearning algorithms provides the following bound on deletion capacity.\nUnder the same settings of Lemma 1 ###reference_ma1### and denote , the deletion capacity of Algorithm 3 ###reference_### is\nwhere the constant depends on and of the loss function ."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "5",
|
| 73 |
+
"parent_section_id": null,
|
| 74 |
+
"section_name": "Certified Minimax Unlearning for Convex-Concave Loss Function",
|
| 75 |
+
"text": "We further extend the certified minimax unlearning for the convex-concave loss function. In addition, Appendix C ###reference_### will provide the extension to convex-strongly-concave and strongly-convex-concave loss functions. Give the convex-concave loss function , similar to the unlearning for STL models (Sekhari et al., 2021 ###reference_b46###), we define the regularized function as . Suppose the function satisfies Assumption 1 ###reference_umption1###, then the function is -strongly convex in , -strongly concave in , -Lipschitz, -gradient Lipschitz and -Hessian Lipschitz. It suffices to apply the minimax learning and unlearning algorithms in Sec.4 ###reference_### to the regularized loss function with a properly chosen . We denote the learning and unlearning algorithms for convex-concave losses as and . Their implementation details are given in Appendix C ###reference_###.\nWe suppose the SC-SC regularization parameter satisfies .\nTheorem 5 ###reference_orem5### below summarizes guarantees of -certified unlearning and population primal-dual risk (weak and strong) for Algorithm .\nLet Assumption 1 ###reference_umption1### hold and . Suppose the parameter spaces and are bounded so that and . We have,\n-Minimax Unlearning Certification: Our minimax learning algorithm and unlearning algorithm is -certified minimax unlearning.\nPopulation Weak PD Risk: The population weak PD risk for by algorithm is\nIn particular, by setting below\nwe have the following population weak PD risk,\nwhere are constants that depend only on and .\nPopulation Strong PD Risk: The population strong PD risk for by algorithm is\nIn particular, by setting below\nwe have the following population strong PD risk,\nwhere are constants that depend only on and .\nDeletion Capacity: The deletion capacity of Algorithm is\nwhere the constant depends on the constants and ."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "6",
|
| 79 |
+
"parent_section_id": null,
|
| 80 |
+
"section_name": "Conclusion",
|
| 81 |
+
"text": "In this paper, we have studied the certified machine unlearning for minimax models with a focus on the generalization rates and deletion capacity, while existing works in this area largely focus on standard statistical learning models. We have provided a new minimax unlearning algorithm composed of the total Hessian-based complete Newton update and the Gaussian mechanism-based perturbation, which comes with rigorous -unlearning certification. We have established generalization results in terms of the population weak and strong primal-dual risk and the correspondingly defined deletion capacity results for the strongly-convex-strongly-concave loss functions, both of which match the state-of-the-art rates obtained for standard statistical learning models. We have also provided extensions to other loss types like the convex-concave loss function. In addition, we have provided a more computationally efficient extension by getting rid of the total Hessian re-computation during the minimax unlearning phase, which can be more appealing for the successive data deletion setting. Although our bound for deletion capacity is better than that of DP by an order of and matches the state-of-the-art result established for unlearning under the STL setting, it remains unclear whether this bound is tight or not. In future work, we plan to extend to more general settings like the nonconvex-nonconcave loss function setting."
|
| 82 |
+
}
|
| 83 |
+
],
|
| 84 |
+
"appendix": [
|
| 85 |
+
{
|
| 86 |
+
"section_id": "Appendix 1",
|
| 87 |
+
"parent_section_id": null,
|
| 88 |
+
"section_name": "Appendix A Additional Definitions and Supporting Lemmas",
|
| 89 |
+
"text": "In this section, we provide additional definitions and supporting lemmas. In the next two sections, Sec.B ###reference_### contains missing proofs in Sec.4 ###reference_### and the online extension to support successive unlearning setting. Sec.C ###reference_### contains missing proofs in Sec.5 ###reference_###, as well as detailed algorithm descriptions for the general convex-concave loss function setting.\nWe first recall the following standard definitions for the loss function from optimization literature.\nThe function is -Lipschitz, i.e., there exists a constant such that for all , and , it holds that\nThe function has -Lipschitz gradients, i.e., there exists a constant such that for all , and , it holds that\nwhere recall that .\nThe function has -Lipschitz Hessian, i.e., there exists a constant such that for all , and , it holds that\nwhere recall that .\nThe function is -strongly convex on and -strongly concave on , i.e., there exist constants and such that for all , and , it holds that\nWe introduce auxiliary functions and , given by\nand we have and . We can similarly introduce and as\nand we have and by this definition.\nIn addition, we define the primal function , which has gradient and Hessian (i.e., the total Hessian of ). The dual function, its gradient, and Hessian can be similarly defined, e.g., .\nThe following lemma provides the distance between and . Similar result can be derived for the distance between and .\nUnder Assumption 1 ###reference_umption1### and Assumption2 ###reference_umption2###, the variables and (i.e., ) defined in Algorithm 1 ###reference_### satisfy the following distance bound\nWe observe that\nwhere the inequality () follows from that is the maximizer of the function , thus . The inequality () is due to the fact that the function is -Lipschitz. 
Also note that the function is -strongly concave, thus we have\nEq.(39 ###reference_###) and eq.(40 ###reference_###) together give that\nwhich implies that .\n\u220e\nThe following lemma provides the distance between and .\nUnder Assumption 1 ###reference_umption1### and Assumption2 ###reference_umption2###, the variables defined in eq.(9 ###reference_###) and defined in Algorithm 1 ###reference_### satisfy the following guarantees\nWe begin with the -part,\nwhere the inequality () holds because is the minimizer of the function , thus ,\nand the inequality () follows from the fact that the function is -Lipschitz. Since the function is -strongly convex, we further get\nEq.(43 ###reference_###) and eq.(44 ###reference_###) together gives that\nThus, we get .\nFor the -part, we similarly have\nwhere the inequality () follows from that is the maximizer of the function , thus .\nThe inequality () is due to the fact that the function is -Lipschitz. In addition, by the strongly-concave assumption of is , we have\nBy eq.(46 ###reference_###) and eq.(47 ###reference_###), we get that\nThus, we have .\n\u220e\nIn the following, we recall several lemmas (i.e., Lemma 4 ###reference_ma4### to Lemma 8 ###reference_ma8###) from existing minimax optimization literature for completeness.\nUnder Assumption 1 ###reference_umption1### and Assumption2 ###reference_umption2###, for any , the function is -Lipschitz.\nBy the optimality condition of the function , we have\nSumming the two inequalities above yields\nSince the function is -strongly concave in , we have\nBy eq.(49 ###reference_###) and eq.(50 ###reference_###) with the -Lipschitz continuity of , we further get\nConsequently, we have\n\u220e\nThe above lemma can be similarly derived for to obtain that the best response auxiliary function is -Lipschitz. In the next three lemmas, we focus on the -part and omit the -part.\nDenote . 
Under Assumption 1 ###reference_umption1### and Assumption2 ###reference_umption2###, for any , we have\nDenote . Under Assumption 1 ###reference_umption1### and Assumption2 ###reference_umption2###, for any , we have\nwhere .\nRecall the definition of the primal function and its gradient . Due to the optimality of , we have\nBy taking the total derivative with respect to , we get\nTaking the total derivative of again on , we further have\nBased on the equality of and above and Lemma 5 ###reference_ma5###, we have\n\u220e\nUnder Assumption 1 ###reference_umption1### and Assumption2 ###reference_umption2###, for all and , we have .\nBy the definition of the total Hessian, we have\nwhere the inequality () uses the triangle inequality and the inequality () is due to the function has -Lipschitz gradients and is -strongly concave in , thus we have , , and .\n\u220e\nUnder Assumption 1 ###reference_umption1### and Assumption2 ###reference_umption2###, the population weak PD risk for the minimax learning variables returned by Algorithm 1 ###reference_### has\nUnder Assumption 1 ###reference_umption1### and Assumption2 ###reference_umption2###, the population strong PD risk for the minimax learning variables returned by Algorithm 1 ###reference_### has"
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"section_id": "Appendix 2",
|
| 93 |
+
"parent_section_id": null,
|
| 94 |
+
"section_name": "Appendix B Detailed Algorithm Analysis and Missing Proofs in Section 4",
|
| 95 |
+
"text": "In the following, we provide the analysis for Algorithm 2 ###reference_### in terms of guarantees of -certified unlearning, population primal-dual risk, and deletion capacity and the corresponding proofs.\nSuppose the loss function satisfies Assumption 1 ###reference_umption1### and 2 ###reference_umption2###, and . Let . Then, we have the closeness bound between in Line 2 of Algorithm 2 ###reference_### and in eq.(9 ###reference_###):\nRecall that the empirical loss functions and are\nWe focus on the key term , which has the following conversions\nWe denote . For the first term on the right-hand side of the inequality in eq.(64 ###reference_###), we have\nwhere the inequality () is by Lemma 6 ###reference_ma6### and the inequality () is by Lemma 3 ###reference_ma3###.\nFor the second term on the right-hand side of the inequality in eq.(64 ###reference_###), we have\nwhere the inequality () follows by the fact that the function is -Lipschitz continuous and . The inequality () holds because Lemma 4 ###reference_ma4###, and the inequality () is by Lemma 3 ###reference_ma3###.\nFor the third term on the right-hand side of the inequality in eq.(64 ###reference_###), we have\nwhere the first inequality is by Lemma 7 ###reference_ma7###. Plugging eq.(65 ###reference_###), eq.(66 ###reference_###) and eq.(67 ###reference_###) into eq.(64 ###reference_###), we further get\nThe above derivation yields an upper bound result. In the following, we derive a lower bound result. Let be the vector satisfying the following relation,\nSince we have and due to the optimality of , plugging eq.(69 ###reference_###) into eq.(68 ###reference_###), we get that\nFor the left-hand side of eq.(70 ###reference_###), with , we have\nwhere the second inequality is by Lemma 7 ###reference_ma7###. 
Combining eq.(70 ###reference_###), eq.(68 ###reference_###), and the definition of the vector , we get that\nSymmetrically, we can get that .\n\u220e\nUnder the same settings of Lemma 10 ###reference_ma10###, our minimax learning algorithm and unlearning algorithm is -certified minimax unlearning if we choose\nOur proof for -minimax unlearning certification is similar to the one used for the differential privacy guarantee of the Gaussian mechanism (Dwork et al., 2014 ###reference_b18###).\nLet be the output of the learning algorithm trained on dataset and be the output of the unlearning algorithm running with delete requests , the learned model , and the memory variables . Then we have and . We also denote the intermediate variables before adding noise in algorithm as , and we have and .\nSmilarly, let be the output of the learning algorithm trained on dataset and be the output of the unlearning algorithm running with delete requests , the learned model , and the memory variables . Then we have and . We also denote the intermediate variables before adding noise in algorithm as , and we have and . Note that and .\nWe sample the noise and with the scale:\nwhere and are given in Lemma 10 ###reference_ma10###. Then, following the same proof as Dwork et al. (2014 ###reference_b18###, Theorem A.1) together with the composition property of DP (Vadhan, 2017 ###reference_b54###, Lemma 7.2.3), we get that, for any set where ,\nwhich implies that the algorithm pair and is -certified minimax unlearning.\n\u220e\nUnder the same settings of Lemma 10 ###reference_ma10### and denote , the population weak and strong PD risk for the certified minimax unlearning variables returned by Algorithm 2 ###reference_### are\nWe begin with the population weak PD risk for the certified minimax unlearning variable , which has the following conversions,\nwhere the inequality () holds because the population loss function \nis -Lipschitz continuous. 
The inequality () is by Lemma 8 ###reference_ma8###.\nBy recalling the unlearning update step in Algorithm 2 ###reference_###, we have\nwhere the vector is drawn independently from . From the relation in eq.(78 ###reference_###), we further get\nwhere the inequality () is by the triangle inequality and the inequality () follows from the relation in eq.(71 ###reference_###), together with the Jensen\u2019s inequality to bound . The equality () holds because the vector and thus we have . Furthermore, the inequality () is due to the fact that is -Lipshitz continuous.\nSymmetrically, we have\nPlugging eq.(79 ###reference_###) and eq.(80 ###reference_###) into eq.(77 ###reference_###) with we get\nWith the noise scale and being equal to , we can get our generalization guarantee with population weak PD risk:\nFor the population strong PD risk , similarly, we have\nwhere inequality () is due to the fact that the population loss function is -Lipschitz continuous. The inequality () uses eq.(79 ###reference_###), eq.(80 ###reference_###) and Lemma 9 ###reference_ma9###. With the same noise scale above, we can get the generalization guarantee in terms of strong PD risk below,\n\u220e\nUnder the same settings of Lemma 10 ###reference_ma10### and denote , the deletion capacity of Algorithm 2 ###reference_### is\nwhere the constant depends on and of the loss function .\nBy the definition of deletion capacity, in order to ensure the population PD risk derived in Theorem 7 ###reference_orem7### is bounded by , it suffices to let:\nwhere the constant depends on the properties of the loss function .\n\u220e\nTo support successive unlearning requests, similar to the STL case (Suriyakumar and Wilson, 2022 ###reference_b50###), we further provide an efficient and online minimax unlearning algorithm (denoted by ). The pseudocode of is given in Algorithm 4 ###reference_###. 
Its certified minimax unlearning guarantee, generalization, and deletion capacity can be identically yielded as Algorithm 3 ###reference_###, which are omitted here."
|
| 96 |
+
},
|
| 97 |
+
{
|
| 98 |
+
"section_id": "Appendix 3",
|
| 99 |
+
"parent_section_id": null,
|
| 100 |
+
"section_name": "Appendix C Detailed Algorithm Descriptions and Missing Proofs in Section 5",
|
| 101 |
+
"text": "In this section, we provide minimax learning and minimax unlearning algorithms for smooth convex-concave loss functions based on the counterpart algorithms for the SC-SC setting. Given the convex-concave loss function , we define the regularized loss function as . Suppose the function satisfies Assumption 1 ###reference_umption1###, then the function is -strongly convex in , -strongly concave in , -Lipschitz, -gradient Lipschitz and -Hessian Lipschitz. Thus, we can apply the minimax learning in Algorithm 1 ###reference_### and unlearning in Algorithm 2 ###reference_### to the regularized loss function with a properly chosen . We denote our learning algorithm by and unlearning algorithm by . The pseudocode is provided in Algorithm 5 ###reference_### and Algorithm 6 ###reference_###, respectively. Additionally, we denote the regularized population loss as and regularized empirical loss as .\nSuppose the function is -Lipschitz continuous. Define the function as\nGiven a dataset and denote . Then, the variables satisfy and .\nDue to the optimality of , we have\nPlugging in the definition of the function in the above, we get that\nThen, using the triangle inequality, we have\nwhere the last inequality holds because the function is -Lipschitz continuous. Thus we have . Similarly, we can get .\n\u220e\nLemma 11 ###reference_ma11### implies that the empirical optimizer returned by Algorithm 6 ###reference_### satisfies and . Thus our domain of interest are and . Over the set , the function is -Lipschitz continuous. Also, with , has -Lipschitz gradients.\nDenote and , then the function is -Lipschitz continuous and has -Lipschitz gradients. 
Let be the optimal solution of the loss function on the remaining dataset, i.e.,\nAdditionally, we have and .\nUnder the settings of Theorem 5 ###reference_orem5###, for any , the population weak and strong PD risk for the minimax learning variables returned by Algorithm 5 ###reference_### are\nFor the function , an application of Lemma 8 ###reference_ma8### gives that\nBy the assumption of bounded parameter spaces and so that and , we have the following derivations for the population weak PD risk,\nSimilarly, an application of Lemma 9 ###reference_ma9### gives that\nAnd we can get the population strong PD risk with the following conversions,\n\u220e\nUnder the settings of Theorem 5 ###reference_orem5###, we have the closeness bound between the intermediate variables in Algorithm 6 ###reference_### and in eq.(106 ###reference_6###):\nSince we now run the algorithms and with the regularized loss function , the proof is identical to that of Lemma 10 ###reference_ma10###.\n\u220e\nEquipped with the supporting lemmas above, the proof of Theorem 5 ###reference_orem5### can be separated into the proofs of the following three lemmas.\nUnder the settings of Theorem 5 ###reference_orem5###, our minimax learning algorithm and unlearning algorithm is -certified minimax unlearning if we choose\nWith the closeness upper bound in Lemma 13 ###reference_ma13### and the given noise scales in eq.(113 ###reference_3###), the proof is identical to that of Theorem 6 ###reference_orem6###.\n\u220e\nUnder the settings of Theorem 5 ###reference_orem5###, the population weak and strong PD risk for returned by Algorithm 6 ###reference_### are\nFor the population weak PD risk, an application of eq.(77 ###reference_###) together with Lemma 12 ###reference_ma12### gives that\nAccording to Algorithm 6 ###reference_###, we have the unlearning update step\nwhere . 
From the relation above, we further get\nwhere the inequality () uses the triangle inequality and the inequality () follows from an application of eq.(71 ###reference_###), together with the Jensen\u2019s inequality to bound . The equality () holds because the vector and thus we have . The inequality () uses the definition of the function and the triangle inequality. The inequality () is due to the fact that is -Lipshitz continuous and Lemma 11 ###reference_ma11###. Symmetrically, we have\nPlugging eq.(117 ###reference_7###) and eq.(118 ###reference_8###) into eq.(115 ###reference_5###) with noise scales given in Lemma 14 ###reference_ma14###, we can get our generalization guarantee in terms of population weak PD risk:\nSimilarly, using an application of eq.(83 ###reference_###) together with Lemma 12 ###reference_ma12###, Lemma 14 ###reference_ma14###, eq.(117 ###reference_7###) and eq.(118 ###reference_8###), we can get the following population strong PD risk:\n\u220e\nUnder the settings of Theorem 5 ###reference_orem5###, the deletion capacity of Algorithm 6 ###reference_### is\nwhere the constant depends on and .\nBy the definition of deletion capacity, in order to ensure the population PD risk derived in Lemma 15 ###reference_ma15### is bounded by , it suffices to let .\n\u220e\nIn this section, we briefly discuss the extension to the smooth C-SC setting. The SC-C setting is symmetric and thus omitted here.\nGiven the loss function that satisfies Assumption 1 ###reference_umption1### with -strong concavity in , we define the regularized function as . Our minimax learning and minimax unlearning algorithms for C-SC loss function denoted by and are given in Algorithm 7 ###reference_### and Algorithm 8 ###reference_### respectively. Additionally, we denote the regularized population loss by and regularized empirical loss by .\nNote that the function is -strongly convex in , -strongly concave in , -Lipschitz, -gradient Lipschitz and -Hessian Lipschitz. 
We also have . Let be the optimal solution of the loss function on the remaining dataset, i.e.,\nAn application of Lemma 11 ###reference_ma11### implies that the empirical optimizer returned by Algorithm 8 ###reference_### satisfies . Thus our domain of interest are . Over the set , the function is -Lipschitz continuous. Suppose the strongly-convex regularization parameter satisfies , then has -Lipschitz gradients.\nThe corresponding theoretical results are given below.\nLet Assumption 1 ###reference_umption1### hold. Assume the function is -strongly concave in and . Let . Then, we have the closeness bound between the intermediate variables in Algorithm 8 ###reference_### and in eq.(124 ###reference_4###):\nSince we now run the algorithms and with the regularized loss function , the proof is identical to that of Lemma 10 ###reference_ma10###.\n\u220e\nUnder the settings of Lemma 17 ###reference_ma17###, our minimax learning algorithm and unlearning algorithm is -certified minimax unlearning if we choose\nWith the closeness upper bound in Lemma 17 ###reference_ma17### and the given noise scales in eq.(126 ###reference_6###), the proof is identical to that of Theorem 6 ###reference_orem6###.\n\u220e\nUnder the same settings of Lemma 17 ###reference_ma17###, suppose the parameter space is bounded so that , the population weak PD risk for the certified minimax unlearning variables returned by Algorithm 8 ###reference_### is\nwhere . 
In particular, by setting the regularization parameter as:\nwe have the following population weak PD risk:\nwhere and are constants that depend only on and .\nAn application of (Zhang et al., 2021 ###reference_b65###, Theorem 1) gives that\nUsing the relation above with an application of eq.(77 ###reference_###) and eq.(109 ###reference_9###), we have\nBy an application of eq.(117 ###reference_7###), we further get\nand\nPlugging eq.(132 ###reference_2###) and eq.(133 ###reference_3###) into eq.(131 ###reference_1###) with noise scales given in Lemma 18 ###reference_ma18###, we can get our generalization guarantee:\nwhere .\n\u220e\nUnder the same settings of Lemma 19 ###reference_ma19###,\nthe population strong PD risk for returned by Algorithm 8 ###reference_### is\nwhere . In particular, by setting the regularization parameter as:\nwe have the following population strong PD risk:\nwhere and are constants that depend only on and .\nAn application of Lemma 9 ###reference_ma9### gives that\nUsing an application of eq.(83 ###reference_###) and eq.(111 ###reference_1###), together with eq.(138 ###reference_8###), eq.(132 ###reference_2###), eq.(133 ###reference_3###) and the noise scales given in Lemma 18 ###reference_ma18###, we have\n\u220e\nUnder the same settings as Lemma 19 ###reference_ma19###, the deletion capacity of Algorithm 3 ###reference_### is\nwhere the constant depends on and and .\nBy the definition of deletion capacity, in order to ensure the population PD risk derived in Lemma 19 ###reference_ma19### or Lemma 20 ###reference_ma20### is bounded by , it suffices to let .\n\u220e"
|
| 102 |
+
}
|
| 103 |
+
],
|
| 104 |
+
"tables": {
|
| 105 |
+
"1": {
|
| 106 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S1.T1\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span><span class=\"ltx_text ltx_font_italic\" id=\"S1.T1.4.2\">Summary of Results. Here (S)C means (strongly-)convex loss function, and (S)C-(S)C means (strongly-)convex-(strongly-)concave loss function. PD means Primal-Dual. is the number of training samples and is the model dimension.</span></figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_align_middle\" id=\"S1.T1.8\">\n<tr class=\"ltx_tr\" id=\"S1.T1.8.5\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S1.T1.8.5.1\" style=\"padding-top:1pt;padding-bottom:1pt;\">Model</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.8.5.2\" style=\"padding-top:1pt;padding-bottom:1pt;\">Unlearning Algorithm</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.8.5.3\" style=\"padding-top:1pt;padding-bottom:1pt;\">Setting</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.8.5.4\" style=\"padding-top:1pt;padding-bottom:1pt;\">Generalization Measure</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.8.5.5\" style=\"padding-top:1pt;padding-bottom:1pt;\">Deletion Capacity</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.5.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S1.T1.5.1.2\" rowspan=\"2\" style=\"padding-top:1pt;padding-bottom:1pt;\"><span class=\"ltx_text\" id=\"S1.T1.5.1.2.1\">STL</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.5.1.3\" style=\"padding-top:1pt;padding-bottom:1pt;\">\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S1.T1.5.1.3.1\">\n<tr class=\"ltx_tr\" id=\"S1.T1.5.1.3.1.1\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S1.T1.5.1.3.1.1.1\" 
style=\"padding-top:1pt;padding-bottom:1pt;\">DP-based</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.5.1.3.1.2\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S1.T1.5.1.3.1.2.1\" style=\"padding-top:1pt;padding-bottom:1pt;\"><cite class=\"ltx_cite ltx_citemacro_citep\">(Bassily et\u00a0al., <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2312.10336v2#bib.bib2\" title=\"\">2019</a>)</cite></td>\n</tr>\n</table>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.5.1.4\" style=\"padding-top:1pt;padding-bottom:1pt;\">C</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.5.1.5\" rowspan=\"2\" style=\"padding-top:1pt;padding-bottom:1pt;\"><span class=\"ltx_text\" id=\"S1.T1.5.1.5.1\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S1.T1.5.1.5.1.1\">\n<span class=\"ltx_tr\" id=\"S1.T1.5.1.5.1.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S1.T1.5.1.5.1.1.1.1\" style=\"padding-top:1pt;padding-bottom:1pt;\">Population Excess Risk</span></span>\n<span class=\"ltx_tr\" id=\"S1.T1.5.1.5.1.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S1.T1.5.1.5.1.1.2.1\" style=\"padding-top:1pt;padding-bottom:1pt;\"><cite class=\"ltx_cite ltx_citemacro_citep\">(Sekhari et\u00a0al., <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2312.10336v2#bib.bib46\" title=\"\">2021</a>)</cite></span></span>\n</span></span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.5.1.1\" style=\"padding-top:1pt;padding-bottom:1pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.6.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.6.2.2\" style=\"padding-top:1pt;padding-bottom:1pt;\"><cite class=\"ltx_cite ltx_citemacro_citep\">(Sekhari et\u00a0al., <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2312.10336v2#bib.bib46\" title=\"\">2021</a>)</cite></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.6.2.3\" 
style=\"padding-top:1pt;padding-bottom:1pt;\">(S)C</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.6.2.1\" style=\"padding-top:1pt;padding-bottom:1pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.7.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_l ltx_border_r ltx_border_t\" id=\"S1.T1.7.3.2\" rowspan=\"5\" style=\"padding-top:1pt;padding-bottom:1pt;\"><span class=\"ltx_text\" id=\"S1.T1.7.3.2.1\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S1.T1.7.3.2.1.1\">\n<span class=\"ltx_tr\" id=\"S1.T1.7.3.2.1.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S1.T1.7.3.2.1.1.1.1\" style=\"padding-top:1pt;padding-bottom:1pt;\">Minimax</span></span>\n<span class=\"ltx_tr\" id=\"S1.T1.7.3.2.1.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S1.T1.7.3.2.1.1.2.1\" style=\"padding-top:1pt;padding-bottom:1pt;\">Learning</span></span>\n</span></span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.7.3.3\" style=\"padding-top:1pt;padding-bottom:1pt;\">\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S1.T1.7.3.3.1\">\n<tr class=\"ltx_tr\" id=\"S1.T1.7.3.3.1.1\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S1.T1.7.3.3.1.1.1\" style=\"padding-top:1pt;padding-bottom:1pt;\">DP-based</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.7.3.3.1.2\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S1.T1.7.3.3.1.2.1\" style=\"padding-top:1pt;padding-bottom:1pt;\"><cite class=\"ltx_cite ltx_citemacro_citep\">(Zhang et\u00a0al., <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2312.10336v2#bib.bib66\" title=\"\">2022a</a>)</cite></td>\n</tr>\n</table>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.7.3.4\" style=\"padding-top:1pt;padding-bottom:1pt;\">SC-SC</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.7.3.5\" rowspan=\"2\" style=\"padding-top:1pt;padding-bottom:1pt;\"><span 
class=\"ltx_text\" id=\"S1.T1.7.3.5.1\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S1.T1.7.3.5.1.1\">\n<span class=\"ltx_tr\" id=\"S1.T1.7.3.5.1.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S1.T1.7.3.5.1.1.1.1\" style=\"padding-top:1pt;padding-bottom:1pt;\">Population Strong</span></span>\n<span class=\"ltx_tr\" id=\"S1.T1.7.3.5.1.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S1.T1.7.3.5.1.1.2.1\" style=\"padding-top:1pt;padding-bottom:1pt;\">PD Risk</span></span>\n</span></span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.7.3.1\" rowspan=\"2\" style=\"padding-top:1pt;padding-bottom:1pt;\"><span class=\"ltx_text\" id=\"S1.T1.7.3.1.1\"></span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.8.6\">\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.8.6.1\" style=\"padding-top:1pt;padding-bottom:1pt;\">\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S1.T1.8.6.1.1\">\n<tr class=\"ltx_tr\" id=\"S1.T1.8.6.1.1.1\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S1.T1.8.6.1.1.1.1\" style=\"padding-top:1pt;padding-bottom:1pt;\">DP-based</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.8.6.1.1.2\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S1.T1.8.6.1.1.2.1\" style=\"padding-top:1pt;padding-bottom:1pt;\"><cite class=\"ltx_cite ltx_citemacro_citep\">(Bassily et\u00a0al., <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2312.10336v2#bib.bib3\" title=\"\">2023</a>)</cite></td>\n</tr>\n</table>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.8.6.2\" style=\"padding-top:1pt;padding-bottom:1pt;\">C-C</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.8.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S1.T1.8.4.2\" style=\"padding-top:1pt;padding-bottom:1pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.8.4.2.1\">Our Work</span></td>\n<td class=\"ltx_td ltx_align_center 
ltx_border_b ltx_border_r ltx_border_t\" id=\"S1.T1.8.4.3\" style=\"padding-top:1pt;padding-bottom:1pt;\">(S)C-(S)C</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S1.T1.8.4.4\" style=\"padding-top:1pt;padding-bottom:1pt;\">\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S1.T1.8.4.4.1\">\n<tr class=\"ltx_tr\" id=\"S1.T1.8.4.4.1.1\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S1.T1.8.4.4.1.1.1\" style=\"padding-top:1pt;padding-bottom:1pt;\">Population Weak or</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.8.4.4.1.2\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S1.T1.8.4.4.1.2.1\" style=\"padding-top:1pt;padding-bottom:1pt;\">Strong PD Risk</td>\n</tr>\n</table>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S1.T1.8.4.1\" style=\"padding-top:1pt;padding-bottom:1pt;\"></td>\n</tr>\n</table>\n</figure>",
|
| 107 |
+
"capture": "Table 1: Summary of Results. Here (S)C means (strongly-)convex loss function, and (S)C-(S)C means (strongly-)convex-(strongly-)concave loss function. PD means Primal-Dual. is the number of training samples and is the model dimension."
|
| 108 |
+
}
|
| 109 |
+
},
|
| 110 |
+
"image_paths": {},
|
| 111 |
+
"validation": true,
|
| 112 |
+
"references": [
|
| 113 |
+
{
|
| 114 |
+
"1": {
|
| 115 |
+
"title": "Wasserstein generative adversarial networks.",
|
| 116 |
+
"author": "Martin Arjovsky, Soumith Chintala, and L\u00e9on Bottou.",
|
| 117 |
+
"venue": "In International conference on machine learning, volume 70,\npages 214\u2013223. PMLR, 2017.",
|
| 118 |
+
"url": null
|
| 119 |
+
}
|
| 120 |
+
},
|
| 121 |
+
{
|
| 122 |
+
"2": {
|
| 123 |
+
"title": "Private stochastic convex optimization with optimal rates.",
|
| 124 |
+
"author": "Raef Bassily, Vitaly Feldman, Kunal Talwar, and Abhradeep Guha Thakurta.",
|
| 125 |
+
"venue": "In Advances in Neural Information Processing Systems,\nvolume 32, pages 11279\u201311288, 2019.",
|
| 126 |
+
"url": null
|
| 127 |
+
}
|
| 128 |
+
},
|
| 129 |
+
{
|
| 130 |
+
"3": {
|
| 131 |
+
"title": "Differentially private algorithms for the stochastic saddle point\nproblem with optimal rates for the strong gap.",
|
| 132 |
+
"author": "Raef Bassily, Crist\u00f3bal Guzm\u00e1n, and Michael Menart.",
|
| 133 |
+
"venue": "In Conference on Learning Theory, volume 195, pages\n2482\u20132508. PMLR, 2023.",
|
| 134 |
+
"url": null
|
| 135 |
+
}
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"4": {
|
| 139 |
+
"title": "Optimal algorithms for differentially private stochastic monotone\nvariational inequalities and saddle-point problems.",
|
| 140 |
+
"author": "Digvijay Boob and Crist\u00f3bal Guzm\u00e1n.",
|
| 141 |
+
"venue": "Mathematical Programming, pages 1\u201343, 2023.",
|
| 142 |
+
"url": null
|
| 143 |
+
}
|
| 144 |
+
},
|
| 145 |
+
{
|
| 146 |
+
"5": {
|
| 147 |
+
"title": "Machine unlearning.",
|
| 148 |
+
"author": "Lucas Bourtoule, Varun Chandrasekaran, Christopher A Choquette-Choo, Hengrui\nJia, Adelin Travers, Baiwu Zhang, David Lie, and Nicolas Papernot.",
|
| 149 |
+
"venue": "In 2021 IEEE Symposium on Security and Privacy, pages\n141\u2013159. IEEE, 2021.",
|
| 150 |
+
"url": null
|
| 151 |
+
}
|
| 152 |
+
},
|
| 153 |
+
{
|
| 154 |
+
"6": {
|
| 155 |
+
"title": "Machine unlearning for random forests.",
|
| 156 |
+
"author": "Jonathan Brophy and Daniel Lowd.",
|
| 157 |
+
"venue": "In International Conference on Machine Learning, volume 139,\npages 1092\u20131104. PMLR, 2021.",
|
| 158 |
+
"url": null
|
| 159 |
+
}
|
| 160 |
+
},
|
| 161 |
+
{
|
| 162 |
+
"7": {
|
| 163 |
+
"title": "Towards making systems forget with machine unlearning.",
|
| 164 |
+
"author": "Yinzhi Cao and Junfeng Yang.",
|
| 165 |
+
"venue": "In 2015 IEEE symposium on security and privacy, pages\n463\u2013480. IEEE, 2015.",
|
| 166 |
+
"url": null
|
| 167 |
+
}
|
| 168 |
+
},
|
| 169 |
+
{
|
| 170 |
+
"8": {
|
| 171 |
+
"title": "Recommendation unlearning.",
|
| 172 |
+
"author": "Chong Chen, Fei Sun, Min Zhang, and Bolin Ding.",
|
| 173 |
+
"venue": "In Proceedings of the ACM Web Conference 2022, pages\n2768\u20132777. ACM, 2022a.",
|
| 174 |
+
"url": null
|
| 175 |
+
}
|
| 176 |
+
},
|
| 177 |
+
{
|
| 178 |
+
"9": {
|
| 179 |
+
"title": "Graph unlearning.",
|
| 180 |
+
"author": "Min Chen, Zhikun Zhang, Tianhao Wang, Michael Backes, Mathias Humbert, and Yang\nZhang.",
|
| 181 |
+
"venue": "In Proceedings of the 2022 ACM SIGSAC Conference on Computer\nand Communications Security, pages 499\u2013513. ACM, 2022b.",
|
| 182 |
+
"url": null
|
| 183 |
+
}
|
| 184 |
+
},
|
| 185 |
+
{
|
| 186 |
+
"10": {
|
| 187 |
+
"title": "Boundary unlearning: Rapid forgetting of deep networks via shifting\nthe decision boundary.",
|
| 188 |
+
"author": "Min Chen, Weizhuo Gao, Gaoyang Liu, Kai Peng, and Chen Wang.",
|
| 189 |
+
"venue": "In Proceedings of the IEEE/CVF Conference on Computer Vision\nand Pattern Recognition, pages 7766\u20137775. IEEE, 2023.",
|
| 190 |
+
"url": null
|
| 191 |
+
}
|
| 192 |
+
},
|
| 193 |
+
{
|
| 194 |
+
"11": {
|
| 195 |
+
"title": "Gnndelete: A general strategy for unlearning in graph neural\nnetworks.",
|
| 196 |
+
"author": "Jiali Cheng, George Dasoulas, Huan He, Chirag Agarwal, and Marinka Zitnik.",
|
| 197 |
+
"venue": "In The Eleventh International Conference on Learning\nRepresentations. OpenReview.net, 2023.",
|
| 198 |
+
"url": null
|
| 199 |
+
}
|
| 200 |
+
},
|
| 201 |
+
{
|
| 202 |
+
"12": {
|
| 203 |
+
"title": "Efficient model updates for approximate unlearning of\ngraph-structured data.",
|
| 204 |
+
"author": "Eli Chien, Chao Pan, and Olgica Milenkovic.",
|
| 205 |
+
"venue": "In The Eleventh International Conference on Learning\nRepresentations. OpenReview.net, 2023a.",
|
| 206 |
+
"url": null
|
| 207 |
+
}
|
| 208 |
+
},
|
| 209 |
+
{
|
| 210 |
+
"13": {
|
| 211 |
+
"title": "Efficient model updates for approximate unlearning of\ngraph-structured data.",
|
| 212 |
+
"author": "Eli Chien, Chao Pan, and Olgica Milenkovic.",
|
| 213 |
+
"venue": "In The Eleventh International Conference on Learning\nRepresentations. OpenReview.net, 2023b.",
|
| 214 |
+
"url": null
|
| 215 |
+
}
|
| 216 |
+
},
|
| 217 |
+
{
|
| 218 |
+
"14": {
|
| 219 |
+
"title": "Sbeed: Convergent reinforcement learning with nonlinear function\napproximation.",
|
| 220 |
+
"author": "Bo Dai, Albert Shaw, Lihong Li, Lin Xiao, Niao He, Zhen Liu, Jianshu Chen, and\nLe Song.",
|
| 221 |
+
"venue": "In International Conference on Machine Learning, volume 80,\npages 1125\u20131134. PMLR, 2018.",
|
| 222 |
+
"url": null
|
| 223 |
+
}
|
| 224 |
+
},
|
| 225 |
+
{
|
| 226 |
+
"15": {
|
| 227 |
+
"title": "Hidden poison: Machine unlearning enables camouflaged poisoning\nattacks.",
|
| 228 |
+
"author": "Jimmy Z. Di, Jack Douglas, Jayadev Acharya, Gautam Kamath, and Ayush Sekhari.",
|
| 229 |
+
"venue": "In Advances in Neural Information Processing Systems,\nvolume 37, 2023.",
|
| 230 |
+
"url": null
|
| 231 |
+
}
|
| 232 |
+
},
|
| 233 |
+
{
|
| 234 |
+
"16": {
|
| 235 |
+
"title": "Stochastic variance reduction methods for policy evaluation.",
|
| 236 |
+
"author": "Simon S Du, Jianshu Chen, Lihong Li, Lin Xiao, and Dengyong Zhou.",
|
| 237 |
+
"venue": "In International Conference on Machine Learning, volume 70,\npages 1049\u20131058. PMLR, 2017.",
|
| 238 |
+
"url": null
|
| 239 |
+
}
|
| 240 |
+
},
|
| 241 |
+
{
|
| 242 |
+
"17": {
|
| 243 |
+
"title": "Calibrating noise to sensitivity in private data analysis.",
|
| 244 |
+
"author": "Cynthia Dwork, Frank McSherry, Kobbi Nissim, and Adam Smith.",
|
| 245 |
+
"venue": "In Third Theory of Cryptography Conference, volume 3876, pages\n265\u2013284. Springer, 2006.",
|
| 246 |
+
"url": null
|
| 247 |
+
}
|
| 248 |
+
},
|
| 249 |
+
{
|
| 250 |
+
"18": {
|
| 251 |
+
"title": "The algorithmic foundations of differential privacy.",
|
| 252 |
+
"author": "Cynthia Dwork, Aaron Roth, et al.",
|
| 253 |
+
"venue": "Foundations and Trends\u00ae in Theoretical Computer\nScience, 9:211\u2013407, 2014.",
|
| 254 |
+
"url": null
|
| 255 |
+
}
|
| 256 |
+
},
|
| 257 |
+
{
|
| 258 |
+
"19": {
|
| 259 |
+
"title": "Train simultaneously, generalize better: Stability of gradient-based\nminimax learners.",
|
| 260 |
+
"author": "Farzan Farnia and Asuman Ozdaglar.",
|
| 261 |
+
"venue": "In International Conference on Machine Learning, volume 139,\npages 3174\u20133185. PMLR, 2021.",
|
| 262 |
+
"url": null
|
| 263 |
+
}
|
| 264 |
+
},
|
| 265 |
+
{
|
| 266 |
+
"20": {
|
| 267 |
+
"title": "Ticketed learning\u2013unlearning schemes.",
|
| 268 |
+
"author": "Badih Ghazi, Pritish Kamath, Ravi Kumar, Pasin Manurangsi, Ayush Sekhari, and\nChiyuan Zhang.",
|
| 269 |
+
"venue": "In The Thirty Sixth Annual Conference on Learning Theory,\npages 5110\u20135139. PMLR, 2023.",
|
| 270 |
+
"url": null
|
| 271 |
+
}
|
| 272 |
+
},
|
| 273 |
+
{
|
| 274 |
+
"21": {
|
| 275 |
+
"title": "Making ai forget you: Data deletion in machine learning.",
|
| 276 |
+
"author": "Antonio Ginart, Melody Guan, Gregory Valiant, and James Y Zou.",
|
| 277 |
+
"venue": "In Advances in neural information processing systems,\nvolume 32, pages 3513\u20133526, 2019.",
|
| 278 |
+
"url": null
|
| 279 |
+
}
|
| 280 |
+
},
|
| 281 |
+
{
|
| 282 |
+
"22": {
|
| 283 |
+
"title": "Eternal sunshine of the spotless net: Selective forgetting in deep\nnetworks.",
|
| 284 |
+
"author": "Aditya Golatkar, Alessandro Achille, and Stefano Soatto.",
|
| 285 |
+
"venue": "In Proceedings of the IEEE/CVF Conference on Computer Vision\nand Pattern Recognition, pages 9304\u20139312, 2020a.",
|
| 286 |
+
"url": null
|
| 287 |
+
}
|
| 288 |
+
},
|
| 289 |
+
{
|
| 290 |
+
"23": {
|
| 291 |
+
"title": "Forgetting outside the box: Scrubbing deep networks of information\naccessible from input-output observations.",
|
| 292 |
+
"author": "Aditya Golatkar, Alessandro Achille, and Stefano Soatto.",
|
| 293 |
+
"venue": "In Computer Vision\u2013ECCV 2020: 16th European Conference,\nvolume 12374, pages 383\u2013398. Springer, 2020b.",
|
| 294 |
+
"url": null
|
| 295 |
+
}
|
| 296 |
+
},
|
| 297 |
+
{
|
| 298 |
+
"24": {
|
| 299 |
+
"title": "Mixed-privacy forgetting in deep networks.",
|
| 300 |
+
"author": "Aditya Golatkar, Alessandro Achille, Avinash Ravichandran, Marzia Polito, and\nStefano Soatto.",
|
| 301 |
+
"venue": "In Proceedings of the IEEE/CVF Conference on Computer Vision\nand Pattern Recognition, pages 792\u2013801. IEEE, 2021.",
|
| 302 |
+
"url": null
|
| 303 |
+
}
|
| 304 |
+
},
|
| 305 |
+
{
|
| 306 |
+
"25": {
|
| 307 |
+
"title": "Generative adversarial nets.",
|
| 308 |
+
"author": "Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley,\nSherjil Ozair, Aaron Courville, and Yoshua Bengio.",
|
| 309 |
+
"venue": "In Advances in Neural Information Processing Systems,\nvolume 27, pages 2672\u20132680, 2014.",
|
| 310 |
+
"url": null
|
| 311 |
+
}
|
| 312 |
+
},
|
| 313 |
+
{
|
| 314 |
+
"26": {
|
| 315 |
+
"title": "Amnesiac machine learning.",
|
| 316 |
+
"author": "Laura Graves, Vineel Nagisetty, and Vijay Ganesh.",
|
| 317 |
+
"venue": "In Proceedings of the AAAI Conference on Artificial\nIntelligence, volume 35, pages 11516\u201311524. AAAI Press, 2021.",
|
| 318 |
+
"url": null
|
| 319 |
+
}
|
| 320 |
+
},
|
| 321 |
+
{
|
| 322 |
+
"27": {
|
| 323 |
+
"title": "Certified data removal from machine learning models.",
|
| 324 |
+
"author": "Chuan Guo, Tom Goldstein, Awni Hannun, and Laurens Van Der Maaten.",
|
| 325 |
+
"venue": "In International Conference on Machine Learning, volume 119,\npages 3832\u20133842. PMLR, 2020.",
|
| 326 |
+
"url": null
|
| 327 |
+
}
|
| 328 |
+
},
|
| 329 |
+
{
|
| 330 |
+
"28": {
|
| 331 |
+
"title": "Approximate data deletion from machine learning models.",
|
| 332 |
+
"author": "Zachary Izzo, Mary Anne Smart, Kamalika Chaudhuri, and James Zou.",
|
| 333 |
+
"venue": "In International Conference on Artificial Intelligence and\nStatistics, volume 130, pages 2008\u20132016. PMLR, 2021.",
|
| 334 |
+
"url": null
|
| 335 |
+
}
|
| 336 |
+
},
|
| 337 |
+
{
|
| 338 |
+
"29": {
|
| 339 |
+
"title": "Understanding black-box predictions via influence functions.",
|
| 340 |
+
"author": "Pang Wei Koh and Percy Liang.",
|
| 341 |
+
"venue": "In International Conference on Machine Learning, volume 70,\npages 1885\u20131894. PMLR, 2017.",
|
| 342 |
+
"url": null
|
| 343 |
+
}
|
| 344 |
+
},
|
| 345 |
+
{
|
| 346 |
+
"30": {
|
| 347 |
+
"title": "Stability and generalization of stochastic gradient methods for\nminimax problems.",
|
| 348 |
+
"author": "Yunwen Lei, Zhenhuan Yang, Tianbao Yang, and Yiming Ying.",
|
| 349 |
+
"venue": "In International Conference on Machine Learning, volume 139,\npages 6175\u20136186. PMLR, 2021.",
|
| 350 |
+
"url": null
|
| 351 |
+
}
|
| 352 |
+
},
|
| 353 |
+
{
|
| 354 |
+
"31": {
|
| 355 |
+
"title": "Online forgetting process for linear regression models.",
|
| 356 |
+
"author": "Yuantong Li, Chi-Hua Wang, and Guang Cheng.",
|
| 357 |
+
"venue": "In International Conference on Artificial Intelligence and\nStatistics, pages 217\u2013225. PMLR, 2021.",
|
| 358 |
+
"url": null
|
| 359 |
+
}
|
| 360 |
+
},
|
| 361 |
+
{
|
| 362 |
+
"32": {
|
| 363 |
+
"title": "Erm-ktp: Knowledge-level machine unlearning via knowledge transfer.",
|
| 364 |
+
"author": "Shen Lin, Xiaoyu Zhang, Chenyang Chen, Xiaofeng Chen, and Willy Susilo.",
|
| 365 |
+
"venue": "In Proceedings of the IEEE/CVF Conference on Computer Vision\nand Pattern Recognition, pages 20147\u201320155, 2023.",
|
| 366 |
+
"url": null
|
| 367 |
+
}
|
| 368 |
+
},
|
| 369 |
+
{
|
| 370 |
+
"33": {
|
| 371 |
+
"title": "On gradient descent ascent for nonconvex-concave minimax problems.",
|
| 372 |
+
"author": "Tianyi Lin, Chi Jin, and Michael Jordan.",
|
| 373 |
+
"venue": "In International Conference on Machine Learning, volume 119,\npages 6083\u20136093. PMLR, 2020.",
|
| 374 |
+
"url": null
|
| 375 |
+
}
|
| 376 |
+
},
|
| 377 |
+
{
|
| 378 |
+
"34": {
|
| 379 |
+
"title": "Muter: Machine unlearning on adversarially trained models.",
|
| 380 |
+
"author": "Junxu Liu, Mingsheng Xue, Jian Lou, Xiaoyu Zhang, Li Xiong, and Zhan Qin.",
|
| 381 |
+
"venue": "In Proceedings of the IEEE/CVF International Conference on\nComputer Vision, pages 4892\u20134902, 2023.",
|
| 382 |
+
"url": null
|
| 383 |
+
}
|
| 384 |
+
},
|
| 385 |
+
{
|
| 386 |
+
"35": {
|
| 387 |
+
"title": "Finding second-order stationary points in nonconvex-strongly-concave\nminimax optimization.",
|
| 388 |
+
"author": "Luo Luo, Yujun Li, and Cheng Chen.",
|
| 389 |
+
"venue": "In Advances in Neural Information Processing Systems,\nvolume 35, pages 36667\u201336679, 2022.",
|
| 390 |
+
"url": null
|
| 391 |
+
}
|
| 392 |
+
},
|
| 393 |
+
{
|
| 394 |
+
"36": {
|
| 395 |
+
"title": "Towards deep learning models resistant to adversarial attacks.",
|
| 396 |
+
"author": "Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and\nAdrian Vladu.",
|
| 397 |
+
"venue": "In The Sixth International Conference on Learning\nRepresentations. OpenReview.net, 2018.",
|
| 398 |
+
"url": null
|
| 399 |
+
}
|
| 400 |
+
},
|
| 401 |
+
{
|
| 402 |
+
"37": {
|
| 403 |
+
"title": "Certifiable machine unlearning for linear models.",
|
| 404 |
+
"author": "Ananth Mahadevan and Michael Mathioudakis.",
|
| 405 |
+
"venue": "arXiv preprint arXiv:2106.15093, 2021.",
|
| 406 |
+
"url": null
|
| 407 |
+
}
|
| 408 |
+
},
|
| 409 |
+
{
|
| 410 |
+
"38": {
|
| 411 |
+
"title": "The eu proposal for a general data protection regulation and the\nroots of the \u2018right to be forgotten\u2019.",
|
| 412 |
+
"author": "Alessandro Mantelero.",
|
| 413 |
+
"venue": "Computer Law & Security Review, 29(3):229\u2013235, 2013.",
|
| 414 |
+
"url": null
|
| 415 |
+
}
|
| 416 |
+
},
|
| 417 |
+
{
|
| 418 |
+
"39": {
|
| 419 |
+
"title": "Deep unlearning via randomized conditionally independent hessians.",
|
| 420 |
+
"author": "Ronak Mehta, Sourav Pal, Vikas Singh, and Sathya N Ravi.",
|
| 421 |
+
"venue": "In Proceedings of the IEEE/CVF Conference on Computer Vision\nand Pattern Recognition, pages 10422\u201310431. IEEE, 2022.",
|
| 422 |
+
"url": null
|
| 423 |
+
}
|
| 424 |
+
},
|
| 425 |
+
{
|
| 426 |
+
"40": {
|
| 427 |
+
"title": "Descent-to-delete: Gradient-based methods for machine unlearning.",
|
| 428 |
+
"author": "Seth Neel, Aaron Roth, and Saeed Sharifi-Malvajerdi.",
|
| 429 |
+
"venue": "In Algorithmic Learning Theory, volume 132, pages 931\u2013962.\nPMLR, 2021.",
|
| 430 |
+
"url": null
|
| 431 |
+
}
|
| 432 |
+
},
|
| 433 |
+
{
|
| 434 |
+
"41": {
|
| 435 |
+
"title": "Cubic regularization of newton method and its global performance.",
|
| 436 |
+
"author": "Yurii Nesterov and Boris T Polyak.",
|
| 437 |
+
"venue": "Mathematical Programming, 108(1):177\u2013205,\n2006.",
|
| 438 |
+
"url": null
|
| 439 |
+
}
|
| 440 |
+
},
|
| 441 |
+
{
|
| 442 |
+
"42": {
|
| 443 |
+
"title": "Variational bayesian unlearning.",
|
| 444 |
+
"author": "Quoc Phong Nguyen, Bryan Kian Hsiang Low, and Patrick Jaillet.",
|
| 445 |
+
"venue": "In Advances in Neural Information Processing Systems,\nvolume 33, pages 16025\u201316036, 2020.",
|
| 446 |
+
"url": null
|
| 447 |
+
}
|
| 448 |
+
},
|
| 449 |
+
{
|
| 450 |
+
"43": {
|
| 451 |
+
"title": "What is a good metric to study generalization of minimax learners?",
|
| 452 |
+
"author": "Asuman E. Ozdaglar, Sarath Pattathil, Jiawei Zhang, and Kaiqing Zhang.",
|
| 453 |
+
"venue": "In Advances in Neural Information Processing Systems,\nvolume 35, pages 38190\u201338203, 2022.",
|
| 454 |
+
"url": null
|
| 455 |
+
}
|
| 456 |
+
},
|
| 457 |
+
{
|
| 458 |
+
"44": {
|
| 459 |
+
"title": "SSSE: efficiently erasing samples from trained machine learning\nmodels.",
|
| 460 |
+
"author": "Alexandra Peste, Dan Alistarh, and Christoph H Lampert.",
|
| 461 |
+
"venue": "arXiv preprint arXiv:2107.03860, 2021.",
|
| 462 |
+
"url": null
|
| 463 |
+
}
|
| 464 |
+
},
|
| 465 |
+
{
|
| 466 |
+
"45": {
|
| 467 |
+
"title": "Hedgecut: Maintaining randomised trees for low-latency machine\nunlearning.",
|
| 468 |
+
"author": "Sebastian Schelter, Stefan Grafberger, and Ted Dunning.",
|
| 469 |
+
"venue": "In International Conference on Management of Data, pages\n1545\u20131557. ACM, 2021.",
|
| 470 |
+
"url": null
|
| 471 |
+
}
|
| 472 |
+
},
|
| 473 |
+
{
|
| 474 |
+
"46": {
|
| 475 |
+
"title": "Remember what you want to forget: Algorithms for machine unlearning.",
|
| 476 |
+
"author": "Ayush Sekhari, Jayadev Acharya, Gautam Kamath, and Ananda Theertha Suresh.",
|
| 477 |
+
"venue": "In Advances in Neural Information Processing Systems,\nvolume 34, pages 18075\u201318086, 2021.",
|
| 478 |
+
"url": null
|
| 479 |
+
}
|
| 480 |
+
},
|
| 481 |
+
{
|
| 482 |
+
"47": {
|
| 483 |
+
"title": "Federated minimax optimization: Improved convergence analyses and\nalgorithms.",
|
| 484 |
+
"author": "Pranay Sharma, Rohan Panda, Gauri Joshi, and Pramod Varshney.",
|
| 485 |
+
"venue": "In International Conference on Machine Learning, volume 162,\npages 19683\u201319730. PMLR, 2022.",
|
| 486 |
+
"url": null
|
| 487 |
+
}
|
| 488 |
+
},
|
| 489 |
+
{
|
| 490 |
+
"48": {
|
| 491 |
+
"title": "Learning with selective forgetting.",
|
| 492 |
+
"author": "Takashi Shibata, Go Irie, Daiki Ikami, and Yu Mitsuzumi.",
|
| 493 |
+
"venue": "In Proceedings of the Thirtieth International Joint Conference\non Artificial Intelligence, pages 989\u2013996. ijcai.org, 2021.",
|
| 494 |
+
"url": null
|
| 495 |
+
}
|
| 496 |
+
},
|
| 497 |
+
{
|
| 498 |
+
"49": {
|
| 499 |
+
"title": "Certifying some distributional robustness with principled adversarial\ntraining.",
|
| 500 |
+
"author": "Aman Sinha, Hongseok Namkoong, and John Duchi.",
|
| 501 |
+
"venue": "In The Sixth International Conference on Learning\nRepresentations. OpenReview.net, 2018.",
|
| 502 |
+
"url": null
|
| 503 |
+
}
|
| 504 |
+
},
|
| 505 |
+
{
|
| 506 |
+
"50": {
|
| 507 |
+
"title": "Algorithms that approximate data removal: New results and\nlimitations.",
|
| 508 |
+
"author": "Vinith Suriyakumar and Ashia C Wilson.",
|
| 509 |
+
"venue": "In Advances in Neural Information Processing Systems,\nvolume 35, pages 18892\u201318903, 2022.",
|
| 510 |
+
"url": null
|
| 511 |
+
}
|
| 512 |
+
},
|
| 513 |
+
{
|
| 514 |
+
"51": {
|
| 515 |
+
"title": "Fast yet effective machine unlearning.",
|
| 516 |
+
"author": "Ayush K Tarun, Vikram S Chundawat, Murari Mandal, and Mohan Kankanhalli.",
|
| 517 |
+
"venue": "IEEE Transactions on Neural Networks and Learning Systems,\n2023.",
|
| 518 |
+
"url": null
|
| 519 |
+
}
|
| 520 |
+
},
|
| 521 |
+
{
|
| 522 |
+
"52": {
|
| 523 |
+
"title": "Efficient algorithms for smooth minimax optimization.",
|
| 524 |
+
"author": "Kiran K Thekumparampil, Prateek Jain, Praneeth Netrapalli, and Sewoong Oh.",
|
| 525 |
+
"venue": "In Advances in Neural Information Processing Systems,\nvolume 32, pages 12659\u201312670, 2019.",
|
| 526 |
+
"url": null
|
| 527 |
+
}
|
| 528 |
+
},
|
| 529 |
+
{
|
| 530 |
+
"53": {
|
| 531 |
+
"title": "Machine unlearning via algorithmic stability.",
|
| 532 |
+
"author": "Enayat Ullah, Tung Mai, Anup Rao, Ryan A Rossi, and Raman Arora.",
|
| 533 |
+
"venue": "In Conference on Learning Theory, volume 134, pages\n4126\u20134142. PMLR, 2021.",
|
| 534 |
+
"url": null
|
| 535 |
+
}
|
| 536 |
+
},
|
| 537 |
+
{
|
| 538 |
+
"54": {
|
| 539 |
+
"title": "The complexity of differential privacy.",
|
| 540 |
+
"author": "Salil Vadhan.",
|
| 541 |
+
"venue": "Tutorials on the Foundations of Cryptography: Dedicated to Oded\nGoldreich, pages 347\u2013450, 2017.",
|
| 542 |
+
"url": null
|
| 543 |
+
}
|
| 544 |
+
},
|
| 545 |
+
{
|
| 546 |
+
"55": {
|
| 547 |
+
"title": "Inductive graph unlearning.",
|
| 548 |
+
"author": "Cheng-Long Wang, Mengdi Huai, and Di Wang.",
|
| 549 |
+
"venue": "In 32nd USENIX Security Symposium, pages 3205\u20133222. USENIX\nAssociation, 2023a.",
|
| 550 |
+
"url": null
|
| 551 |
+
}
|
| 552 |
+
},
|
| 553 |
+
{
|
| 554 |
+
"56": {
|
| 555 |
+
"title": "Bfu: Bayesian federated unlearning with parameter self-sharing.",
|
| 556 |
+
"author": "Weiqi Wang, Zhiyi Tian, Chenhan Zhang, An Liu, and Shui Yu.",
|
| 557 |
+
"venue": "In Proceedings of the 2023 ACM Asia Conference on Computer and\nCommunications Security, pages 567\u2013578. ACM, 2023b.",
|
| 558 |
+
"url": null
|
| 559 |
+
}
|
| 560 |
+
},
|
| 561 |
+
{
|
| 562 |
+
"57": {
|
| 563 |
+
"title": "Machine unlearning of features and labels.",
|
| 564 |
+
"author": "Alexander Warnecke, Lukas Pirch, Christian Wressnegger, and Konrad Rieck.",
|
| 565 |
+
"venue": "In 30th Annual Network and Distributed System Security\nSymposium. The Internet Society, 2023.",
|
| 566 |
+
"url": null
|
| 567 |
+
}
|
| 568 |
+
},
|
| 569 |
+
{
|
| 570 |
+
"58": {
|
| 571 |
+
"title": "Puma: Performance unchanged model augmentation for training data\nremoval.",
|
| 572 |
+
"author": "Ga Wu, Masoud Hashemi, and Christopher Srinivasa.",
|
| 573 |
+
"venue": "In Proceedings of the AAAI Conference on Artificial\nIntelligence, volume 36, pages 8675\u20138682, 2022.",
|
| 574 |
+
"url": null
|
| 575 |
+
}
|
| 576 |
+
},
|
| 577 |
+
{
|
| 578 |
+
"59": {
|
| 579 |
+
"title": "Deltagrad: Rapid retraining of machine learning models.",
|
| 580 |
+
"author": "Yinjun Wu, Edgar Dobriban, and Susan Davidson.",
|
| 581 |
+
"venue": "In International Conference on Machine Learning, volume 119,\npages 10355\u201310366. PMLR, 2020.",
|
| 582 |
+
"url": null
|
| 583 |
+
}
|
| 584 |
+
},
|
| 585 |
+
{
|
| 586 |
+
"60": {
|
| 587 |
+
"title": "Deltaboost: Gradient boosting decision trees with efficient machine\nunlearning.",
|
| 588 |
+
"author": "Zhaomin Wu, Junhui Zhu, Qinbin Li, and Bingsheng He.",
|
| 589 |
+
"venue": "Proceedings of the ACM on Management of Data, 1(2):1\u201326, 2023.",
|
| 590 |
+
"url": null
|
| 591 |
+
}
|
| 592 |
+
},
|
| 593 |
+
{
|
| 594 |
+
"61": {
|
| 595 |
+
"title": "Equitable data valuation meets the right to be forgotten in model\nmarkets.",
|
| 596 |
+
"author": "Haocheng Xia, Jinfei Liu, Jian Lou, Zhan Qin, Kui Ren, Yang Cao, and Li Xiong.",
|
| 597 |
+
"venue": "Proceedings of the VLDB Endowment, 16(11):3349\u20133362, 2023.",
|
| 598 |
+
"url": null
|
| 599 |
+
}
|
| 600 |
+
},
|
| 601 |
+
{
|
| 602 |
+
"62": {
|
| 603 |
+
"title": "Arcane: An efficient architecture for exact machine unlearning.",
|
| 604 |
+
"author": "Haonan Yan, Xiaoguang Li, Ziyao Guo, Hui Li, Fenghua Li, and Xiaodong Lin.",
|
| 605 |
+
"venue": "In Proceedings of the Thirty-First International Joint\nConference on Artificial Intelligence, pages 4006\u20134013. ijcai.org, 2022.",
|
| 606 |
+
"url": null
|
| 607 |
+
}
|
| 608 |
+
},
|
| 609 |
+
{
|
| 610 |
+
"63": {
|
| 611 |
+
"title": "Differentially private sgda for minimax problems.",
|
| 612 |
+
"author": "Zhenhuan Yang, Shu Hu, Yunwen Lei, Kush R Vashney, Siwei Lyu, and Yiming Ying.",
|
| 613 |
+
"venue": "In Uncertainty in Artificial Intelligence, volume 180, pages\n2192\u20132202. PMLR, 2022.",
|
| 614 |
+
"url": null
|
| 615 |
+
}
|
| 616 |
+
},
|
| 617 |
+
{
|
| 618 |
+
"64": {
|
| 619 |
+
"title": "Newton-type methods for minimax optimization.",
|
| 620 |
+
"author": "Guojun Zhang, Kaiwen Wu, Pascal Poupart, and Yaoliang Yu.",
|
| 621 |
+
"venue": "arXiv preprint arXiv:2006.14592, 2020.",
|
| 622 |
+
"url": null
|
| 623 |
+
}
|
| 624 |
+
},
|
| 625 |
+
{
|
| 626 |
+
"65": {
|
| 627 |
+
"title": "Generalization bounds for stochastic saddle point problems.",
|
| 628 |
+
"author": "Junyu Zhang, Mingyi Hong, Mengdi Wang, and Shuzhong Zhang.",
|
| 629 |
+
"venue": "In International Conference on Artificial Intelligence and\nStatistics, volume 130, pages 568\u2013576. PMLR, 2021.",
|
| 630 |
+
"url": null
|
| 631 |
+
}
|
| 632 |
+
},
|
| 633 |
+
{
|
| 634 |
+
"66": {
|
| 635 |
+
"title": "Bring your own algorithm for optimal differentially private\nstochastic minimax optimization.",
|
| 636 |
+
"author": "Liang Zhang, Kiran K Thekumparampil, Sewoong Oh, and Niao He.",
|
| 637 |
+
"venue": "In Advances in Neural Information Processing Systems,\nvolume 35, pages 35174\u201335187, 2022a.",
|
| 638 |
+
"url": null
|
| 639 |
+
}
|
| 640 |
+
},
|
| 641 |
+
{
|
| 642 |
+
"67": {
|
| 643 |
+
"title": "Closed-form machine unlearning for matrix factorization.",
|
| 644 |
+
"author": "Shuijing Zhang, Jian Lou, Li Xiong, Xiaoyu Zhang, and Jing Liu.",
|
| 645 |
+
"venue": "In Proceedings of the 32nd ACM International Conference on\nInformation and Knowledge Management, pages 3278\u20133287, 2023.",
|
| 646 |
+
"url": null
|
| 647 |
+
}
|
| 648 |
+
},
|
| 649 |
+
{
|
| 650 |
+
"68": {
|
| 651 |
+
"title": "Uniform convergence and generalization for nonconvex stochastic\nminimax problems.",
|
| 652 |
+
"author": "Siqi Zhang, Yifan Hu, Liang Zhang, and Niao He.",
|
| 653 |
+
"venue": "In OPT 2022: Optimization for Machine Learning (NeurIPS 2022\nWorkshop), 2022b.",
|
| 654 |
+
"url": null
|
| 655 |
+
}
|
| 656 |
+
},
|
| 657 |
+
{
|
| 658 |
+
"69": {
|
| 659 |
+
"title": "Prompt certified machine unlearning with randomized gradient\nsmoothing and quantization.",
|
| 660 |
+
"author": "Zijie Zhang, Yang Zhou, Xin Zhao, Tianshi Che, and Lingjuan Lyu.",
|
| 661 |
+
"venue": "In Advances in Neural Information Processing Systems,\nvolume 35, pages 13433\u201313455, 2022c.",
|
| 662 |
+
"url": null
|
| 663 |
+
}
|
| 664 |
+
}
|
| 665 |
+
],
|
| 666 |
+
"url": "http://arxiv.org/html/2312.10336v2"
|
| 667 |
+
}
|
20241030/2401.00003v6.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2401.02349v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2401.10225v5.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2401.15866v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2401.16727v4.json
ADDED
|
@@ -0,0 +1,815 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"title": "Recent Advances in Online Hate Speech Moderation: Multimodality and the Role of Large Models",
|
| 3 |
+
"abstract": "Moderating hate speech (HS) in the evolving online landscape is a complex challenge, compounded by the multimodal nature of digital content. This survey examines recent advancements in HS moderation, focusing on the burgeoning role of large language models (LLMs) and large multimodal models (LMMs) in detecting, explaining, debiasing, and countering HS. We begin with a comprehensive analysis of current literature, uncovering how text, images, and audio interact to spread HS. The combination of these modalities adds complexity and subtlety to HS dissemination. We also identified research gaps, particularly in underrepresented languages and cultures, and highlight the need for solutions in low-resource settings. The survey concludes with future research directions, including novel AI methodologies, ethical AI governance, and the development of context-aware systems. This overview aims to inspire further research and foster collaboration towards responsible and human-centric approaches to HS moderation in the digital age.111 WARNING: This paper contains offensive examples.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "In the era of rapid information exchange and digital connectivity, the rise of hate speech (HS) presents a significant challenge with profound implications for global societies. HS, which is any communication demeaning a person or a group based on social or ethnic characteristics, undermines social harmony and individual safety, both online and offline Lupu et al. (2023 ###reference_b41###). The recent Israel\u2013Hamas conflict has notably escalated both anti-Muslim and anti-Semitic sentiments\nworldwide, evidenced by the trending of hashtags such as #HitlerWasRight and #DeathToMuslim on the social media platform X.222https://www.nytimes.com/2023/11/15/technology/hate-speech-israel-gaza-internet.html ###reference_ogy/hate-speech-israel-gaza-internet.html### Moreover, the Council on American\u2013Islamic Relations reported receiving 774 help requests and bias reports from Muslims in the USA within a 16-day period.333https://www.cair.com/press_releases/cair-reports-sharp-increase-in-complaints-reported-bias-incidents-since-107/ ###reference_eports-sharp-increase-in-complaints-reported-bias-incidents-since-107/### While digital interconnectivity facilitates swift information sharing, it simultaneously amplifies the spread and the impact of HS, transcending geographical boundaries.\nTechnological advancements have transformed the expression of HS, leading to its manifestation in various novel forms. Traditionally, HS was predominantly text-based, found in written materials Rini et al. (2020 ###reference_b58###), or verbalized in posts, broadcasts, and public speeches Nielsen (2002 ###reference_b53###). The digital era has ushered in more complex and subtle variants of HS, engaging multiple sensory modalities. A notable instance is vision-language HS, which fuses visual elements with text, commonly disseminated through captioned images and memes Uyheng et al. (2020 ###reference_b70###); Kiela et al. (2020 ###reference_b36###). 
Video-based HS, another emerging form, amalgamates text, visuals, and audio, creating a multi-faceted and potentially more influential mode of communication Das et al. (2023 ###reference_b15###). Figure 1 ###reference_### shows various HS forms targeting immigrants, underscoring animosity towards individuals of diverse nationalities. The text-based approach overtly projects hostile attitudes towards them in the host country. In vision-language HS, visual (e.g., a person preparing to shoot) and textual elements (e.g., sighting an illegal immigrant mowing the lawn) jointly convey antagonism. The figure also includes a music parody, integrating derogatory visuals with discriminatory audio lyrics, to showcase contempt for immigrants.\nWhile existing research surveys Rini et al. (2020 ###reference_b58###); Chhabra and Vishwakarma (2023 ###reference_b11###); Subramanian et al. (2023 ###reference_b66###) have largely focused on text-based HS, they often overlook the complexity of multimodal content. Our survey addresses this gap by offering a comprehensive analysis of HS across various digital platforms, including text, visual, auditory, and combined multimodal expressions. We explore the distinct ways HS manifests in these formats, providing insights into their characteristics and moderation challenges. Additionally, we emphasize the critical role of large language models (LLMs) and large multimodal models (LMMs) in moderating HS, given their ability to process and interpret diverse data types. This survey critically evaluates existing solutions, identifies areas for improvement, and advocates for a shift towards multimodal approaches in HS moderation.\nIn summary, our paper not only bridges the gap in the existing literature by providing a detailed exploration of multimodal HS but also paves the way for future research in this area. 
We aim to inspire advancements in HS moderation technology, particularly in the development and refinement of large models, which are imperative for tackling the complex and ever-changing nature of online HS.\nPaper Collection.\nWe systematically examined research pertaining to the moderation of various types of hate speech, encompassing text, images, videos, and audio. Our search involved keywords such as \u2019hate speech\u2019, \u2019multimodal hate speech\u2019, \u2019hateful memes\u2019, and similar terms, across scholarly platforms like Google Scholar, DBLP, IEEE Xplore, and ACM Digital Library.\nAmong related research, we further selected state-of-the-art studies, with a particular interest in those using LLMs and LMMs.\nDue to the need for a manageable scope, we excluded works that did not leverage LLMs or LMMs, or focused narrowly on regional or multilingual aspects without broader relevance. This decision is not a reflection on the quality or importance of these works but rather a necessity to maintain a focused and coherent survey.\n###table_1###"
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Hate Speech",
|
| 15 |
+
"text": "###figure_1### HS takes various forms \u2014 written text, images, spoken words, and multimedia content \u2014 each posing risks of violence, animosity, or prejudice against specific groups. This section reviews existing literature on HS, categorizing it into text-based, image-based, video-based, and audio-based types.\nFor each HS form, we provide a detailed categorization across four tasks: detection, explanation, debiasing, and counter-speech. Detection identifies hateful content, forming the basis for further actions. Explanation promotes transparency by clarifying why content is flagged, building trust in automated systems. Debiasing is essential to refine detection systems, ensuring fairness and reducing bias. Counter-speech involves taking proactive steps to mitigate the impact of hate speech, fostering healthier online dialogue. Although these tasks address different aspects, they collectively form the foundation of an effective content moderation strategy, highlighting both the interconnectedness of HS forms and the research gaps in advancing multimodal HS moderation.\nFigure 2 ###reference_### illustrates the range of online HS forms.\nAdditionally, Table 1 lists publicly accessible HS datasets in different modalities, providing researchers with essential resources for HS moderation."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "2.1",
|
| 19 |
+
"parent_section_id": "2",
|
| 20 |
+
"section_name": "Text-based Hate Speech",
|
| 21 |
+
"text": "Text-based HS encompasses written or typed expressions manifested across online platforms, such as social media posts Waseem and Hovy (2016 ###reference_b76###); Founta et al. (2018 ###reference_b22###). Recent studies explored diverse aspects of hate and derogatory language, focusing on implicit HS Sap et al. (2020 ###reference_b63###), targeted groups Kennedy et al. (2018 ###reference_b34###); Yoder et al. (2022 ###reference_b82###), and types of attacks ElSherief et al. (2021 ###reference_b18###). As HS detection models improve, it becomes imperative to understand and explain their decision-making processes, mitigating unintended bias Garg et al. (2023 ###reference_b23###). Additionally, some research shifted towards proactive strategies, including countering HS Masud et al. (2022 ###reference_b43###).\nThe detection of text-based HS poses numerous challenges. Detecting hate speech (HS) in a single statement often requires understanding dark humor and cultural nuances Hee et al. (2024 ###reference_b28###). HS can express underlying intent through sarcasm, irony, or cultural references, which may not be immediately apparent. Linguistic variations, such as slang, dialects, and unconventional language use, further complicate the task. The challenge intensifies when considering the broader context of an utterance Nagar et al. (2023 ###reference_b49###); Yu et al. (2022a ###reference_b84###), as statements that seem neutral in isolation may reveal hateful intent when viewed within a conversation. Conversely, what appears offensive might be harmless in context. Therefore, context-aware models are essential for accurately identifying HS by analyzing both individual statements and their surrounding situational context. Expanding the analysis to converssations, such as Reddit threads or WhatsApp chat, adds additional layers of complexity\nNaseem et al. (2019 ###reference_b50###). 
The intent behind a single message can shift based on prior exchanges and the overall tone of the conversation. Furthermore, user-specific features may be important for HS detection Qian et al. (2018 ###reference_b57###). Data such as a user\u2019s posting history, profile, and behavior provide valuable context for identifying hate speech, though using such data raises ethical concerns, particularly regarding privacy."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2.2",
|
| 25 |
+
"parent_section_id": "2",
|
| 26 |
+
"section_name": "Image-based Hate Speech",
|
| 27 |
+
"text": "Image-based HS utilizes visual elements, such as photographs, cartoons, and illustrations, to propagate hate or discrimination against specific groups. A common manifestation of this HS form is memes, which typically consist of images combined with short overlaid text. Although memes often serve humorous or satirical purposes, they are increasingly used to spread hateful content online Kiela et al. (2020 ###reference_b36###). Recent studies have developed datasets for identifying HS Gomez et al. (2020 ###reference_b27###), specific targets Mathias et al. (2021 ###reference_b46###) and types of attacks Fersini et al. (2022 ###reference_b20###) within these memes. Beyond detection, new approaches analyze and mitigate bias in image-based HS detection models Hee et al. (2023 ###reference_b29###); Lin et al. (2023 ###reference_b39###). Additionally, new methodologies are emerging to counteract HS transmitted through memes Van and Wu (2023 ###reference_b71###).\nImage-based HS presents new challenges due to the subtlety of offensive messages concealed within multiple modalities. Images, often embedding symbols, memes, or culturally specific visual cues, require deep cultural and contextual understanding for accurate interpretation. The visual elements and text can subtly imply meanings not immediately evident Kiela et al. (2020 ###reference_b36###). For example, Figure 1 depicts a man with a gun and text suggesting hostility towards immigrants. Differentiating humour from hate in memes is particularly challenging, influenced by varying cultural, societal, and personal perspectives Schmid (2023 ###reference_b64###)."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "2.3",
|
| 31 |
+
"parent_section_id": "2",
|
| 32 |
+
"section_name": "Video-based Hate Speech",
|
| 33 |
+
"text": "Video-based HS presents a complex challenge, comprising a blend of visuals, audio tracks, and/or textual elements. This form of HS ranges from professionally produced propaganda to amateur videos on social media platforms like YouTube and TikTok Das et al. (2023 ###reference_b15###); Wang et al. (2024 ###reference_b74###). The engaging nature of video content and its easy dissemination across digital networks significantly heighten its potential for harm. Echoing the concerns of image-based HS, video-based HS also contributes to the normalization of hateful ideologies and can profoundly influence public opinion. Contemporary research primarily focuses on identifying video-based HS and categorizing its various subtypes Wu and Bhandary (2020 ###reference_b79###). Nonetheless, the amount of research on video-based HS is less developed than text-based and image-based HS, particularly in areas such as analyzing and mitigating model bias, elucidating decision-making processes, and devising counterstrategies. These gaps, likely stemming from the rapid pace of technological advancements and evolving digital trends, underscore the need for further research to promote a more harmonious online environment.\nDetecting hate speech (HS) in videos is challenging and resource-intensive because it requires understanding various elements, including text, images, and audio, both independently and in combination. Each component can independently contain hateful content, further complicating the detection process. The duration of videos further exacerbates this challenge, as longer content necessitates more extensive review and analysis, with potential shifts in context over time. Moreover, subtle visual cues and sophisticated editing techniques can be employed to discreetly embed hate messages, making their detection by automated tools particularly challenging. 
Additionally, video content analysis requires considerable computational resources and time, posing a substantial challenge for organizations to detect and address HS in video formats."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "2.4",
|
| 37 |
+
"parent_section_id": "2",
|
| 38 |
+
"section_name": "Audio-based Hate Speech",
|
| 39 |
+
"text": "Audio-based HS entails the analysis of sound waves to discern elements such as pitch, intonation, and the contextual meaning of spoken words. This form of HS can originate from a variety of audio channels, including real-time conversations, podcasts, and other forms of audio media. The methodologies for addressing audio-based HS are diverse, targeting different facets of the issue. For instance, Barakat et al. (2012 ###reference_b4###) employed a straightforward keyword-based approach to identify segments of HS, while Wazir et al. (2020 ###reference_b77###) engaged in a detailed classification of offensive categories in audio-based HS, showcasing a nuanced method of understanding and categorizing this form of HS. This research area is still in its developmental stages, partly due to the scarcity of dataset. Nonetheless, recognizing the variety and significance of the approaches and techniques employed in this field is imperative. This recognition not only sheds light on the current state of research but also illuminates potential avenues for future exploration.\nDetecting HS in audio recordings presents unique challenges, primarily related to the transcription and interpretation of spoken words. The accuracy of speech recognition is crucial, especially when dealing with diverse accents, background noise, or poor audio quality. Additionally, the tone and intonation of spoken language play a significant role in conveying intent, which can substantially alter the meaning of words. This aspect poses a challenge for detection based solely on text transcripts, as subtle nuances in vocal expression may be lost during transcription. Moreover, non-verbal audio elements, such as sound cues or background noises, are pivotal in contextualizing speech. However, these elements are often difficult to interpret using automated methods."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "3",
|
| 43 |
+
"parent_section_id": null,
|
| 44 |
+
"section_name": "Methodology",
|
| 45 |
+
"text": "This section reviews the state-of-the-art methodologies that have significantly contributed to primary areas of HS research, particularly those involving large models. First, we discuss the recent capabilities of large models (Section 3.1 ###reference_###). Subsequently, we explore studies in four important HS areas: detection (Section 3.2 ###reference_###), explanation (Section 3.3 ###reference_###), debiasing (Section 3.4 ###reference_###), and counter-speech (Section 3.5 ###reference_###), focusing on works using large models. This review highlights the emerging trends, providing insights into how large models can be used to understand and address HS in its various forms."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "3.1",
|
| 49 |
+
"parent_section_id": "3",
|
| 50 |
+
"section_name": "Large Models",
|
| 51 |
+
"text": "The emergence of large foundation models, such as LLMs and LMMs, marks a significant milestone in artificial intelligence research, showcasing unprecedented capabilities in understanding and generating data across different formats Zhao et al. (2023 ###reference_b87###). LLMs are designed to excel in language understanding and text generation Touvron et al. (2023 ###reference_b69###). In contrast, LMMs are adept at processing and interpreting various data types, including visual, textual, and auditory inputs, enabling a broader spectrum of applications Yang et al. (2023b ###reference_b81###). These foundation models have opened new avenues for identifying and mitigating hateful content, which requires nuanced understanding of language and context.\nHere, we regard LLMs and LMMs as models with several billion parameters, aligning with the definition widely accepted and analyzed in numerous studies of large-scale models Luo et al. (2023 ###reference_b40###)."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "3.2",
|
| 55 |
+
"parent_section_id": "3",
|
| 56 |
+
"section_name": "Hate Speech Detection",
|
| 57 |
+
"text": "The leading detection techniques for HS vary according to the modality of the content, encompassing approaches from transformer-based models to spectrogram-based classification models.\nFor text-based HS detection, approaches range from embedding-based methods to advanced neural models Cao et al. (2020 ###reference_b10###); Davidson et al. (2017 ###reference_b16###); Badjatiya et al. (2017 ###reference_b2###); Fortuna and Nunes (2018 ###reference_b21###). AngryBERT Awal et al. (2021 ###reference_b1###) fine-tunes BERT using a multi-task learning strategy for binary text HS detection.\nPromptHate Cao et al. (2022 ###reference_b9###) combines demonstration sampling and in-context learning to fine-tune RoBERTa for hateful meme detection.\nIn audio-based HS detection, ensemble techniques such as AdaBoost, Naive Bayes, and Random Forest have been employed. Boishakhi et al. (2021 ###reference_b6###); Iba\u00f1ez et al. (2021 ###reference_b31###).\nCNNs are also used to convert audio into spectrograms Medina et al. (2022 ###reference_b47###), with self-attentive CNNs extracting audio features Yousefi and Emmanouilidou (2021 ###reference_b83###). For video-based HS detection, a combination of BERT, ViT, and MFCC has been used for text, image, and audio modality analysis, respectively Das et al. (2023 ###reference_b15###).\nNote that audio-based and video-based HS detection are emerging areas with significant potential for future advancements.\nTransformer-based models have significantly advanced the detection of text-based and image-based HS; yet they encounter challenges. For text-based models, a major hurdle is generalizing to out-of-distribution datasets, often hindered by limited vocabulary and the rarity of implicit HS in many datasets Ocampo et al. (2023b ###reference_b55###). To overcome this, recent initiatives include adversarial HS generation and in-context learning with LLMs. Ocampo et al. 
(2023a ###reference_b54###) introduced a method using GPT-3 to generate implicit HS, aiming to both challenge and improve HS classifiers. Concurrently, Wang et al. (2023b ###reference_b75###) developed a technique for optimizing example selection for in-context learning in LLMs.\nIn image-based HS, the primary challenge lies in deciphering implicit hate messages within memes. This often stems from the loss of information during the extraction of text-based features from images, a common step in many methodologies Lee et al. (2021 ###reference_b38###); Pramanick et al. (2021 ###reference_b56###); Cao et al. (2022 ###reference_b9###). Furthermore, the implicit HS in memes can be concealed by seemingly unrelated text and images, as illustrated in Figure 1 ###reference_###. To address these challenges, recent strategies include employing LMMs with prompting techniques and/or knowledge distillation. Pro-Cap Cao et al. (2023 ###reference_b7###) addresses the issue of information loss in image-to-text conversion by prompting an LMM in a QA format, enhancing the generated caption\u2019s quality and informativeness. To tackle the problem of disconnected text and images, MR.HARM Lin et al. (2023 ###reference_b39###) utilizes an LMM to generate potential rationales. These rationales are subsequently employed to fine-tune supervised HS classification systems through knowledge distillation, improving the detection of hateful memes."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "3.3",
|
| 61 |
+
"parent_section_id": "3",
|
| 62 |
+
"section_name": "Hate Speech Explanation",
|
| 63 |
+
"text": "A major challenge in contemporary HS detection methods is their lack of explainability in decision-making processes. Explainability is crucial for fostering user trust and facilitating systems that require human interaction Balkir et al. (2022 ###reference_b3###). One proposed solution involves training supervised models that not only categorize HS but also provide rationales for these classifications. Sap et al. (2020 ###reference_b63###) and ElSherief et al. (2021 ###reference_b18###) developed text-based HS datasets with human-annotated explanations, setting benchmarks for identifying underlying hate. Similarly, Hee et al. (2023 ###reference_b29###) compiled a dataset for hateful memes, complete with human-annotated explanations and benchmarks. However, collecting human-written explanations is not only time-consuming but also susceptible to individual biases. Moreover, it involves the risk of subjecting human annotators to prolonged exposure to HS, which can have adverse psychological effects.\nRecent studies have delved into employing LLMs to generate plausible and meaningful explanations for HS. For instance, Wang et al. (2023a ###reference_b73###) demonstrated that GPT-3 can craft convincing and effective explanations for HS, a finding substantiated by extensive human evaluations. Additionally, HARE Yang et al. (2023a ###reference_b80###) introduces two prompting methods that generate rationales for HS, enhancing the training of HS detection models and improving its performance. This approach presents an alternative means of developing insightful explanations, while simultaneously mitigating the risks associated with prolonged human exposure to HS. Nevertheless, this area of research is still nascent, thus presenting numerous opportunities for further investigation and development."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "3.4",
|
| 67 |
+
"parent_section_id": "3",
|
| 68 |
+
"section_name": "Hate Speech Debiasing",
|
| 69 |
+
"text": "Bias in HS detection models poses a significant risk to their effectiveness and fairness, leading to potential adverse impacts on individuals and society. Addressing this, numerous studies have focused on identifying and mitigating bias in these models. Sap et al. (2019 ###reference_b62###) found that two widely-used corpora exhibit bias against African American English, which increases the likelihood of classifying tweets in this dialect as hateful.\nHee et al. (2022 ###reference_b30###) conducted a quantitative analysis of modality bias in hateful meme detection, observing that the image modality significantly influences model predictions. Their study also highlighted the tendency of these models to generate false positives when encountering specific group identifier terms.\nBeyond merely identifying biases, various studies have introduced innovative methods to reduce these biases within models. Kennedy et al. (2020 ###reference_b35###) developed a regularization technique utilizing SOC post-hoc explanations to address group identifier bias. Similarly, Rizzi et al. (2023 ###reference_b59###) observed that models exhibit biases towards terms linked with stereotypical notions about women, such as dishwasher and broom. To counteract this, the authors proposed a bias mitigation strategy using Bayesian Optimization, which effectively lessened the bias while preserving overall model performance.\nThese efforts underscore the critical importance of not only recognizing, but also actively mitigating bias. This is especially vital as large models increasingly dominate the landscape for generating explanations and enabling transfer learning."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "3.5",
|
| 73 |
+
"parent_section_id": "3",
|
| 74 |
+
"section_name": "Counter Speech",
|
| 75 |
+
"text": "The approach to countering HS focuses on generating non-aggressive responses that either reduce the spread of HS or transform it into respectful and inoffensive speech. Recent research categorizes counter-speech into various response types and emphasizes the importance of contextual understanding. Yu et al. (2023 ###reference_b86###) developed a taxonomy of responses to HS, showcasing the diversity of counter-speech tactics. Mathew et al. (2019 ###reference_b44###) proposed context-specific strategies such as narrative persuasion and active rebuttal. CONAN Chung et al. (2021 ###reference_b13###) focused on generating counter-narratives that challenge hate directed at marginalized groups using reliable evidence, logical arguments, and diverse perspectives. These non-aggressive strategies reduce the spread of hate speech and foster positive discourse. Beyond generating non-aggressive responses, other approaches involve diminishing (i.e., normalization) or eliminating (i.e., correction) the level of hate in HS. NACL Masud et al. (2022 ###reference_b43###) used neural networks to paraphrase hate speech, effectively lowering the intensity of hate. Van and Wu (2023 ###reference_b71###) prompted LMMs to correct HS in memes by replacing hateful text with positive and respectful language.\nThese studies underscore the critical role of generative models in annotating and developing counter-speech strategies. This further signifies the future opportunities of LLMs and LMMs in enhancing approaches to combat hate speech."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "4",
|
| 79 |
+
"parent_section_id": null,
|
| 80 |
+
"section_name": "Challenges",
|
| 81 |
+
"text": "In the dynamic realm of research, especially in areas related to user-generated content and online harmfulness, numerous challenges persist that shape the trajectory and emphasis of scholarly investigations. These challenges, ranging from technical to ethical, define the landscape in which research on HS moderation and detection operates."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "5",
|
| 85 |
+
"parent_section_id": null,
|
| 86 |
+
"section_name": "Future Directions",
|
| 87 |
+
"text": ""
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "6",
|
| 91 |
+
"parent_section_id": null,
|
| 92 |
+
"section_name": "Conclusion",
|
| 93 |
+
"text": "We highlighted the advancements in HS moderation, underscoring the pivotal role of LLMs and LMMs. Despite these strides, challenges remain, particularly in inclusivity and nuanced detection. Future research should focus on developing AI methodologies that are more context-aware and ethically governed. This endeavor is not only a technological challenge, but also a moral imperative, necessitating interdisciplinary collaboration. As we advance, it is crucial to ensure that technological advancements are matched with a commitment to responsibility, striving for a digital environment that is secure and welcoming for everyone."
|
| 94 |
+
}
|
| 95 |
+
],
|
| 96 |
+
"appendix": [],
|
| 97 |
+
"tables": {
|
| 98 |
+
"1": {
|
| 99 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S1.T1\">\n<table class=\"ltx_tabular ltx_centering ltx_align_middle\" id=\"S1.T1.1\">\n<tr class=\"ltx_tr\" id=\"S1.T1.1.1\">\n<td class=\"ltx_td ltx_align_left ltx_border_tt\" id=\"S1.T1.1.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.1.1.1\">Mod.</span></td>\n<td class=\"ltx_td ltx_align_left ltx_border_tt\" id=\"S1.T1.1.1.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.1.2.1\">Dataset</span></td>\n<td class=\"ltx_td ltx_align_left ltx_border_tt\" id=\"S1.T1.1.1.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.1.3.1\">Task</span></td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_tt\" id=\"S1.T1.1.1.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.1.4.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.1.4.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.1.4.1.1.1\">Labels</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_tt\" id=\"S1.T1.1.1.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.1.5.1\">Source</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_tt\" id=\"S1.T1.1.1.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.1.6.1\"># Records</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.2\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.2.1\" rowspan=\"19\"><span class=\"ltx_text\" id=\"S1.T1.1.2.1.1\">Text</span></td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.2.2\">WZ-LS <cite class=\"ltx_cite ltx_citemacro_cite\">Waseem and Hovy (<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib76\" title=\"\">2016</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.2.3\">Det.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S1.T1.1.2.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.2.4.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.2.4.1.1\" 
style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.2.4.1.1.1\">[M.C.]</span> Sexism, Racism, Neither</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.2.5\">Twitter</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S1.T1.1.2.6\">16,914</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.3\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.3.1\">GHC <cite class=\"ltx_cite ltx_citemacro_cite\">Kennedy et\u00a0al. (<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib34\" title=\"\">2018</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.3.2\">Det.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S1.T1.1.3.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.3.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.3.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.3.3.1.1.1\">[M.C.]</span> VO, HD, CV <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.3.3.1.1.2\">[B]</span> Implicitness <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.3.3.1.1.3\">[M.C.]</span> Hate Targets</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.3.4\">Forums</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S1.T1.1.3.5\">27,665</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.4\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.4.1\">Stormfront <cite class=\"ltx_cite ltx_citemacro_cite\">de\u00a0Gibert et\u00a0al. 
(<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib17\" title=\"\">2018</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.4.2\">Det.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S1.T1.1.4.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.4.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.4.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.4.3.1.1.1\">[B]</span> Hateful</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.4.4\">StormFront</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S1.T1.1.4.5\">9,916</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.5\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.5.1\">DT <cite class=\"ltx_cite ltx_citemacro_cite\">Davidson et\u00a0al. (<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib16\" title=\"\">2017</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.5.2\">Det.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S1.T1.1.5.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.5.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.5.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.5.3.1.1.1\">[M.C.]</span> Hateful, Offensive, Neither</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.5.4\">Twitter</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S1.T1.1.5.5\">24,802</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.6\">\n<td class=\"ltx_td ltx_align_left ltx_border_tt\" id=\"S1.T1.1.6.1\">Founta <cite class=\"ltx_cite ltx_citemacro_cite\">Founta et\u00a0al. 
(<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib22\" title=\"\">2018</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_tt\" id=\"S1.T1.1.6.2\">Det.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_tt\" id=\"S1.T1.1.6.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.6.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.6.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.6.3.1.1.1\">[M.C.]</span> Offensive, Abusive, Hateful Speech, Aggressive, Cyberbullying, Spam, Normal</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_tt\" id=\"S1.T1.1.6.4\">Twitter</td>\n<td class=\"ltx_td ltx_align_right ltx_border_tt\" id=\"S1.T1.1.6.5\">80,000</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.7\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.7.1\">DynaHate <cite class=\"ltx_cite ltx_citemacro_cite\">Vidgen et\u00a0al. (<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib72\" title=\"\">2021</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.7.2\">Det.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S1.T1.1.7.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.7.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.7.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.7.3.1.1.1\">[B]</span> Hateful, <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.7.3.1.1.2\">[M.C.]</span> Hate Targets, <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.7.3.1.1.3\">[M.C]</span> Animosity, Derogation, Dehumanization, Threatening, Support</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.7.4\">H-M Adv</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S1.T1.1.7.5\">41,134</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.8\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.8.1\">SBIC <cite 
class=\"ltx_cite ltx_citemacro_cite\">Sap et\u00a0al. (<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib63\" title=\"\">2020</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.8.2\">Det.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S1.T1.1.8.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.8.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.8.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.8.3.1.1.1\">[B]</span> Offensive <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.8.3.1.1.2\">[M.C]</span> Hate Targets <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.8.3.1.1.3\">[B]</span> Intent <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.8.3.1.1.4\">[B]</span> Lewd <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.8.3.1.1.5\">[B]</span> Group <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.8.3.1.1.6\">[B]</span> In-Group</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.8.4\">Mixed</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S1.T1.1.8.5\">44,671</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.9\">\n<td class=\"ltx_td\" id=\"S1.T1.1.9.1\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S1.T1.1.9.2\">Expl.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S1.T1.1.9.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.9.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.9.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.9.3.1.1.1\">[F.T.]</span> Implied Statement</span>\n</span>\n</td>\n<td class=\"ltx_td\" id=\"S1.T1.1.9.4\"></td>\n<td class=\"ltx_td\" id=\"S1.T1.1.9.5\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.10\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.10.1\">IHC <cite class=\"ltx_cite ltx_citemacro_cite\">ElSherief et\u00a0al. 
(<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib18\" title=\"\">2021</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.10.2\">Det.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S1.T1.1.10.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.10.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.10.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.10.3.1.1.1\">[M.C.]</span> Implicit, Explicit, Non-Hate <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.10.3.1.1.2\">[M.C.]</span> Grievance, Incitement, Inferiority, Irony, Stereotypical, Threatening, Others</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.10.4\">Twitter</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S1.T1.1.10.5\">22,584</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.11\">\n<td class=\"ltx_td\" id=\"S1.T1.1.11.1\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S1.T1.1.11.2\">Expl.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S1.T1.1.11.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.11.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.11.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.11.3.1.1.1\">[F.T.]</span> Implied Statement</span>\n</span>\n</td>\n<td class=\"ltx_td\" id=\"S1.T1.1.11.4\"></td>\n<td class=\"ltx_td\" id=\"S1.T1.1.11.5\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.12\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.12.1\">HateXplain <cite class=\"ltx_cite ltx_citemacro_cite\">Mathew et\u00a0al. 
(<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib45\" title=\"\">2021</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.12.2\">Det.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S1.T1.1.12.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.12.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.12.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.12.3.1.1.1\">[M.C]</span> Hate, Offensive, Normal <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.12.3.1.1.2\">[M.C.]</span> Hate Targets</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.12.4\">Mixed</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S1.T1.1.12.5\">20,148</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.13\">\n<td class=\"ltx_td\" id=\"S1.T1.1.13.1\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S1.T1.1.13.2\">Expl.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S1.T1.1.13.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.13.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.13.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.13.3.1.1.1\">[M.L]</span> Text Rationales/Snippets</span>\n</span>\n</td>\n<td class=\"ltx_td\" id=\"S1.T1.1.13.4\"></td>\n<td class=\"ltx_td\" id=\"S1.T1.1.13.5\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.14\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.14.1\">NACL <cite class=\"ltx_cite ltx_citemacro_cite\">Masud et\u00a0al. 
(<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib43\" title=\"\">2022</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.14.2\">Det.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S1.T1.1.14.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.14.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.14.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.14.3.1.1.1\">[M.C.]</span> Hate Intensity <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.14.3.1.1.2\">[M.L.]</span> Hate Spans</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.14.4\">Mixed</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S1.T1.1.14.5\">4,423</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.15\">\n<td class=\"ltx_td\" id=\"S1.T1.1.15.1\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S1.T1.1.15.2\">Ctr.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S1.T1.1.15.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.15.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.15.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.15.3.1.1.1\">[F.T.]</span> Hate Speech Normalization</span>\n</span>\n</td>\n<td class=\"ltx_td\" id=\"S1.T1.1.15.4\"></td>\n<td class=\"ltx_td\" id=\"S1.T1.1.15.5\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.16\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.16.1\">CONAN <cite class=\"ltx_cite ltx_citemacro_cite\">Chung et\u00a0al. 
(<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib12\" title=\"\">2019</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.16.2\">Det.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S1.T1.1.16.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.16.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.16.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.16.3.1.1.1\">[M.C.]</span> Hate Types <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.16.3.1.1.2\">[M.C.]</span> Hate Sub-Topic</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.16.4\">Synthetic</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S1.T1.1.16.5\">14,988</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.17\">\n<td class=\"ltx_td\" id=\"S1.T1.1.17.1\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S1.T1.1.17.2\">Ctr.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S1.T1.1.17.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.17.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.17.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.17.3.1.1.1\">[F.T.]</span> CN Generation</span>\n</span>\n</td>\n<td class=\"ltx_td\" id=\"S1.T1.1.17.4\"></td>\n<td class=\"ltx_td\" id=\"S1.T1.1.17.5\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.18\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.18.1\">Multitarget CONAN <cite class=\"ltx_cite ltx_citemacro_cite\">Fanton et\u00a0al. 
(<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib19\" title=\"\">2021</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.18.2\">Det.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S1.T1.1.18.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.18.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.18.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.18.3.1.1.1\">[M.C.]</span> Hate Targets</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.18.4\">GPT-2</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S1.T1.1.18.5\">5,003</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.19\">\n<td class=\"ltx_td\" id=\"S1.T1.1.19.1\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S1.T1.1.19.2\">Ctr.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S1.T1.1.19.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.19.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.19.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.19.3.1.1.1\">[F.T.]</span> CN Generation</span>\n</span>\n</td>\n<td class=\"ltx_td\" id=\"S1.T1.1.19.4\"></td>\n<td class=\"ltx_td\" id=\"S1.T1.1.19.5\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.20\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.20.1\">Counter Narratives <cite class=\"ltx_cite ltx_citemacro_cite\">Das et\u00a0al. 
(<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib15\" title=\"\">2023</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.20.2\">Ctr.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S1.T1.1.20.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.20.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.20.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.20.3.1.1.1\">[F.T.]</span> CN Generation</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.20.4\">YouTube</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S1.T1.1.20.5\">9,119</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.21\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.21.1\" rowspan=\"10\"><span class=\"ltx_text\" id=\"S1.T1.1.21.1.1\">Img</span></td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.21.2\">MMHS150K <cite class=\"ltx_cite ltx_citemacro_cite\">Gomez et\u00a0al. (<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib27\" title=\"\">2020</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.21.3\">Det.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S1.T1.1.21.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.21.4.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.21.4.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.21.4.1.1.1\">[B]</span> Hateful</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.21.5\">Twitter</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S1.T1.1.21.6\">150,000</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.22\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.22.1\">FHM <cite class=\"ltx_cite ltx_citemacro_cite\">Kiela et\u00a0al. 
(<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib36\" title=\"\">2020</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.22.2\">Det.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S1.T1.1.22.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.22.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.22.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.22.3.1.1.1\">[B]</span> Hateful</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.22.4\">Synthetic</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S1.T1.1.22.5\">10,000</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.23\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.23.1\">Finegrained FHM <cite class=\"ltx_cite ltx_citemacro_cite\">Mathias et\u00a0al. (<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib46\" title=\"\">2021</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.23.2\">Det.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S1.T1.1.23.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.23.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.23.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.23.3.1.1.1\">[B]</span> Hateful <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.23.3.1.1.2\">[M.L.M.C]</span> Protected Category <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.23.3.1.1.3\">[M.L.M.C]</span> Protected Attacks</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.23.4\">Synthetic</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S1.T1.1.23.5\">10,000</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.24\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.24.1\">Misogynous Meme</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" 
id=\"S1.T1.1.24.2\">Det.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S1.T1.1.24.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.24.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.24.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.24.3.1.1.1\">[B]</span> Misogynistic <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.24.3.1.1.2\">[B]</span> Aggressive</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.24.4\">Mixed</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S1.T1.1.24.5\">800</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.25\">\n<td class=\"ltx_td ltx_align_left\" id=\"S1.T1.1.25.1\"><cite class=\"ltx_cite ltx_citemacro_cite\">Gasparini et\u00a0al. (<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib24\" title=\"\">2022</a>)</cite></td>\n<td class=\"ltx_td\" id=\"S1.T1.1.25.2\"></td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S1.T1.1.25.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.25.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.25.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.25.3.1.1.1\">[B]</span> Ironic</span>\n</span>\n</td>\n<td class=\"ltx_td\" id=\"S1.T1.1.25.4\"></td>\n<td class=\"ltx_td\" id=\"S1.T1.1.25.5\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.26\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.26.1\">MAMI <cite class=\"ltx_cite ltx_citemacro_cite\">Fersini et\u00a0al. 
(<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib20\" title=\"\">2022</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.26.2\">Det.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S1.T1.1.26.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.26.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.26.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.26.3.1.1.1\">[B]</span> Misogyny <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.26.3.1.1.2\">[M.L.M.C.]</span> Misogynous, Shaming, Stereotype, Objectification, Violence</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.26.4\">Mixed</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S1.T1.1.26.5\">10,000</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.27\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.27.1\">UA-RU Conflict <cite class=\"ltx_cite ltx_citemacro_cite\">Thapa et\u00a0al. (<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib68\" title=\"\">2022</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.27.2\">Det.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S1.T1.1.27.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.27.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.27.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.27.3.1.1.1\">[B]</span> Hateful</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.27.4\">Twitter</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S1.T1.1.27.5\">5,680</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.28\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.28.1\">CrisisHateMM <cite class=\"ltx_cite ltx_citemacro_cite\">Bhandari et\u00a0al. 
(<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib5\" title=\"\">2023</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.28.2\">Det.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S1.T1.1.28.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.28.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.28.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.28.3.1.1.1\">[B]</span> Hateful <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.28.3.1.1.2\">[B]</span> Directed <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.28.3.1.1.3\">[M.C.]</span> Hate Targets</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.28.4\">Mixed</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S1.T1.1.28.5\">4,723</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.29\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.29.1\">RUHate-MM <cite class=\"ltx_cite ltx_citemacro_cite\">Thapa et\u00a0al. 
(<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib67\" title=\"\">2024</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.29.2\">Det.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S1.T1.1.29.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.29.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.29.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.29.3.1.1.1\">[B]</span> Hateful <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.29.3.1.1.2\">[M.C]</span> Hate Targets</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.29.4\">Twitter</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S1.T1.1.29.5\">20,675</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.30\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.30.1\">HatReD <cite class=\"ltx_cite ltx_citemacro_cite\">Hee et\u00a0al. (<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib29\" title=\"\">2023</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.30.2\">Expl.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S1.T1.1.30.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.30.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.30.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.30.3.1.1.1\">[F.T]</span> Explanations</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.30.4\">Synthetic</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S1.T1.1.30.5\">3,228</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.31\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.31.1\" rowspan=\"3\"><span class=\"ltx_text\" id=\"S1.T1.1.31.1.1\">Video</span></td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.31.2\">Bangla Hate Videos <cite class=\"ltx_cite 
ltx_citemacro_cite\">Junaid et\u00a0al. (<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib33\" title=\"\">2021</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.31.3\">Det.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S1.T1.1.31.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.31.4.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.31.4.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.31.4.1.1.1\">[B]</span> Hateful</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.31.5\">YouTube</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S1.T1.1.31.6\">300</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.32\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.32.1\">HateMM <cite class=\"ltx_cite ltx_citemacro_cite\">Das et\u00a0al. (<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib15\" title=\"\">2023</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.32.2\">Det.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S1.T1.1.32.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.32.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.32.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.32.3.1.1.1\">[B]</span> Hateful <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.32.3.1.1.2\">[M.C.]</span> Hate Targets</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.32.4\">Mixed</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S1.T1.1.32.5\">1,083</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.33\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.33.1\">MultiHateClip <cite class=\"ltx_cite ltx_citemacro_cite\">Wang et\u00a0al. 
(<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib74\" title=\"\">2024</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.33.2\">Det.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S1.T1.1.33.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.33.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.33.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.33.3.1.1.1\">[M.C]</span> Hateful, Offensive, Normal</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.33.4\">YouTube & Bilibili</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S1.T1.1.33.5\">2,000</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.34\">\n<td class=\"ltx_td ltx_align_left ltx_border_bb ltx_border_t\" id=\"S1.T1.1.34.1\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S1.T1.1.34.1.1\">Audio</span></td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.34.2\">DeToxy <cite class=\"ltx_cite ltx_citemacro_cite\">Ghosh et\u00a0al. (<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib25\" title=\"\">2021</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.34.3\">Det.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S1.T1.1.34.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.34.4.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.34.4.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.34.4.1.1.1\">[B]</span> Hateful</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S1.T1.1.34.5\">Mixed</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S1.T1.1.34.6\">2M</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.1.35\">\n<td class=\"ltx_td ltx_align_left ltx_border_bb ltx_border_t\" id=\"S1.T1.1.35.1\">MuTox <cite class=\"ltx_cite ltx_citemacro_cite\">Costa-juss\u00e0 et\u00a0al. 
(<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2401.16727v4#bib.bib14\" title=\"\">2024</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_bb ltx_border_t\" id=\"S1.T1.1.35.2\">Det.</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_bb ltx_border_t\" id=\"S1.T1.1.35.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S1.T1.1.35.3.1\">\n<span class=\"ltx_p\" id=\"S1.T1.1.35.3.1.1\" style=\"width:185.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.1.35.3.1.1.1\">[B]</span> Hateful</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_bb ltx_border_t\" id=\"S1.T1.1.35.4\">Mixed</td>\n<td class=\"ltx_td ltx_align_right ltx_border_bb ltx_border_t\" id=\"S1.T1.1.35.5\">116,000</td>\n</tr>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>Publicly available datasets for HS detection (Det.), HS explanation (Expl.) and counter HS (Ctr.). Abbreviation: <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.9.1\">M.L.</span>: multi-label, <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.10.2\">M.C.</span>: multi-class, <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.11.3\">M.L.M.C.</span>: multi-label multi-class, <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.12.4\">B</span>: binary, <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.13.5\">F.T</span>: free-text, <span class=\"ltx_text ltx_font_bold\" id=\"S1.T1.14.6\">H-M Adv</span>: Human-Machine Adversarial. <span class=\"ltx_text ltx_font_italic\" id=\"S1.T1.15.7\">Note that multilingual HS is out of the scope for the current review</span>.</figcaption>\n</figure>",
|
| 100 |
+
"capture": "Table 1: Publicly available datasets for HS detection (Det.), HS explanation (Expl.) and counter HS (Ctr.). Abbreviation: M.L.: multi-label, M.C.: multi-class, M.L.M.C.: multi-label multi-class, B: binary, F.T: free-text, H-M Adv: Human-Machine Adversarial. Note that multilingual HS is out of the scope for the current review."
|
| 101 |
+
}
|
| 102 |
+
},
|
| 103 |
+
"image_paths": {
|
| 104 |
+
"1": {
|
| 105 |
+
"figure_path": "2401.16727v4_figure_1.png",
|
| 106 |
+
"caption": "Figure 1: Examples of an anti-migrant HS in different forms, encompassing text, image and/or audio modalities. The text-based, vision-language and video-based HS are taken from the Social Bias Inference Corpus (SBIC) dataset, the Facebook Hateful Memes (FHM) dataset and the Bitchute website, respectively.",
|
| 107 |
+
"url": "http://arxiv.org/html/2401.16727v4/x1.png"
|
| 108 |
+
},
|
| 109 |
+
"2": {
|
| 110 |
+
"figure_path": "2401.16727v4_figure_2.png",
|
| 111 |
+
"caption": "Figure 2: Typology of HS based on modalities and tasks. The dark blue boxes are mature areas with multiple studies; light grey boxes are ongoing research,\nand hatched boxes are unexplored topics.",
|
| 112 |
+
"url": "http://arxiv.org/html/2401.16727v4/x2.png"
|
| 113 |
+
}
|
| 114 |
+
},
|
| 115 |
+
"validation": true,
|
| 116 |
+
"references": [
|
| 117 |
+
{
|
| 118 |
+
"1": {
|
| 119 |
+
"title": "Angrybert: Joint learning target and emotion for hate speech detection.",
|
| 120 |
+
"author": "Md Rabiul Awal, Rui Cao, Roy Ka-Wei Lee, and Sandra Mitrovi\u0107. 2021.",
|
| 121 |
+
"venue": "In PAKDD.",
|
| 122 |
+
"url": null
|
| 123 |
+
}
|
| 124 |
+
},
|
| 125 |
+
{
|
| 126 |
+
"2": {
|
| 127 |
+
"title": "Deep learning for hate speech detection in tweets.",
|
| 128 |
+
"author": "Pinkesh Badjatiya, Shashank Gupta, Manish Gupta, and Vasudeva Varma. 2017.",
|
| 129 |
+
"venue": "In Proceedings of the 26th international conference on World Wide Web companion, pages 759\u2013760.",
|
| 130 |
+
"url": null
|
| 131 |
+
}
|
| 132 |
+
},
|
| 133 |
+
{
|
| 134 |
+
"3": {
|
| 135 |
+
"title": "Necessity and sufficiency for explaining text classifiers: A case study in hate speech detection.",
|
| 136 |
+
"author": "Esma Balkir, Isar Nejadgholi, Kathleen C Fraser, and Svetlana Kiritchenko. 2022.",
|
| 137 |
+
"venue": "In NAACL.",
|
| 138 |
+
"url": null
|
| 139 |
+
}
|
| 140 |
+
},
|
| 141 |
+
{
|
| 142 |
+
"4": {
|
| 143 |
+
"title": "Detecting offensive user video blogs: An adaptive keyword spotting approach.",
|
| 144 |
+
"author": "M. S. Barakat, C. H. Ritz, and D. A. Stirling. 2012.",
|
| 145 |
+
"venue": "In ICALIP.",
|
| 146 |
+
"url": null
|
| 147 |
+
}
|
| 148 |
+
},
|
| 149 |
+
{
|
| 150 |
+
"5": {
|
| 151 |
+
"title": "Crisishatemm: Multimodal analysis of directed and undirected hate speech in text-embedded images from russia-ukraine conflict.",
|
| 152 |
+
"author": "Aashish Bhandari, Siddhant Bikram Shah, Surendrabikram Thapa, Usman Naseem, and Mehwish Nasim. 2023.",
|
| 153 |
+
"venue": "In CVPR Workshops. IEEE.",
|
| 154 |
+
"url": null
|
| 155 |
+
}
|
| 156 |
+
},
|
| 157 |
+
{
|
| 158 |
+
"6": {
|
| 159 |
+
"title": "Multi-modal hate speech detection using machine learning.",
|
| 160 |
+
"author": "Fariha Tahosin Boishakhi, Ponkoj Chandra Shill, and Md Golam Rabiul Alam. 2021.",
|
| 161 |
+
"venue": "In Big Data. IEEE.",
|
| 162 |
+
"url": null
|
| 163 |
+
}
|
| 164 |
+
},
|
| 165 |
+
{
|
| 166 |
+
"7": {
|
| 167 |
+
"title": "Pro-cap: Leveraging a frozen vision-language model for hateful meme detection.",
|
| 168 |
+
"author": "Rui Cao, Ming Shan Hee, Adriel Kuek, Wen-Haw Chong, Roy Ka-Wei Lee, and Jing Jiang. 2023.",
|
| 169 |
+
"venue": "In ACMMM.",
|
| 170 |
+
"url": null
|
| 171 |
+
}
|
| 172 |
+
},
|
| 173 |
+
{
|
| 174 |
+
"8": {
|
| 175 |
+
"title": "Hategan: Adversarial generative-based data augmentation for hate speech detection.",
|
| 176 |
+
"author": "Rui Cao and Roy Ka-Wei Lee. 2020.",
|
| 177 |
+
"venue": "In COLING.",
|
| 178 |
+
"url": null
|
| 179 |
+
}
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"9": {
|
| 183 |
+
"title": "Prompting for multimodal hateful meme classification.",
|
| 184 |
+
"author": "Rui Cao, Roy Ka-Wei Lee, Wen-Haw Chong, and Jing Jiang. 2022.",
|
| 185 |
+
"venue": "In EMNLP.",
|
| 186 |
+
"url": null
|
| 187 |
+
}
|
| 188 |
+
},
|
| 189 |
+
{
|
| 190 |
+
"10": {
|
| 191 |
+
"title": "Deephate: Hate speech detection via multi-faceted text representations.",
|
| 192 |
+
"author": "Rui Cao, Roy Ka-Wei Lee, and Tuan-Anh Hoang. 2020.",
|
| 193 |
+
"venue": "In WebSci.",
|
| 194 |
+
"url": null
|
| 195 |
+
}
|
| 196 |
+
},
|
| 197 |
+
{
|
| 198 |
+
"11": {
|
| 199 |
+
"title": "A literature survey on multimodal and multilingual automatic hate speech identification.",
|
| 200 |
+
"author": "Anusha Chhabra and Dinesh Kumar Vishwakarma. 2023.",
|
| 201 |
+
"venue": "Multimedia Systems.",
|
| 202 |
+
"url": null
|
| 203 |
+
}
|
| 204 |
+
},
|
| 205 |
+
{
|
| 206 |
+
"12": {
|
| 207 |
+
"title": "CONAN - COunter NArratives through nichesourcing: a multilingual dataset of responses to fight online hate speech.",
|
| 208 |
+
"author": "Yi-Ling Chung, Elizaveta Kuzmenko, Serra Sinem Tekiroglu, and Marco Guerini. 2019.",
|
| 209 |
+
"venue": "In ACL.",
|
| 210 |
+
"url": null
|
| 211 |
+
}
|
| 212 |
+
},
|
| 213 |
+
{
|
| 214 |
+
"13": {
|
| 215 |
+
"title": "Towards knowledge-grounded counter narrative generation for hate speech.",
|
| 216 |
+
"author": "Yi-Ling Chung, Serra Sinem Tekiro\u011flu, and Marco Guerini. 2021.",
|
| 217 |
+
"venue": "In ACL (Findings).",
|
| 218 |
+
"url": "https://doi.org/10.18653/v1/2021.findings-acl.79"
|
| 219 |
+
}
|
| 220 |
+
},
|
| 221 |
+
{
|
| 222 |
+
"14": {
|
| 223 |
+
"title": "Mutox: Universal multilingual audio-based toxicity dataset and zero-shot detector.",
|
| 224 |
+
"author": "Marta R Costa-juss\u00e0, Mariano Coria Meglioli, Pierre Andrews, David Dale, Prangthip Hansanti, Elahe Kalbassi, Alex Mourachko, Christophe Ropers, and Carleigh Wood. 2024.",
|
| 225 |
+
"venue": "arXiv preprint arXiv:2401.05060.",
|
| 226 |
+
"url": null
|
| 227 |
+
}
|
| 228 |
+
},
|
| 229 |
+
{
|
| 230 |
+
"15": {
|
| 231 |
+
"title": "Hatemm: A multi-modal dataset for hate video classification.",
|
| 232 |
+
"author": "Mithun Das, Rohit Raj, Punyajoy Saha, Binny Mathew, Manish Gupta, and Animesh Mukherjee. 2023.",
|
| 233 |
+
"venue": "In ICWSM.",
|
| 234 |
+
"url": null
|
| 235 |
+
}
|
| 236 |
+
},
|
| 237 |
+
{
|
| 238 |
+
"16": {
|
| 239 |
+
"title": "Automated hate speech detection and the problem of offensive language.",
|
| 240 |
+
"author": "Thomas Davidson, Dana Warmsley, Michael Macy, and Ingmar Weber. 2017.",
|
| 241 |
+
"venue": "In ICWSM.",
|
| 242 |
+
"url": null
|
| 243 |
+
}
|
| 244 |
+
},
|
| 245 |
+
{
|
| 246 |
+
"17": {
|
| 247 |
+
"title": "Hate speech dataset from a white supremacy forum.",
|
| 248 |
+
"author": "Ona de Gibert, Naiara Perez, Aitor Garc\u00eda-Pablos, and Montse Cuadros. 2018.",
|
| 249 |
+
"venue": "In Proc. of the 2st workshop on ab. lang. online.",
|
| 250 |
+
"url": null
|
| 251 |
+
}
|
| 252 |
+
},
|
| 253 |
+
{
|
| 254 |
+
"18": {
|
| 255 |
+
"title": "Latent hatred: A benchmark for understanding implicit hate speech.",
|
| 256 |
+
"author": "Mai ElSherief, Caleb Ziems, David Muchlinski, Vaishnavi Anupindi, Jordyn Seybolt, Munmun De Choudhury, and Diyi Yang. 2021.",
|
| 257 |
+
"venue": "In EMNLP.",
|
| 258 |
+
"url": null
|
| 259 |
+
}
|
| 260 |
+
},
|
| 261 |
+
{
|
| 262 |
+
"19": {
|
| 263 |
+
"title": "Human-in-the-loop for data collection: a multi-target counter narrative dataset to fight online hate speech.",
|
| 264 |
+
"author": "Margherita Fanton, Helena Bonaldi, Serra Sinem Tekiroglu, and Marco Guerini. 2021.",
|
| 265 |
+
"venue": "In ACL.",
|
| 266 |
+
"url": null
|
| 267 |
+
}
|
| 268 |
+
},
|
| 269 |
+
{
|
| 270 |
+
"20": {
|
| 271 |
+
"title": "Semeval-2022 task 5: Multimedia automatic misogyny identification.",
|
| 272 |
+
"author": "Elisabetta Fersini, Francesca Gasparini, Giulia Rizzi, Aurora Saibene, Berta Chulvi, Paolo Rosso, Alyssa Lees, and Jeffrey Sorensen. 2022.",
|
| 273 |
+
"venue": "In SemEval@NAACL.",
|
| 274 |
+
"url": null
|
| 275 |
+
}
|
| 276 |
+
},
|
| 277 |
+
{
|
| 278 |
+
"21": {
|
| 279 |
+
"title": "A survey on automatic detection of hate speech in text.",
|
| 280 |
+
"author": "Paula Fortuna and S\u00e9rgio Nunes. 2018.",
|
| 281 |
+
"venue": "ACM Computing Surveys (CSUR), 51(4):1\u201330.",
|
| 282 |
+
"url": null
|
| 283 |
+
}
|
| 284 |
+
},
|
| 285 |
+
{
|
| 286 |
+
"22": {
|
| 287 |
+
"title": "Large scale crowdsourcing and characterization of twitter abusive behavior.",
|
| 288 |
+
"author": "Antigoni Founta, Constantinos Djouvas, Despoina Chatzakou, Ilias Leontiadis, Jeremy Blackburn, Gianluca Stringhini, Athena Vakali, Michael Sirivianos, and Nicolas Kourtellis. 2018.",
|
| 289 |
+
"venue": "In ICWSM.",
|
| 290 |
+
"url": null
|
| 291 |
+
}
|
| 292 |
+
},
|
| 293 |
+
{
|
| 294 |
+
"23": {
|
| 295 |
+
"title": "Handling bias in toxic speech detection: A survey.",
|
| 296 |
+
"author": "Tanmay Garg, Sarah Masud, Tharun Suresh, and Tanmoy Chakraborty. 2023.",
|
| 297 |
+
"venue": "ACM Computing Surveys, 55(13s):1\u201332.",
|
| 298 |
+
"url": null
|
| 299 |
+
}
|
| 300 |
+
},
|
| 301 |
+
{
|
| 302 |
+
"24": {
|
| 303 |
+
"title": "Benchmark dataset of memes with text transcriptions for automatic detection of multi-modal misogynistic content.",
|
| 304 |
+
"author": "Francesca Gasparini, Giulia Rizzi, Aurora Saibene, and Elisabetta Fersini. 2022.",
|
| 305 |
+
"venue": "Data in brief.",
|
| 306 |
+
"url": null
|
| 307 |
+
}
|
| 308 |
+
},
|
| 309 |
+
{
|
| 310 |
+
"25": {
|
| 311 |
+
"title": "Detoxy: A large-scale multimodal dataset for toxicity classification in spoken utterances.",
|
| 312 |
+
"author": "Sreyan Ghosh, Samden Lepcha, S Sakshi, Rajiv Ratn Shah, and Srinivasan Umesh. 2021.",
|
| 313 |
+
"venue": "arXiv preprint arXiv:2110.07592.",
|
| 314 |
+
"url": null
|
| 315 |
+
}
|
| 316 |
+
},
|
| 317 |
+
{
|
| 318 |
+
"26": {
|
| 319 |
+
"title": "CoSyn: Detecting implicit hate speech in online conversations using a context synergized hyperbolic network.",
|
| 320 |
+
"author": "Sreyan Ghosh, Manan Suri, Purva Chiniya, Utkarsh Tyagi, Sonal Kumar, and Dinesh Manocha. 2023.",
|
| 321 |
+
"venue": "In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 6159\u20136173.",
|
| 322 |
+
"url": null
|
| 323 |
+
}
|
| 324 |
+
},
|
| 325 |
+
{
|
| 326 |
+
"27": {
|
| 327 |
+
"title": "Exploring hate speech detection in multimodal publications.",
|
| 328 |
+
"author": "Raul Gomez, Jaume Gibert, Llu\u00eds G\u00f3mez, and Dimosthenis Karatzas. 2020.",
|
| 329 |
+
"venue": "In IEEE/WACV.",
|
| 330 |
+
"url": null
|
| 331 |
+
}
|
| 332 |
+
},
|
| 333 |
+
{
|
| 334 |
+
"28": {
|
| 335 |
+
"title": "Understanding (dark) humour with internet meme analysis.",
|
| 336 |
+
"author": "Ming Shan Hee, Rui Cao, Tanmoy Chakraborty, and Roy Ka-Wei Lee. 2024.",
|
| 337 |
+
"venue": "In Companion Proceedings of the ACM on Web Conference 2024, pages 1276\u20131279.",
|
| 338 |
+
"url": null
|
| 339 |
+
}
|
| 340 |
+
},
|
| 341 |
+
{
|
| 342 |
+
"29": {
|
| 343 |
+
"title": "Decoding the underlying meaning of multimodal hateful memes.",
|
| 344 |
+
"author": "Ming Shan Hee, Wen-Haw Chong, and Roy Ka-Wei Lee. 2023.",
|
| 345 |
+
"venue": "In IJCAI.",
|
| 346 |
+
"url": null
|
| 347 |
+
}
|
| 348 |
+
},
|
| 349 |
+
{
|
| 350 |
+
"30": {
|
| 351 |
+
"title": "On explaining multimodal hateful meme detection models.",
|
| 352 |
+
"author": "Ming Shan Hee, Roy Ka-Wei Lee, and Wen-Haw Chong. 2022.",
|
| 353 |
+
"venue": "In WWW.",
|
| 354 |
+
"url": null
|
| 355 |
+
}
|
| 356 |
+
},
|
| 357 |
+
{
|
| 358 |
+
"31": {
|
| 359 |
+
"title": "Audio-based hate speech classification from online short-form videos.",
|
| 360 |
+
"author": "Michael Iba\u00f1ez, Ranz Sapinit, Lloyd Antonie Reyes, Mohammed Hussien, Joseph Marvin Imperial, and Ramon Rodriguez. 2021.",
|
| 361 |
+
"venue": "In IALP.",
|
| 362 |
+
"url": null
|
| 363 |
+
}
|
| 364 |
+
},
|
| 365 |
+
{
|
| 366 |
+
"32": {
|
| 367 |
+
"title": "Survey of hallucination in natural language generation.",
|
| 368 |
+
"author": "Ziwei Ji, Nayeon Lee, Rita Frieske, Tiezheng Yu, Dan Su, Yan Xu, Etsuko Ishii, Ye Jin Bang, Andrea Madotto, and Pascale Fung. 2023.",
|
| 369 |
+
"venue": "ACM Computing Surveys.",
|
| 370 |
+
"url": null
|
| 371 |
+
}
|
| 372 |
+
},
|
| 373 |
+
{
|
| 374 |
+
"33": {
|
| 375 |
+
"title": "Bangla hate speech detection in videos using machine learning.",
|
| 376 |
+
"author": "Mohd Istiaq Hossain Junaid, Faisal Hossain, and Rashedur M Rahman. 2021.",
|
| 377 |
+
"venue": "In UEMCON, pages 0347\u20130351. IEEE.",
|
| 378 |
+
"url": null
|
| 379 |
+
}
|
| 380 |
+
},
|
| 381 |
+
{
|
| 382 |
+
"34": {
|
| 383 |
+
"title": "The gab hate corpus: A collection of 27k posts annotated for hate speech.",
|
| 384 |
+
"author": "Brendan Kennedy, Mohammad Atari, Aida Mostafazadeh Davani, Leigh Yeh, Ali Omrani, Yehsong Kim, Kris Coombs, Shreya Havaldar, Gwenyth Portillo-Wightman, Elaine Gonzalez, et al. 2018.",
|
| 385 |
+
"venue": "PsyArXiv. July.",
|
| 386 |
+
"url": null
|
| 387 |
+
}
|
| 388 |
+
},
|
| 389 |
+
{
|
| 390 |
+
"35": {
|
| 391 |
+
"title": "Contextualizing hate speech classifiers with post-hoc explanation.",
|
| 392 |
+
"author": "Brendan Kennedy, Xisen Jin, Aida Mostafazadeh Davani, Morteza Dehghani, and Xiang Ren. 2020.",
|
| 393 |
+
"venue": "In ACL.",
|
| 394 |
+
"url": null
|
| 395 |
+
}
|
| 396 |
+
},
|
| 397 |
+
{
|
| 398 |
+
"36": {
|
| 399 |
+
"title": "The hateful memes challenge: Detecting hate speech in multimodal memes.",
|
| 400 |
+
"author": "Douwe Kiela, Hamed Firooz, Aravind Mohan, Vedanuj Goswami, Amanpreet Singh, Pratik Ringshia, and Davide Testuggine. 2020.",
|
| 401 |
+
"venue": "In NeurIPS.",
|
| 402 |
+
"url": null
|
| 403 |
+
}
|
| 404 |
+
},
|
| 405 |
+
{
|
| 406 |
+
"37": {
|
| 407 |
+
"title": "Hate speech classifiers are culturally insensitive.",
|
| 408 |
+
"author": "Nayeon Lee, Chani Jung, and Alice Oh. 2023.",
|
| 409 |
+
"venue": "In Proceedings of the First Workshop on Cross-Cultural Considerations in NLP (C3NLP), pages 35\u201346.",
|
| 410 |
+
"url": null
|
| 411 |
+
}
|
| 412 |
+
},
|
| 413 |
+
{
|
| 414 |
+
"38": {
|
| 415 |
+
"title": "Disentangling hate in online memes.",
|
| 416 |
+
"author": "Roy Ka-Wei Lee, Rui Cao, Ziqing Fan, Jing Jiang, and Wen-Haw Chong. 2021.",
|
| 417 |
+
"venue": "In ACMMM.",
|
| 418 |
+
"url": null
|
| 419 |
+
}
|
| 420 |
+
},
|
| 421 |
+
{
|
| 422 |
+
"39": {
|
| 423 |
+
"title": "Beneath the surface: Unveiling harmful memes with multimodal reasoning distilled from large language models.",
|
| 424 |
+
"author": "Hongzhan Lin, Ziyang Luo, Jing Ma, and Long Chen. 2023.",
|
| 425 |
+
"venue": "In EMNLP (Findings).",
|
| 426 |
+
"url": null
|
| 427 |
+
}
|
| 428 |
+
},
|
| 429 |
+
{
|
| 430 |
+
"40": {
|
| 431 |
+
"title": "Zero-resource hallucination prevention for large language models.",
|
| 432 |
+
"author": "Junyu Luo, Cao Xiao, and Fenglong Ma. 2023.",
|
| 433 |
+
"venue": "arXiv preprint arXiv:2309.02654.",
|
| 434 |
+
"url": null
|
| 435 |
+
}
|
| 436 |
+
},
|
| 437 |
+
{
|
| 438 |
+
"41": {
|
| 439 |
+
"title": "Offline events and online hate.",
|
| 440 |
+
"author": "Yonatan Lupu, Richard Sear, Nicolas Vel\u00e1squez, Rhys Leahy, Nicholas Johnson Restrepo, Beth Goldberg, and Neil F Johnson. 2023.",
|
| 441 |
+
"venue": "PLoS one.",
|
| 442 |
+
"url": null
|
| 443 |
+
}
|
| 444 |
+
},
|
| 445 |
+
{
|
| 446 |
+
"42": {
|
| 447 |
+
"title": "You know what to do proactive detection of youtube videos targeted by coordinated hate attacks.",
|
| 448 |
+
"author": "Enrico Mariconti, Guillermo Suarez-Tangil, Jeremy Blackburn, Emiliano De Cristofaro, Nicolas Kourtellis, Ilias Leontiadis, Jordi Luque Serrano, and Gianluca Stringhini. 2019.",
|
| 449 |
+
"venue": "Proc. of the ACM on HCI.",
|
| 450 |
+
"url": null
|
| 451 |
+
}
|
| 452 |
+
},
|
| 453 |
+
{
|
| 454 |
+
"43": {
|
| 455 |
+
"title": "Proactively reducing the hate intensity of online posts via hate speech normalization.",
|
| 456 |
+
"author": "Sarah Masud, Manjot Bedi, Mohammad Aflah Khan, Md Shad Akhtar, and Tanmoy Chakraborty. 2022.",
|
| 457 |
+
"venue": "In ACM-SIGKDD.",
|
| 458 |
+
"url": null
|
| 459 |
+
}
|
| 460 |
+
},
|
| 461 |
+
{
|
| 462 |
+
"44": {
|
| 463 |
+
"title": "Thou shalt not hate: Countering online hate speech.",
|
| 464 |
+
"author": "Binny Mathew, Punyajoy Saha, Hardik Tharad, Subham Rajgaria, Prajwal Singhania, Suman Kalyan Maity, Pawan Goyal, and Animesh Mukherjee. 2019.",
|
| 465 |
+
"venue": "In ICWSM.",
|
| 466 |
+
"url": null
|
| 467 |
+
}
|
| 468 |
+
},
|
| 469 |
+
{
|
| 470 |
+
"45": {
|
| 471 |
+
"title": "Hatexplain: A benchmark dataset for explainable hate speech detection.",
|
| 472 |
+
"author": "Binny Mathew, Punyajoy Saha, Seid Muhie Yimam, Chris Biemann, Pawan Goyal, and Animesh Mukherjee. 2021.",
|
| 473 |
+
"venue": "In AAAI.",
|
| 474 |
+
"url": null
|
| 475 |
+
}
|
| 476 |
+
},
|
| 477 |
+
{
|
| 478 |
+
"46": {
|
| 479 |
+
"title": "Findings of the WOAH 5 shared task on fine grained hateful memes detection.",
|
| 480 |
+
"author": "Lambert Mathias, Shaoliang Nie, Aida Mostafazadeh Davani, Douwe Kiela, Vinodkumar Prabhakaran, Bertie Vidgen, and Zeerak Waseem. 2021.",
|
| 481 |
+
"venue": "In WOAH.",
|
| 482 |
+
"url": null
|
| 483 |
+
}
|
| 484 |
+
},
|
| 485 |
+
{
|
| 486 |
+
"47": {
|
| 487 |
+
"title": "Audio-based hate speech detection for the metaverse using cnn.",
|
| 488 |
+
"author": "Robin Medina, Judith Njoku, Jae Min Lee, and Dong-Seong Kim. 2022.",
|
| 489 |
+
"venue": "In KICS.",
|
| 490 |
+
"url": null
|
| 491 |
+
}
|
| 492 |
+
},
|
| 493 |
+
{
|
| 494 |
+
"48": {
|
| 495 |
+
"title": "Predicting hate intensity of twitter conversation threads.",
|
| 496 |
+
"author": "Qing Meng, Tharun Suresh, Roy Ka-Wei Lee, and Tanmoy Chakraborty. 2023.",
|
| 497 |
+
"venue": "Knowledge-Based Systems, 275:110644.",
|
| 498 |
+
"url": null
|
| 499 |
+
}
|
| 500 |
+
},
|
| 501 |
+
{
|
| 502 |
+
"49": {
|
| 503 |
+
"title": "Towards more robust hate speech detection: using social context and user data.",
|
| 504 |
+
"author": "Seema Nagar, Ferdous Ahmed Barbhuiya, and Kuntal Dey. 2023.",
|
| 505 |
+
"venue": "Social Network Analysis and Mining, 13(1):47.",
|
| 506 |
+
"url": null
|
| 507 |
+
}
|
| 508 |
+
},
|
| 509 |
+
{
|
| 510 |
+
"50": {
|
| 511 |
+
"title": "Deep context-aware embedding for abusive and hate speech detection on twitter.",
|
| 512 |
+
"author": "Usman Naseem, Imran Razzak, and Ibrahim A Hameed. 2019.",
|
| 513 |
+
"venue": "Aust. J. Intell. Inf. Process. Syst., 15(3):69\u201376.",
|
| 514 |
+
"url": null
|
| 515 |
+
}
|
| 516 |
+
},
|
| 517 |
+
{
|
| 518 |
+
"51": {
|
| 519 |
+
"title": "Sghatecheck: Functional tests for detecting hate speech in low-resource languages of singapore.",
|
| 520 |
+
"author": "Ri Chi Ng, Nirmalendu Prakash, Ming Shan Hee, Kenny Tsu Wei Choo, and Roy Ka-Wei Lee. 2024.",
|
| 521 |
+
"venue": "In Proceedings of the 8th Workshop on Online Abuse and Harms (WOAH 2024), pages 312\u2013327.",
|
| 522 |
+
"url": null
|
| 523 |
+
}
|
| 524 |
+
},
|
| 525 |
+
{
|
| 526 |
+
"52": {
|
| 527 |
+
"title": "Extracting cultural commonsense knowledge at scale.",
|
| 528 |
+
"author": "Tuan-Phong Nguyen, Simon Razniewski, Aparna Varde, and Gerhard Weikum. 2023.",
|
| 529 |
+
"venue": "In WWW, pages 1907\u20131917.",
|
| 530 |
+
"url": null
|
| 531 |
+
}
|
| 532 |
+
},
|
| 533 |
+
{
|
| 534 |
+
"53": {
|
| 535 |
+
"title": "Subtle, pervasive, harmful: Racist and sexist remarks in public as hate speech.",
|
| 536 |
+
"author": "Laura Beth Nielsen. 2002.",
|
| 537 |
+
"venue": "Journal of Social issues.",
|
| 538 |
+
"url": null
|
| 539 |
+
}
|
| 540 |
+
},
|
| 541 |
+
{
|
| 542 |
+
"54": {
|
| 543 |
+
"title": "Playing the part of the sharp bully: Generating adversarial examples for implicit hate speech detection.",
|
| 544 |
+
"author": "Nicolas Ocampo, Elena Cabrio, and Serena Villata. 2023a.",
|
| 545 |
+
"venue": "In ACL (Findings).",
|
| 546 |
+
"url": null
|
| 547 |
+
}
|
| 548 |
+
},
|
| 549 |
+
{
|
| 550 |
+
"55": {
|
| 551 |
+
"title": "An in-depth analysis of implicit and subtle hate speech messages.",
|
| 552 |
+
"author": "Nicolas Benjamin Ocampo, Ekaterina Sviridova, Elena Cabrio, and Serena Villata. 2023b.",
|
| 553 |
+
"venue": "In ACL.",
|
| 554 |
+
"url": null
|
| 555 |
+
}
|
| 556 |
+
},
|
| 557 |
+
{
|
| 558 |
+
"56": {
|
| 559 |
+
"title": "Momenta: A multimodal framework for detecting harmful memes and their targets.",
|
| 560 |
+
"author": "Shraman Pramanick, Shivam Sharma, Dimitar Dimitrov, Md Shad Akhtar, Preslav Nakov, and Tanmoy Chakraborty. 2021.",
|
| 561 |
+
"venue": "pages 4439\u20134455. ACL.",
|
| 562 |
+
"url": null
|
| 563 |
+
}
|
| 564 |
+
},
|
| 565 |
+
{
|
| 566 |
+
"57": {
|
| 567 |
+
"title": "Leveraging intra-user and inter-user representation learning for automated hate speech detection.",
|
| 568 |
+
"author": "Jing Qian, Mai ElSherief, Elizabeth Belding, and William Yang Wang. 2018.",
|
| 569 |
+
"venue": "In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 118\u2013123.",
|
| 570 |
+
"url": null
|
| 571 |
+
}
|
| 572 |
+
},
|
| 573 |
+
{
|
| 574 |
+
"58": {
|
| 575 |
+
"title": "Systematic literature review of hate speech detection with text mining.",
|
| 576 |
+
"author": "Rini Rini, Ema Utami, and Anggit Dwi Hartanto. 2020.",
|
| 577 |
+
"venue": "In ICORIS. IEEE.",
|
| 578 |
+
"url": null
|
| 579 |
+
}
|
| 580 |
+
},
|
| 581 |
+
{
|
| 582 |
+
"59": {
|
| 583 |
+
"title": "Recognizing misogynous memes: Biased models and tricky archetypes.",
|
| 584 |
+
"author": "Giulia Rizzi, Francesca Gasparini, Aurora Saibene, Paolo Rosso, and Elisabetta Fersini. 2023.",
|
| 585 |
+
"venue": "Inf. Proc. Mgmt.",
|
| 586 |
+
"url": null
|
| 587 |
+
}
|
| 588 |
+
},
|
| 589 |
+
{
|
| 590 |
+
"60": {
|
| 591 |
+
"title": "Multilingual hatecheck: Functional tests for multilingual hate speech detection models.",
|
| 592 |
+
"author": "Paul R\u00f6ttger, Haitham Seelawi, Debora Nozza, Zeerak Talat, and Bertie Vidgen. 2022.",
|
| 593 |
+
"venue": "In Proceedings of the Sixth Workshop on Online Abuse and Harms (WOAH), pages 154\u2013169.",
|
| 594 |
+
"url": null
|
| 595 |
+
}
|
| 596 |
+
},
|
| 597 |
+
{
|
| 598 |
+
"61": {
|
| 599 |
+
"title": "Hatecheck: Functional tests for hate speech detection models.",
|
| 600 |
+
"author": "Paul R\u00f6ttger, Bertram Vidgen, Dong Nguyen, Zeerak Waseem, Helen Margetts, and Janet B Pierrehumbert. 2020.",
|
| 601 |
+
"venue": "arXiv preprint arXiv:2012.15606.",
|
| 602 |
+
"url": null
|
| 603 |
+
}
|
| 604 |
+
},
|
| 605 |
+
{
|
| 606 |
+
"62": {
|
| 607 |
+
"title": "The risk of racial bias in hate speech detection.",
|
| 608 |
+
"author": "Maarten Sap, Dallas Card, Saadia Gabriel, Yejin Choi, and Noah A Smith. 2019.",
|
| 609 |
+
"venue": "In ACL.",
|
| 610 |
+
"url": null
|
| 611 |
+
}
|
| 612 |
+
},
|
| 613 |
+
{
|
| 614 |
+
"63": {
|
| 615 |
+
"title": "Social bias frames: Reasoning about social and power implications of language.",
|
| 616 |
+
"author": "Maarten Sap, Saadia Gabriel, Lianhui Qin, Dan Jurafsky, Noah A. Smith, and Yejin Choi. 2020.",
|
| 617 |
+
"venue": "In ACL.",
|
| 618 |
+
"url": null
|
| 619 |
+
}
|
| 620 |
+
},
|
| 621 |
+
{
|
| 622 |
+
"64": {
|
| 623 |
+
"title": "Humorous hate speech on social media: A mixed-methods investigation of users\u2019 perceptions and processing of hateful memes.",
|
| 624 |
+
"author": "Ursula Kristin Schmid. 2023.",
|
| 625 |
+
"venue": "New Media & Society.",
|
| 626 |
+
"url": null
|
| 627 |
+
}
|
| 628 |
+
},
|
| 629 |
+
{
|
| 630 |
+
"65": {
|
| 631 |
+
"title": "Explaining toxic text via knowledge enhanced text generation.",
|
| 632 |
+
"author": "Rohit Sridhar and Diyi Yang. 2022.",
|
| 633 |
+
"venue": "In NAACL.",
|
| 634 |
+
"url": null
|
| 635 |
+
}
|
| 636 |
+
},
|
| 637 |
+
{
|
| 638 |
+
"66": {
|
| 639 |
+
"title": "A survey on hate speech detection and sentiment analysis using machine learning and deep learning models.",
|
| 640 |
+
"author": "Malliga Subramanian, Veerappampalayam Easwaramoorthy Sathiskumar, G Deepalakshmi, Jaehyuk Cho, and G Manikandan. 2023.",
|
| 641 |
+
"venue": "Alexandria Engineering Journal.",
|
| 642 |
+
"url": null
|
| 643 |
+
}
|
| 644 |
+
},
|
| 645 |
+
{
|
| 646 |
+
"67": {
|
| 647 |
+
"title": "Ruhate-mm: Identification of hate speech and targets using multimodal data from russia-ukraine crisis.",
|
| 648 |
+
"author": "Surendrabikram Thapa, Farhan Ahmad Jafri, Kritesh Rauniyar, Mehwish Nasim, and Usman Naseem. 2024.",
|
| 649 |
+
"venue": "In Companion Proceedings of the ACM on Web Conference 2024, pages 1854\u20131863.",
|
| 650 |
+
"url": null
|
| 651 |
+
}
|
| 652 |
+
},
|
| 653 |
+
{
|
| 654 |
+
"68": {
|
| 655 |
+
"title": "A multi-modal dataset for hate speech detection on social media: Case-study of russia-ukraine conflict.",
|
| 656 |
+
"author": "Surendrabikram Thapa, Aditya Shah, Farhan Jafri, Usman Naseem, and Imran Razzak. 2022.",
|
| 657 |
+
"venue": "In CASE@EMNLP.",
|
| 658 |
+
"url": null
|
| 659 |
+
}
|
| 660 |
+
},
|
| 661 |
+
{
|
| 662 |
+
"69": {
|
| 663 |
+
"title": "Llama 2: Open foundation and fine-tuned chat models.",
|
| 664 |
+
"author": "Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. 2023.",
|
| 665 |
+
"venue": "arXiv preprint arXiv:2307.09288.",
|
| 666 |
+
"url": null
|
| 667 |
+
}
|
| 668 |
+
},
|
| 669 |
+
{
|
| 670 |
+
"70": {
|
| 671 |
+
"title": "Visualizing vitriol: Hate speech and image sharing in the 2020 singaporean elections.",
|
| 672 |
+
"author": "Joshua Uyheng, Lynnette Hui Xian Ng, and Kathleen M Carley. 2020.",
|
| 673 |
+
"venue": "discourse, 7:17.",
|
| 674 |
+
"url": null
|
| 675 |
+
}
|
| 676 |
+
},
|
| 677 |
+
{
|
| 678 |
+
"71": {
|
| 679 |
+
"title": "Detecting and correcting hate speech in multimodal memes with large visual language model.",
|
| 680 |
+
"author": "Minh-Hao Van and Xintao Wu. 2023.",
|
| 681 |
+
"venue": "CoRR.",
|
| 682 |
+
"url": null
|
| 683 |
+
}
|
| 684 |
+
},
|
| 685 |
+
{
|
| 686 |
+
"72": {
|
| 687 |
+
"title": "Learning from the worst: Dynamically generated datasets to improve online hate detection.",
|
| 688 |
+
"author": "Bertie Vidgen, Tristan Thrush, Zeerak Waseem, and Douwe Kiela. 2021.",
|
| 689 |
+
"venue": "In ACL-IJCNLP.",
|
| 690 |
+
"url": null
|
| 691 |
+
}
|
| 692 |
+
},
|
| 693 |
+
{
|
| 694 |
+
"73": {
|
| 695 |
+
"title": "Evaluating GPT-3 generated explanations for hateful content moderation.",
|
| 696 |
+
"author": "Han Wang, Ming Shan Hee, Md. Rabiul Awal, Kenny Tsu Wei Choo, and Roy Ka-Wei Lee. 2023a.",
|
| 697 |
+
"venue": "In IJCAI.",
|
| 698 |
+
"url": null
|
| 699 |
+
}
|
| 700 |
+
},
|
| 701 |
+
{
|
| 702 |
+
"74": {
|
| 703 |
+
"title": "Multihateclip: A multilingual benchmark dataset for hateful video detection on youtube and bilibili.",
|
| 704 |
+
"author": "Han Wang, Tan Rui Yang, Usman Naseem, and Roy Ka-Wei Lee. 2024.",
|
| 705 |
+
"venue": "arXiv preprint arXiv:2408.03468.",
|
| 706 |
+
"url": null
|
| 707 |
+
}
|
| 708 |
+
},
|
| 709 |
+
{
|
| 710 |
+
"75": {
|
| 711 |
+
"title": "Large language models are latent variable models: Explaining and finding good demonstrations for in-context learning.",
|
| 712 |
+
"author": "Xinyi Wang, Wanrong Zhu, Michael Saxon, Mark Steyvers, and William Yang Wang. 2023b.",
|
| 713 |
+
"venue": "In NeurIPS.",
|
| 714 |
+
"url": null
|
| 715 |
+
}
|
| 716 |
+
},
|
| 717 |
+
{
|
| 718 |
+
"76": {
|
| 719 |
+
"title": "Hateful symbols or hateful people? predictive features for hate speech detection on twitter.",
|
| 720 |
+
"author": "Zeerak Waseem and Dirk Hovy. 2016.",
|
| 721 |
+
"venue": "In NAACL SRW.",
|
| 722 |
+
"url": null
|
| 723 |
+
}
|
| 724 |
+
},
|
| 725 |
+
{
|
| 726 |
+
"77": {
|
| 727 |
+
"title": "Spectrogram-based classification of spoken foul language using deep cnn.",
|
| 728 |
+
"author": "Abdulaziz Saleh Ba Wazir, Hezerul Abdul Karim, Mohd Haris Lye Abdullah, Sarina Mansor, Nouar AlDahoul, Mohammad Faizal Ahmad Fauzi, and John See. 2020.",
|
| 729 |
+
"venue": "In MMSP.",
|
| 730 |
+
"url": null
|
| 731 |
+
}
|
| 732 |
+
},
|
| 733 |
+
{
|
| 734 |
+
"78": {
|
| 735 |
+
"title": "Cross-lingual few-shot learning on unseen languages.",
|
| 736 |
+
"author": "Genta Winata, Shijie Wu, Mayank Kulkarni, Thamar Solorio, and Daniel Preo\u0163iuc-Pietro. 2022.",
|
| 737 |
+
"venue": "In AACL.",
|
| 738 |
+
"url": null
|
| 739 |
+
}
|
| 740 |
+
},
|
| 741 |
+
{
|
| 742 |
+
"79": {
|
| 743 |
+
"title": "Detection of hate speech in videos using machine learning.",
|
| 744 |
+
"author": "Ching Seh Wu and Unnathi Bhandary. 2020.",
|
| 745 |
+
"venue": "In Int. Conf. on Comp. Science and Comp. Int. IEEE.",
|
| 746 |
+
"url": null
|
| 747 |
+
}
|
| 748 |
+
},
|
| 749 |
+
{
|
| 750 |
+
"80": {
|
| 751 |
+
"title": "HARE: Explainable hate speech detection with step-by-step reasoning.",
|
| 752 |
+
"author": "Yongjin Yang, Joonkee Kim, Yujin Kim, Namgyu Ho, James Thorne, and Se-Young Yun. 2023a.",
|
| 753 |
+
"venue": "In EMNLP (Findings), pages 5490\u20135505.",
|
| 754 |
+
"url": "https://doi.org/10.18653/v1/2023.findings-emnlp.365"
|
| 755 |
+
}
|
| 756 |
+
},
|
| 757 |
+
{
|
| 758 |
+
"81": {
|
| 759 |
+
"title": "The dawn of lmms: Preliminary explorations with gpt-4v (ision).",
|
| 760 |
+
"author": "Zhengyuan Yang, Linjie Li, Kevin Lin, Jianfeng Wang, Chung-Ching Lin, Zicheng Liu, and Lijuan Wang. 2023b.",
|
| 761 |
+
"venue": "arXiv preprint arXiv:2309.17421.",
|
| 762 |
+
"url": null
|
| 763 |
+
}
|
| 764 |
+
},
|
| 765 |
+
{
|
| 766 |
+
"82": {
|
| 767 |
+
"title": "How hate speech varies by target identity: a computational analysis.",
|
| 768 |
+
"author": "Michael Miller Yoder, Lynnette Hui Xian Ng, David West Brown, and Kathleen M Carley. 2022.",
|
| 769 |
+
"venue": "arXiv preprint arXiv:2210.10839.",
|
| 770 |
+
"url": null
|
| 771 |
+
}
|
| 772 |
+
},
|
| 773 |
+
{
|
| 774 |
+
"83": {
|
| 775 |
+
"title": "Audio-based toxic language classification using self-attentive convolutional neural network.",
|
| 776 |
+
"author": "Midia Yousefi and Dimitra Emmanouilidou. 2021.",
|
| 777 |
+
"venue": "In EUSIPCO.",
|
| 778 |
+
"url": null
|
| 779 |
+
}
|
| 780 |
+
},
|
| 781 |
+
{
|
| 782 |
+
"84": {
|
| 783 |
+
"title": "Hate speech and counter speech detection: Conversational context does matter.",
|
| 784 |
+
"author": "Xinchen Yu, Eduardo Blanco, and Lingzi Hong. 2022a.",
|
| 785 |
+
"venue": "In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 5918\u20135930.",
|
| 786 |
+
"url": null
|
| 787 |
+
}
|
| 788 |
+
},
|
| 789 |
+
{
|
| 790 |
+
"85": {
|
| 791 |
+
"title": "Hate speech and counter speech detection: Conversational context does matter.",
|
| 792 |
+
"author": "Xinchen Yu, Eduardo Blanco, and Lingzi Hong. 2022b.",
|
| 793 |
+
"venue": "In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies.",
|
| 794 |
+
"url": null
|
| 795 |
+
}
|
| 796 |
+
},
|
| 797 |
+
{
|
| 798 |
+
"86": {
|
| 799 |
+
"title": "A fine-grained taxonomy of replies to hate speech.",
|
| 800 |
+
"author": "Xinchen Yu, Ashley Zhao, Eduardo Blanco, and Lingzi Hong. 2023.",
|
| 801 |
+
"venue": "In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 7275\u20137289.",
|
| 802 |
+
"url": null
|
| 803 |
+
}
|
| 804 |
+
},
|
| 805 |
+
{
|
| 806 |
+
"87": {
|
| 807 |
+
"title": "A survey of large language models.",
|
| 808 |
+
"author": "Wayne Xin Zhao, Kun Zhou, Junyi Li, Tianyi Tang, Xiaolei Wang, Yupeng Hou, Yingqian Min, Beichen Zhang, Junjie Zhang, Zican Dong, et al. 2023.",
|
| 809 |
+
"venue": "arXiv preprint arXiv:2303.18223.",
|
| 810 |
+
"url": null
|
| 811 |
+
}
|
| 812 |
+
}
|
| 813 |
+
],
|
| 814 |
+
"url": "http://arxiv.org/html/2401.16727v4"
|
| 815 |
+
}
|
20241030/2402.00123v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2402.00793v3.json
ADDED
|
@@ -0,0 +1,197 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"title": "Human Expertise in Algorithmic Prediction",
|
| 3 |
+
"abstract": "We introduce a novel framework for incorporating human expertise into algorithmic predictions. Our approach leverages human judgment to distinguish inputs which are algorithmically indistinguishable, or \u201clook the same\" to predictive algorithms. We argue that this framing clarifies the problem of human-AI collaboration in prediction tasks, as experts often form judgments by drawing on information which is not encoded in an algorithm\u2019s training data. Algorithmic indistinguishability yields a natural test for assessing whether experts incorporate this kind of \u201cside information\", and further provides a simple but principled method for selectively incorporating human feedback into algorithmic predictions. We show that this method provably improves the performance of any feasible algorithmic predictor and precisely quantify this improvement. We find empirically that although algorithms often outperform their human counterparts on average, human judgment can improve algorithmic predictions on specific instances (which can be identified ex-ante). In an X-ray classification task, we find that this subset constitutes nearly of the patient population. Our approach provides a natural way of uncovering this heterogeneity and thus enabling effective human-AI collaboration.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Despite remarkable advances in machine learning, human judgment continues to play a critical role in many high-stakes prediction tasks. For example, consider the problem of triage in the emergency room, where healthcare providers assess and prioritize patients for immediate care. On one hand, prognostic algorithms offer significant promise for improving triage decisions; indeed, algorithmic predictions are often more accurate than even expert human decision makers [1 ###reference_b1###, 2 ###reference_b2###, 3 ###reference_b3###, 4 ###reference_b4###, 5 ###reference_b5###, 6 ###reference_b6###, 7 ###reference_b7###, 8 ###reference_b8###]. On the other hand, predictive algorithms may fail to fully capture the relevant context for each individual. For example, an algorithmic risk score may only have access to tabular electronic health records or other structured data (e.g., medical imaging), while a physician has access to many additional modalities\u2014not least of which is the ability to directly examine the patient!\nThese two observations\u2014that algorithms often outperform humans, but humans often have access to a richer information set\u2014are not in conflict with each other. Indeed, [9 ###reference_b9###] find exactly this phenomenon in an analysis of emergency room triage decisions. This suggests that, even in settings where algorithms outperform humans, algorithms might still benefit from some form of human input. Ideally this collaboration will yield human-AI complementarity [10 ###reference_b10###, 11 ###reference_b11###], in which a joint system outperforms either a human or algorithm working alone. Our work thus begins with the following question:\nWhen (and how) can human judgment improve the predictions of any learning algorithm?\nExample: X-ray classification. Consider the problem of diagnosing atelectasis (a partially or fully collapsed lung; we study this task in detail in Section 5 ###reference_###). 
Today\u2019s state-of-the-art deep learning models can perform well on these kinds of classification tasks using only a patient\u2019s chest X-ray as input [12 ###reference_b12###, 13 ###reference_b13###, 14 ###reference_b14###]. We are interested in whether we can further improve these algorithmic predictions by incorporating a \u201csecond opinion\u201d from a physician, particularly because the physician may have access to information (e.g., by directly observing the patient) which is not present in the X-ray.\nA first heuristic, without making any assumptions about the available predictive models, is to ask whether a physician can distinguish patients whose imaging data are identical. For example, if a physician can correctly indicate that one patient is suffering from atelectasis while another is not\u2014despite the patients having identical chest X-rays\u2014the physician must have information that the X-ray does not capture. In principle, this could form the basis for a statistical test: we could ask whether the physician performs better than random in distinguishing a large number of such patients. If so, even a predictive algorithm which outperforms the physician might benefit from human input.\nOf course, we are unlikely to find identical observations in continuous-valued and/or high-dimensional data (like X-rays). A natural relaxation is to instead consider observations which are sufficiently \u201csimilar\u201d, as suggested by [9 ###reference_b9###]. In this work we propose a more general notion of algorithmic indistinguishability, or coarser subsets of inputs in which no algorithm (in some rich, user-defined class) has significant predictive power. We show that these subsets can be discovered via a novel connection to multicalibration [15 ###reference_b15###], and formally demonstrate that using human feedback to predict outcomes within these subsets can outperform any algorithmic predictor (in the same user-defined class). 
In addition to being tractable, this framework is relevant from a decision-theoretic perspective: although we\u2019ve focused thus far on algorithms\u2019 fundamental informational constraints, it is also natural to ask whether an expert provides signal which is merely difficult for an algorithm to learn directly (due to e.g., limited training data or computational constraints). Our approach naturally interpolates between these contexts by defining indistinguishability with respect to whichever class of models is practically relevant for a given prediction task. We elaborate on these contributions below.\nContributions. We propose a novel framework for human-AI collaboration in prediction tasks. Our approach uses human feedback to refine predictions within sets of inputs which are algorithmically indistinguishable, or \u201clook the same\" to predictive algorithms. In Section 4 ###reference_### we present a simple method to incorporate this feedback only when it improves on the best feasible predictive model (and precisely quantify this improvement). This extends the \u201comnipredictors\u201d result of [16 ###reference_b16###] in the special case of squared error, which may be of independent interest.111We elaborate on connections to [16 ###reference_b16###] in Appendix D ###reference_###. In Section 5 ###reference_### we present experiments demonstrating that although humans fail to outperform algorithmic predictors on average, there exist specific (algorithmically indistinguishable) instances on which humans are more accurate than the best available predictor (and these instances are identifiable ex ante).222Code to replicate our experiments is available at https://github.com/ralur/heap-repl ###reference_###. In Section 6 ###reference_### we consider the complementary setting in which an algorithm provides recommendations to many downstream users, who independently choose when to comply. 
We provide conditions under which a predictor is robust to these compliance patterns, and can thus be simultaneously optimal for all downstream users."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Related work",
|
| 15 |
+
"text": "The relative strengths of humans and algorithms. Our work is motivated by large body of literature which studies the relative strengths of human judgment and algorithmic decision making [1 ###reference_b1###, 2 ###reference_b2###, 3 ###reference_b3###, 5 ###reference_b5###] or identifies behavioral biases in decision making [17 ###reference_b17###, 18 ###reference_b18###, 19 ###reference_b19###, 20 ###reference_b20###]. More recent work also studies whether predictive algorithms can improve expert decision making [4 ###reference_b4###, 8 ###reference_b8###, 21 ###reference_b21###, 14 ###reference_b14###].\nRecommendations, deferral and complementarity. One popular approach for incorporating human judgment into algorithmic predictions is by deferring some instances to a human decision maker [22 ###reference_b22###, 23 ###reference_b23###, 24 ###reference_b24###, 25 ###reference_b25###, 26 ###reference_b26###, 27 ###reference_b27###]. Other work studies contexts where human decision makers are free to override algorithmic recommendations [28 ###reference_b28###, 29 ###reference_b29###, 30 ###reference_b30###, 31 ###reference_b31###, 14 ###reference_b14###], which may suggest alternative design criteria for these algorithms [32 ###reference_b32###, 33 ###reference_b33###, 34 ###reference_b34###]. More generally, systems which achieve human-AI complementarity (as defined in Section 1 ###reference_###) have been previously studied in [35 ###reference_b35###, 32 ###reference_b32###, 36 ###reference_b36###, 37 ###reference_b37###, 38 ###reference_b38###, 39 ###reference_b39###, 40 ###reference_b40###].\n[11 ###reference_b11###] develop a comprehensive taxonomy of this area, which generally takes the predictor as given, or learns a predictor which is optimized to complement a particular model of human decision making. 
In contrast, we give stronger results which demonstrate when human judgment can improve the performance of any model in a rich class of possible predictors (Section 4 ###reference_###), or when a single algorithm can complement many heterogeneous users (Section 6 ###reference_###).\nPerformative prediction. A recent line of work studies performative prediction [41 ###reference_b41###], or settings in which predictions influence future outcomes. For example, predicting the risk of adverse health outcomes may directly inform treatment decisions, which in turn affects future health outcomes. This can complicate the design and evaluation of predictive algorithms, and there is a growing literature which seeks to address these challenges [42 ###reference_b42###, 43 ###reference_b43###, 44 ###reference_b44###, 45 ###reference_b45###, 46 ###reference_b46###, 47 ###reference_b47###, 48 ###reference_b48###, 49 ###reference_b49###, 50 ###reference_b50###, 51 ###reference_b51###, 52 ###reference_b52###, 53 ###reference_b53###, 54 ###reference_b54###]. Performativity is also closely related to the selective labels problem, in which some historical outcomes are unobserved as a consequence of past human decisions [55 ###reference_b55###]. Though these issues arise in many canonical human-AI collaboration tasks, we focus on standard supervised learning problems in which predictions do not causally affect the outcome of interest. These include e.g., weather prediction, stock price forecasting and many medical diagnosis tasks, including the X-ray diagnosis task we study in Section 5 ###reference_###. In particular, although a physician\u2019s diagnosis may inform subsequent treatment decisions, it does not affect the contemporaneous presence or absence of a disease. 
More generally, our work can be applied to any \u201cprediction policy problem\u201d, where accurate predictions can be translated into policy gains without explicitly modeling causality [56 ###reference_b56###].\nAlgorithmic monoculture. Our results can be viewed as one approach to mitigating algorithmic monoculture, in which different algorithms make similar decisions and thus similar mistakes [57 ###reference_b57###, 58 ###reference_b58###]. This could occur because these systems are trained on similar datasets, or because they share similar inductive biases. We argue that these are precisely the settings in which a \u201cdiversifying\u201d human opinion may be especially valuable. We find empirical evidence for this in Section 5 ###reference_###: on instances where multiple models agree on a prediction, human judgment adds substantial predictive value.\nMulticalibration, omnipredictors and boosting. Our results make use of tools from theoretical computer science, particularly work on omnipredictors [16 ###reference_b16###] and its connections to multicalibration. [59 ###reference_b59###] show that multicalibration is tightly connected to a cryptographic notion of indistinguishability, which serves as conceptual inspiration for our work. Finally, [60 ###reference_b60###] provide an elegant boosting algorithm for learning multicalibrated partitions that we make use of in our experiments, and [61 ###reference_b61###] provide results which reveal tight connections between a related notion of \u201cswap agnostic learning\u201d, multi-group fairness, omniprediction and outcome indistinguishability."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "Methodology and preliminaries",
|
| 21 |
+
"text": "Notation. Let be a random variable denoting the inputs (or \u201cfeatures\u201d) which are available for making algorithmic predictions about an outcome . Let be an expert\u2019s prediction of , and let denote realizations of the corresponding random variables. Our approach is parameterized by a class of predictors , which is some set of functions mapping to . We interpret as the class of predictive models which are relevant (or feasible to implement) for a given prediction task; we discuss this choice further below. Broadly, we are interested in whether the expert prediction provides a predictive signal which cannot be extracted from by any .\nChoice of model class . For now we place no restrictions on , but it\u2019s helpful to consider a concrete model class (e.g., a specific neural network architecture) from which, given some training data, one could derive a particular model (e.g., via empirical risk minimization over ). The choice of could be guided by practical considerations; some domains might require interpretable models (e.g., linear functions) or be subject to computational constraints. We might also simply believe that a certain architecture or functional form is well suited to the task of interest. In any case, we are interested in whether human judgment can provide information which is not conveyed by any model in this class, but are agnostic as to how this is accomplished: an expert may have information which is not encoded in , or be deploying a decision rule which is not in \u2014 or both!\nAnother choice is to take to model more abstract limitations on the expert\u2019s cognitive process. In particular, to model experts who are subject to \u201cbounded rationality\u201d [62 ###reference_b62###, 63 ###reference_b63###], might be the set of functions which can be efficiently computed (e.g., by a circuit of limited complexity). 
In this case, an expert who provides a prediction which cannot be modeled by any must have access to information which is not present in the training data. We take the choice of as given, but emphasize that these two approaches yield qualitatively different insight about human expertise.\nIndistinguishability with respect to . Our approach will be to use human input to distinguish observations which are indistinguishable to any predictor . We formalize this notion of indistinguishability as follows:\nFor some , a set is -indistinguishable with respect to a function class and target if, for all ,\nTo interpret this definition, observe that the subset can be viewed as generalizing the intuition given in Section 1 ###reference_### for grouping identical inputs. In particular, rather than requiring that all are exactly equal, Definition 3.1 ###reference_theorem1### requires that all members of effectively \u201clook the same\" for the purposes of making algorithmic predictions about , as every is only weakly related to the outcome within . We now adopt the definition of a multicalibrated partition [16 ###reference_b16###] as follows:\nFor , is an -multicalibrated partition with respect to and if (1) partitions and (2) each is -indistinguishable with respect to and .333This is closely related to -approximate multicalibration [16 ###reference_b16###], which requires that Definition 3.1 ###reference_theorem1### merely holds in expectation over the partition. We work with a stronger pointwise definition for clarity, but our results can also be interpreted as holding for the \u2018typical\u2019 element of an -approximately multicalibrated partition.\nIntuitively, the partition \u201cextract[s] all the predictive power\" from [16 ###reference_b16###]; within each element of the partition, every is only weakly related to the outcome . 
Thus, while knowing that an input lies in subset may be highly informative for predicting \u2014 for example, it may be that for all \u2014 no predictor provides significant additional signal within . We provide a stylized example of such partitions in Figure 1 ###reference_### below.\n###figure_1### ###figure_2### It\u2019s not obvious that such partitions are feasible to compute, or even that they should exist. We\u2019ll show in Appendix B ###reference_### however that a multicalibrated partition can be efficiently computed for many natural classes of functions. Where the relevant partition is clear from context, we use to denote expectation, variance and covariance conditional on the event that . For a subset , we use and analogously.\nIncorporating human judgment into predictions. To incorporate human judgment into predictions, a natural heuristic is to first test whether the conditional covariance is nonzero within some indistinguishable subset. Intuitively, this indicates that the expert prediction is informative even though every model is not. This suggests a simple method for incorporating human expertise: first, learn a partition which is multicalibrated with respect to , and then use to predict within each indistinguishable subset. We describe this procedure in Algorithm 1 ###reference_### below, where we define a univariate learning algorithm as a procedure which takes one or more training observations and outputs a function which predicts using . For example, might be an algorithm which fits a univariate linear or logistic regression which predicts as a function of .\nAlgorithm 1 ###reference_### simply learns a different predictor of as a function of within each indistinguishable subset. As we show below, even simple instantiations of this approach can outperform the squared error achieved by any . 
This approach can also be readily extended to more complicated forms of human input (e.g., freeform text, which can be represented as a high-dimensional vector rather than a point prediction ), and can be used to test whether human judgment provides information that an algorithm cannot learn from the available training data. We turn to these results below."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "4",
|
| 25 |
+
"parent_section_id": null,
|
| 26 |
+
"section_name": "Technical results",
|
| 27 |
+
"text": "In this section we present our main technical results. For clarity, all results in this section are presented in terms of population quantities, and assume oracle access to a multicalibrated partition. We present corresponding generalization arguments and background on learning multicalibrated partitions in Appendices A ###reference_### and B ###reference_###, respectively. All proofs are deferred to Appendix C ###reference_###.\nLet be an -multicalibrated partition with respect to a model class and target . Let the random variable be such that iff . Define \nas\nThen, for any and ,\nThat is, the squared error incurred by the univariate linear regression of on within each indistinguishable subset outperforms that of any . This improvement is at least , up to an additive approximation error . We emphasize that is an arbitrary class, and may include complex, nonlinear predictors. Nonetheless, given a multicalibrated partition, a simple linear predictor can improve on the best . Furthermore, this approach allows us to selectively incorporate human feedback: whenever , we recover a coefficient of .444Recall that the population coefficient in a univariate linear regression of on is .\nNonlinear functions and high-dimensional feedback. Theorem 4.1 ###reference_theorem1### corresponds to instantiating Algorithm 1 ###reference_### with a univariate linear regression, but the same insight generalizes readily to other functional forms. For example, if is binary, it might be desirable to instead fit a logistic regression. We provide a similar guarantee for generic nonlinear predictors via Corollary A.1 ###reference_theorem1### in Appendix A ###reference_###. Furthermore, while the results above assume that an expert provides a prediction , the same insight extends to richer forms of feedback. 
For example, in a medical diagnosis task, a physician might produce free-form clinical notes which contain information that is not available in tabular electronic health records. Incorporating this kind of feedback requires a learning algorithm better suited to high-dimensional inputs (e.g., a deep neural network), which motivates our following result.\nLet be an -indistinguishable subset with respect to a model class and target . Let denote expert feedback which takes values in some arbitrary domain (e.g., freeform text, which might be tokenized to take values in for some ), and let be a function which satisfies the following approximate calibration condition for some and for all :\nThen, for any ,\nTo interpret this result, notice that (4 ###reference_###) requires only that the prediction cannot be significantly improved by any linear post-processing function. For example, this condition is satisfied by any calibrated predictor .555A calibrated predictor is one where . This is a fairly weak condition; for example, it is satisfied by the constant predictor [64 ###reference_b64###, 65 ###reference_b65###]. Furthermore, any which does not satisfy (4 ###reference_###) can be transformed by letting ; i.e., by linearly regressing on , in which case satisfies (4 ###reference_###). This result mirrors Theorem 4.1 ###reference_theorem1###: a predictor which depends only on human feedback can improve on the best within each element of a multicalibrated partition.\nTesting for informative experts. While we have thus far focused on incorporating human judgment to improve predictions, we may also be interested in the related question of simply testing whether human judgment provides information that cannot be conveyed by any algorithmic predictor. 
For example, such a test might be valuable in deciding whether to automate a given prediction task.\nTheorem 4.1 ###reference_theorem1### suggests a heuristic for such a test: if the conditional covariance is large, then we might expect that is somehow \u201cmore informative\u201d than any within . While covariance only measures a certain form of linear dependence between random variables, we now show that, in the special case of binary-valued algorithmic predictors, computing the covariance of and within an indistinguishable subset serves as a stronger test for whether provides any predictive information which cannot be expressed by the class .\nLet be an -multicalibrated partition for a binary-valued model class and target outcome . For all , let there be such that . Then, for all ,\nThat is, if each indistinguishable subset has a corresponding predictor which \u201cexplains\" the signal provided by the human, then the covariance of and is bounded within every . The contrapositive implies that a sufficiently large value of serves as a certificate for the property that no can fully explain the information that provides about within each indistinguishable subset. This can be viewed as a finer-grained extension of the test proposed in [9 ###reference_b9###].\nTaken together, our results demonstrate that algorithmic indistinguishability provides a principled way of reasoning about the complementary value of human judgment. Furthermore, this approach yields a concrete methodology for incorporating this expertise: we can simply use human feedback to predict within subsets which are indistinguishable on the basis of alone. Operationalizing these results depends critically on the ability to learn multicalibrated partitions, e.g., via the boosting algorithm proposed in [60 ###reference_b60###]. We provide additional detail on learning such partitions in Appendix B ###reference_###."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "5",
|
| 31 |
+
"parent_section_id": null,
|
| 32 |
+
"section_name": "Experiments",
|
| 33 |
+
"text": ""
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "5.1",
|
| 37 |
+
"parent_section_id": "5",
|
| 38 |
+
"section_name": "Chest X-ray interpretation",
|
| 39 |
+
"text": "We now instantiate our framework in the context of the chest X-ray classification task outlined in Section 1 ###reference_###. We consider the eight predictive models studied in [13 ###reference_b13###], which were selected from the leaderboard of a large public competition for X-ray image classification. These models serve as a natural benchmark class , against which we investigate whether radiologist assessments provide additional predictive value. These models were trained on a dataset of 224,316 chest radiographs collected across 65,240 patients [12 ###reference_b12###], and then evaluated on a holdout set of randomly sampled radiographs. This holdout set was annotated by eight radiologists for the presence () or absence () of five selected pathologies; the majority vote of five radiologists serves as a ground truth label, while the remaining three are held out to assess the accuracy of individual radiologists [13 ###reference_b13###].\nIn this section we focus on diagnosing atelectasis (a partial or complete collapse of the lung); we provide results for the other four pathologies in Appendix G ###reference_###. We first show, consistent with [12 ###reference_b12###, 13 ###reference_b13###], that radiologists fail to consistently outperform algorithmic classifiers on average. However, we then demonstrate that radiologists do outperform all eight leaderboard algorithms on a large subset (nearly of patients) which is indistinguishable with respect to this class of benchmark predictors. 
Because radiologists in this experimental setting only have access to the patient\u2019s chest X-ray, and because we do not apply any postprocessing to the radiologist assessments (i.e., , as defined in Algorithm 1 ###reference_###, is simply the identity function, which is most natural when and are binary), we interpret these results as providing a lower bound on the improvement that radiologists can provide relative to relying solely on algorithmic classifiers.\nAlgorithms are competitive with expert radiologists. We first compare the performance of the three benchmark radiologists to that of the eight leaderboard algorithms in Figure 2 ###reference_###. Following [13 ###reference_b13###], we use the Matthew\u2019s Correlation Coefficient (MCC) as a standard measure of binary classification accuracy [66 ###reference_b66###]. The MCC is simply the rescaled covariance between each prediction and the outcome, which corresponds directly to Definition 3.1 ###reference_theorem1###. In Figure 2 ###reference_### we see that radiologist performance is statistically indistinguishable from that of the algorithmic classifiers.\n###figure_3### Radiologists can refine algorithmic predictions. We now apply the results of Section 4 ###reference_### to investigate heterogeneity in the relative performance of humans and algorithms. First, we partition the patients into a pair of approximately indistinguishable subsets, which are exceptionally straightforward to compute when the class has a finite number of predictors (we provide additional detail in Appendix F ###reference_###). 
We plot the conditional performance of both the radiologists and the eight leaderboard algorithms within each of these subsets in Figure 3 ###reference_###.\n###figure_4### While Figure 2 ###reference_### found no significant differences between radiologists\u2019 and algorithms\u2019 overall performance, Figure 3 ###reference_### reveals a large subset \u2014 subset , consisting of of our sample \u2014 where radiologists achieve a better MCC than every algorithm. In particular, every algorithm predicts a positive label for every patient in this subset, and radiologists identify a sizable fraction of true negatives that the algorithms miss. For example, radiologist achieves a true positive rate of and a true negative rate of , while the algorithms achieve corresponding rates of and .\nThis partition is not necessarily unique, and in principle an analyst could compare the performance of radiologists and algorithms across different subsets which could yield an even starker difference in conditional performance. However, even for discrete-valued data, searching over all possible subsets is computationally and statistically intractable; instead, our approach provides a principled way of identifying the particular subsets in which human judgment is likely to add predictive value.\nOther pathologies. Although we focus here on atelectasis, and the findings above are consistent for two of the other four pathologies considered in [13 ###reference_b13###] (pleural effusion and consolidation): although radiologists fail to outperform algorithms on average, at least two of the radiologists outperform algorithmic predictions on a sizable minority of patients. Results for cardiomegaly and edema appear qualitatively similar, but we lack statistical power. We present these results in Appendix G ###reference_###."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "5.2",
|
| 43 |
+
"parent_section_id": "5",
|
| 44 |
+
"section_name": "Prediction of success in human collaboration",
|
| 45 |
+
"text": "We next consider the visual prediction task studied in [67 ###reference_b67###]. In this work, the authors curate photos taken of participants after they attempt an Escape the Room\u2019 puzzle\u2014\u201ca physical adventure game in which a group is tasked with escaping a maze by collectively solving a series of puzzles\u201d [67 ###reference_b67###]. A separate set of subjects are then asked to predict whether the group in each photo succeeded in completing the puzzle. Subjects in the control arm perform this task without any form of training, while subjects in the remaining arms are first provided with four, eight and twelve labeled examples, respectively. Their performance is compared to that of five algorithmic predictors, which use high-level features extracted from each photo (e.g., number of people, gender and ethnic diversity, age distribution etc.) to make a competing prediction. We provide a full list of features in Appendix H ###reference_###.\nAccuracy and indistinguishability in visual prediction. As in the X-ray diagnosis task, we first compare the performance of human subjects to that of the five off-the-shelf predictive algorithms considered in [67 ###reference_b67###]. We again find that although humans fail to outperform the best predictive algorithms, their predictions add significant predictive value on instances where the algorithms agree on a positive label. As our results are similar to those in the previous section, we defer them to Appendix I ###reference_###. We now use this task to illustrate another feature of our framework, which is the ability to incorporate human judgment into a substantially richer class of models.\nMulticalibration over an infinite class. While our previous results illustrate that human judgment can complement a small, fixed set of predictive algorithms, it\u2019s possible that a richer class could obviate the need for human expertise. 
To explore this, we now consider an infinitely large but nonetheless simple class of shallow (depth ) regression trees. We denote this class by .\nAs in previous sections, our first step will be to learn a partition which is multicalibrated with respect to . However, because is infinitely large, enumerating each and clustering observations according to their predictions is infeasible. Instead, we apply the boosting algorithm proposed in [60 ###reference_b60###] to construct a predictor such that no can substantially improve on the squared error of within any of its approximate level sets .666We discuss the connection between this condition and multicalibration in Appendix B ###reference_###. As in the previous section, we cannot necessarily verify whether the multicalibration condition is satisfied empirically. However, the theory provides guidance for choosing subsets, within which we can directly test conditional performance. We plot the correlation of the human subjects\u2019 predictions with the true label within these level sets in Figure 4 ###reference_###.\n###figure_5### Figure 4 ###reference_### highlights a key insight provided by our framework. On one hand, the predictions made by are more accurate out of sample () than even the best performing cohort (). Nonetheless, the predictions made by all four cohorts of human subjects are predictive of the outcome within every nonempty level set of of .777Though this result may initially seem counterintuitive, recall the classical Simpson\u2019s paradox: in a setting where is uncorrelated (or even negatively correlated) with , there may still exist a partition of the data such that the two are positively correlated within every subgroup. This suggests that humans provide information which cannot be extracted from the data by any . While we focus on shallow regression trees for concreteness, this approach extends to any function class for which it is feasible to learn a multicalibrated partition."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "6",
|
| 49 |
+
"parent_section_id": null,
|
| 50 |
+
"section_name": "Robustness to noncompliance",
|
| 51 |
+
"text": "We have thus far focused on how an algorithm might incorporate human feedback to improve prediction accuracy, or how an algorithmic decision pipeline might selectively defer to a human expert. However, many decision support tools are deployed in the opposite setting where the user instead decides when to defer to the algorithm. For example, physicians with access to a prognostic risk score may choose to simply ignore the risk score at their discretion. Furthermore, it is common for hospitals to employ different norms and policies governing the use of algorithmic predictors [68 ###reference_b68###]. Thus, although it is tempting to simply provide all downstream users with the single \u201cbest\u201d risk score, such an approach can be suboptimal if users vary in their compliance behavior [32 ###reference_b32###]. We illustrate the challenges of this kind of heterogeneous user behavior via the following stylized example.\nExample: the challenge of noncompliance. Consider a generic prognostic risk score which makes recommendations regarding patient care. Although physicians generally comply with the algorithm\u2019s recommendations, they are free to override it as they see fit. For example, suppose that one particular physician believes (correctly or not) that the algorithm underweights high blood pressure as a risk factor, and thus ignores its recommendations for these patients. A second physician similarly ignores algorithmic recommendations and instead exercises their own judgment for patients 65 and older.\nWhat does an optimal risk score look like in this setting? For the first physician, we would like to select the algorithm which minimizes error on patients who do not have high blood pressure, as these are the patients for whom the physician uses the algorithm\u2019s recommendations. Similarly, for the second physician, we would like to minimize error on patients who are under 65. 
Of course, there is no guarantee that these are the same algorithm: empirical risk minimization over the first population will, in general, produce a different predictor than empirical risk minimization over the second population. This is not just a finite sample problem; given any restricted model class (e.g., linear predictors), the optimal predictor for one subpopulation may not be optimal for a different subpopulation. For both practical and ethical reasons however, we cannot design individualized predictors for every physician; we would like to instead provide a risk score which is simultaneously \u201coptimal\" (in a sense we make precise below) for every user.\nNoncompliance-robust prediction. In Section A.3 ###reference_###, we show that, without further assumptions, the setting described above poses a statistically intractable problem: if physicians choose whether to comply in arbitrary ways, then we need a predictor which is simultaneously optimal for every possible patient subpopulation. The only predictor which satisfies this criterion is the Bayes optimal predictor, which is infeasible to learn in a finite data regime.\nHowever, suppose instead that physicians decide whether to defer to the algorithm using relatively simple heuristics. If we believe we can model these heuristics as a \u201csimple\u201d function of observable patient characteristics \u2014 e.g., that all compliance patterns can be expressed as a shallow decision tree, even if particular compliance behavior varies across physicians \u2014 then we can leverage this structure to design a single optimal predictor. In particular, we show next that, given a partition which is multicalibrated over the class of possible user compliance patterns, we can learn predictors which remain optimal even when users only selectively adopt the algorithm\u2019s recommendations.\nLet be a class of binary compliance policies, where, for , indicates that the user complies with the algorithm at . 
Let be a class of predictors and let be a partition which is -multicalibrated with respect to and the product class . Then, ,\nThat is, given an appropriately multicalibrated partition, we can derive a predictor which is simultaneously near-optimal for every downstream user. In particular, observe that the left hand side of (7 ###reference_###) is the squared error incurred by the constant prediction within when the user defers to the algorithm. Although this prediction does not depend on the policy , it remains competitive with the squared error incurred by any for any policy.\nUnsurprisingly, the bound becomes vacuous as goes to (we cannot hope to learn anything on arbitrarily rare subsets). This is consistent with our interpretation of however, as the performance of the algorithm matters little if the decision maker ignores nearly all recommendations.\nThis result is complementary to those in Section 4 ###reference_###\u2014rather than learning to incorporate feedback from a single expert, we can instead learn a single predictor which is (nearly) optimal for a rich class of downstream users whose behavior is modeled by some ."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "7",
|
| 55 |
+
"parent_section_id": null,
|
| 56 |
+
"section_name": "Discussion and limitations",
|
| 57 |
+
"text": "In this work we propose an indistinguishability-based framework for human-AI collaboration. Under this framework, we develop a set of methods for testing whether experts provide a predictive signal which cannot be replicated by an algorithmic predictor, and extend our results to settings in which users selectively adopt algorithmic recommendations. Beyond these methodological contributions, we argue that our framing clarifies when and why human judgment can improve algorithmic performance. In particular, a primary theme in our work is that even if humans do not consistently outperform algorithms on average, selectively incorporating human judgment can often improve predictions.\nA key limitation of our work is a somewhat narrow focus on minimizing a well-defined loss function over a well-defined (and stationary) distribution. This fails to capture decision makers with richer, multidimensional preferences (e.g., fairness, robustness or simplicity), and does not extend to settings in which predictions influence future outcomes (see the discussion of performative prediction in Section 2 ###reference_###) or the distribution otherwise changes over time. However, we view indistinguishability as a powerful primitive for modeling these more complex scenarios; for example, a decision maker might impose additional preferences \u2014 like a desire for some notion of fairness \u2014 to distinguish inputs which are otherwise indistinguishable with respect to the \u201cprimary\u201d outcome of interest. At a technical level, our results rely on the ability to efficiently learn multicalibrated partitions. 
While we give conditions under which this is feasible in Appendix B ###reference_### and a finite sample analysis in Appendix A ###reference_###, finding such partitions can be challenging for rich function classes.\nFinally, we caution that even in contexts which fit neatly into our framework, human decision makers can be critical for ensuring interpretability and accountability. Thus, although our approach can provide guidance for choosing the appropriate level of automation, it does not address the practical or ethical concerns which arise. Despite these limitations, we argue that indistinguishability helps to clarify the role of human expertise in algorithmic decision making, and this framing in turn provides fundamental conceptual and methodological insights for enabling effective human-AI collaboration."
|
| 58 |
+
}
|
| 59 |
+
],
|
| 60 |
+
"appendix": [
|
| 61 |
+
{
|
| 62 |
+
"section_id": "Appendix 1",
|
| 63 |
+
"parent_section_id": null,
|
| 64 |
+
"section_name": "Appendix A Additional technical results",
|
| 65 |
+
"text": "In this section we present additional technical results which complement those in the main text. All proofs are deferred to Appendix C ###reference_###.\nBelow we provide a simple extension of Theorem 4.1 ###reference_theorem1### from univariate linear regression to arbitrary univariate predictors of given .\nLet be an -indistinguishable subset with respect to a model class and target . Let be a function which satisfies the following approximate Bayes-optimality condition for :\nThen, for any ,\nThat is, any function which is nearly as accurate (in terms of squared error) as the univariate conditional expectation function provides the same guarantee as in Theorem 4.1 ###reference_theorem1###. This conditional expectation function is exactly what e.g., a logistic regression of on seeks to model. We provide a proof in Appendix C ###reference_###.\nFor simplicity, the technical results in Section 4 ###reference_### are presented in terms of population quantities. In this section we consider the empirical analogue of Corollary 4.2 ###reference_theorem2###, and provide a generalization argument which relates these empirical quantities to the corresponding population results in Section 4 ###reference_###. We focus our attention on Corollary 4.2 ###reference_theorem2###, as the proof follows similarly for Theorem 4.1 ###reference_theorem1### and Corollary A.1 ###reference_theorem1###.\nLet be some class of predictors mapping to . We\u2019ll begin with the setup of Corollary 4.2 ###reference_theorem2###, with denoting a fixed, measurable subset of the input space (we\u2019ll generalize to a full partition below). Further let denote the number of training examples which lie in the subset , and denote i.i.d. samples from the unknown joint distribution over the random variables conditional on the event that . Let denote the empirical risk minimizer within . 
Our goal will be to show that if there exists some satisfying (4 ###reference_###), then the empirical risk minimizer also approximately satisfies (4 ###reference_###) with high probability.\nLet , where , denote the class of squared loss functions indexed by , and let denote the Rademacher complexity of . Let denote the measure of . Then, for any , with probability\nat least , we have\nThat is, if there exists some satisfying (4 ###reference_###), then the empirical risk minimizer within the subset also satisfies (4 ###reference_###) with high probability, up to an approximation error that depends on the Rademacher complexity of . For many natural classes of functions, including linear functions, the Rademacher complexity (1) tends to as and (2) can be sharply bounded in finite samples (see e.g., Chapter 4 in [69 ###reference_b69###]). We provide a proof of Lemma A.2 ###reference_theorem2### in Appendix C ###reference_###.\nIn this section we formalize the argument in Section 6 ###reference_### to show that it is infeasible to learn predictors which are simultaneously \u201coptimal\u201d (in a sense we make precise below) for many downstream users who independently choose when to comply with the algorithm\u2019s recommendations. We provide a proof in Appendix C ###reference_###.\nLet be some class of predictors which map a countable input space to . We interpret a compliance policy such that indicates that the user complies with the algorithm\u2019s recommendation at . For all , unless almost everywhere, then there exists a deferral policy and constant such that:\nLemma A.4 ###reference_theorem4### indicates that for any predictor which is not the Bayes optimal predictor, there exists a compliance policy which causes it to underperform a constant prediction on the instances for which it is ultimately responsible. 
Because learning the Bayes optimal predictor from a finite sample of data is generally infeasible, this indicates that a predictor cannot reasonably be made robust to an arbitrary deferral policy. The proof, which we provide below, is intuitive: the decision maker can simply choose to comply on exactly those instances where performs poorly."
|
| 66 |
+
},
|
| 67 |
+
{
|
| 68 |
+
"section_id": "Appendix 2",
|
| 69 |
+
"parent_section_id": null,
|
| 70 |
+
"section_name": "Appendix B Learning multicalibrated partitions",
|
| 71 |
+
"text": "In this section we discuss two sets of conditions on which enable the efficient computation of multicalibrated partitions. An immediate implication of our first result is that any class of Lipschitz predictors induce a multicalibrated partition.\nLevel sets of are multicalibrated. Observe that one way in which Definition 3.1 ###reference_theorem1### is trivially satisfied (with ) is whenever every is constant within a subset . We relax this insight as follows: if the variance of every is bounded within , then is approximately indistinguishable with respect to .\nLet be a class of predictors and be a subset of the input space. If:\nthen is -indistinguishable with respect to and .\nThis result yields a natural corollary: the approximate level sets of (i.e., sets in which the range of every is bounded) are approximately indistinguishable. We state this result formally as Corollary B.2 ###reference_theorem2### below. We use exactly this approach to finding multicalibrated partitions in our study of a chest X-ray classification task in Section 5 ###reference_###.\nLet be a class of predictors whose range is bounded within some . That is, for all :\nThen is -indistinguishable with respect to .\nLemma B.1 ###reference_theorem1### also implies a simple algorithm for finding multicalibrated partitions when is Lipschitz with respect to some distance metric : observations which are close under are guaranteed to be approximately indistinguishable with respect to . We state this result formally as Corollary B.3 ###reference_theorem3### below.\nLet be the set of -Lipschitz functions with respect to some distance metric on . That is:\nLet for be some ()-net on with respect to . Then is -multicalibrated with respect to .\nProofs of the results above are provided in Appendix C ###reference_###.\nMulticalibration via boosting. Recent work by [60 ###reference_b60###] demonstrates that multicalibration is closely related to boosting over a function class . 
In this section we first provide conditions, adapted from [60 ###reference_b60###], which imply that the level sets of a certain predictor are multicalibrated with respect to ; that is, the set for every in the range of is approximately indistinguishable. We then discuss how these conditions yield a natural boosting algorithm for learning a predictor which induces a multicalibrated partition. In the lemma below, we use to denote the range of a function .\nLet be a function class which is closed under affine transformations; i.e., for all , and let . Let be the target outcome, and be some predictor with countable range . If, for all :\nthen the level sets of are -multicalibrated with respect to and .\nTo interpret this result, observe that (16 ###reference_###) is the difference between the mean squared error of and the mean squared error of within each level set . Thus, if the best fails to significantly improve on the squared error of within a given level set , then is indistinguishable with respect to (which is merely restricted to functions that lie in ). [60 ###reference_b60###] give a boosting algorithm which, given a squared error regression oracle888Informally, a squared error regression oracle for is an algorithm which can efficiently output for any distribution over . When the distribution is over a finite set of training data, this is equivalent to empirical risk minimization. We refer to [60 ###reference_b60###] for additional details, including generalization arguments. for , outputs a predictor which satisfies (16 ###reference_###). We make use of this algorithm in Section 5 ###reference_### to learn a partition of the input space in a visual prediction task. 
Although the class we consider there (the class of shallow regression trees ) is not closed under affine transformations, boosting over this class captures the spirit of our main result: while no can improve accuracy within the level sets of , humans provide additional predictive signal within three of them.\nTaken together, the results in this section demonstrate that multicalibrated partitions can be efficiently computed for many natural classes of functions, which in turn enables the application of results in Section 4 ###reference_###."
|
| 72 |
+
},
|
| 73 |
+
{
|
| 74 |
+
"section_id": "Appendix 3",
|
| 75 |
+
"parent_section_id": null,
|
| 76 |
+
"section_name": "Appendix C Proofs of primary results",
|
| 77 |
+
"text": "In this section we present proofs of our main results. Proofs of auxiliary lemmas are deferred to Appendix E ###reference_###.\nThe following simple lemma will be useful in our subsequent proofs. Let be a binary random variable. Then for any other random variable, :\nThis is exactly corollary 5.1 in [16 ###reference_b16###]. We provide the proof in Appendix E ###reference_###.\nProof of Theorem 4.1 ###reference_theorem1###\nA well known fact about univariate linear regression is that the coefficient of determination (or ) is equal to the square of the Pearson correlation coefficient between the regressor and the outcome (or ). In our context, this means that within any indistinguishable subset we have:\nWhere (23 ###reference_###) is an application of Popoviciu\u2019s inequality for variances, and makes use of the fact that almost surely. We can then obtain the final result by applying the following lemma, which extends the main result in [16 ###reference_b16###]. We provide a proof in Appendix E ###reference_###, but for now simply state the result as Lemma C.2 ###reference_theorem2### below.\nLet be an -multicalibrated partition with respect to a real-valued function class and target outcome . For all and , it follows that:\nWe provide further discussion of the relationship between Lemma C.2 ###reference_theorem2### and the main result of [16 ###reference_b16###] in Appendix D ###reference_### below.\nChaining inequalities (24 ###reference_###) and (23 ###reference_###) yields the final result:\n\u220e\nProof of Corollary 4.2 ###reference_theorem2###\nThe proof is almost immediate. Let , be the population regression coefficients obtained by regressing on within (as in Theorem 4.1 ###reference_theorem1###; the only difference is that we consider a single indistinguishable subset rather than a multicalibrated partition). 
This further implies, by the approximate calibration condition (4 ###reference_###):\nThe proof then follows from that of Theorem 4.1 ###reference_theorem1###, replacing with .\n\u220e\nProof of Theorem 4.3 ###reference_theorem3###\nFix any .\nWhere (29 ###reference_###) is the law of total covariance, (30 ###reference_###) follows from the assumption that , (31 ###reference_###) is the Cauchy-Schwarz inequality and (32 ###reference_###) applies Popoviciu\u2019s inequality to bound the variance of (which is assumed to lie in almost surely).\nWe now focus on bounding . Recall that by assumption, , so we should expect that conditioning on does not change the expectation of by too much.\nWhere the last step follows because is assumed to be bounded in almost surely. Applying Lemma C.1 ###reference_theorem1### to (37 ###reference_###) yields:\nWhere the second inequality follows because our analysis is conditional on for some -indistinguishable subset . Plugging (38 ###reference_###) into (32 ###reference_###) completes the proof.\n\u220e\nProof of Lemma A.4 ###reference_theorem4###\nLet be any model and let be a subset such that (1) (2) the Bayes optimal predictor is constant within and (3) for all . Such a subset must exist by assumption. It follows immediately that choosing\nsuffices to ensure that underperforms the constant prediction on the subset which delegates to . This implies that even if includes the class of constant predictors \u2014perhaps the simplest possible class of predictors\u2014then we cannot hope to find some which is simultaneously optimal for any choice of deferral policy.\n\u220e\nProof of Theorem 6.1 ###reference_theorem1###\nWe start with the assumption that is -multicalibrated with respect to and the product class . That is, both of the following hold:\nFirst, we\u2019ll show that this implies that the covariance of and is bounded even conditional on compliance. 
To streamline presentation we state this as a separate lemma; the proof is provided further below.\nGiven the setup of Theorem 6.1 ###reference_theorem1###, the following holds for all and :\nWe provide a proof in Appendix E ###reference_###. By Lemma C.2 ###reference_theorem2###, Lemma C.3 ###reference_theorem3### implies, for all and :\nThis is close to what we want to prove, except that the prediction depends on the choice of the policy . We\u2019ll argue that by (39 ###reference_###), . Indeed, because is binary, we can apply Lemma C.1 ###reference_theorem1### to recover:\nWe rewrite the LHS of (42 ###reference_###) to make use of this identity as follows:\nWhere the last step follows by observing that either (1) or (2) exactly one of or is strictly positive. Assume that ; otherwise the bound follows trivially. We bound the positive term by recalling that lies in , and we bound the negative term by applying (44 ###reference_###). Thus, the product of these two terms is at least . Finally, combining (50 ###reference_###) with (42 ###reference_###) completes the proof.\n\u220e\nProof of Corollary A.1 ###reference_theorem1###\nObserve that, because the conditional expectation function minimizes squared error with respect to all univariate functions of , we must have:\nWhere , are the population regression coefficients obtained by regression on as in Theorem 4.1 ###reference_theorem1###. This further implies, by the approximate Bayes-optimality condition (8 ###reference_###):\nThe proof then follows immediately from that of Theorem 4.1 ###reference_theorem1###.\n\u220e\nProof of Lemma A.2 ###reference_theorem2###\nWe will adopt the notation from the setup of Corollary 4.2 ###reference_theorem2###. Further let be some class of predictors mapping to , over which we seek to learn the mean-squared-error minimizing within some subset .\nLet denote the number of training examples which lie in the subset , and let denote the empirical loss incurred by some within . 
Finally, let denote the population analogue of .\nBy Hoeffding\u2019s inequality we have:\nthat is, is at least half its expectation with high probability. Let , where , be the class of squared loss functions indexed by . Let denote the Rademacher complexity of , which is defined as follows:\nRademacher Complexity\nFor a fixed , let denote i.i.d. Rademacher random variables (recall that Rademacher random variables take values and with equal probability ). Let denote i.i.d. random variables taking values in some abstract domain , and let be a class of real-valued functions over the domain . The Rademacher complexity of is denoted by , and is defined as follows:\nWhere the expectation is taken over both and . Intuitively, the Rademacher complexity is the expected maximum correlation between some and the noise vector .\nWe now make use of a standard uniform convergence result, which is stated in terms of the Rademacher complexity of a function class. We reproduce this theorem (lightly adapted to our notation) from the textbook treatment provided in [69 ###reference_b69###] below:\n(adapted from [69 ###reference_b69###])\n\nFor any -uniformly-bounded class of functions , any positive integer and any scalar , we have:\nwith probability at least .\nApplying Theorem C.5 ###reference_theorem5### (noting that is uniformly bounded in ) implies:\nwith probability at least . Finally, combining (53 ###reference_###) with (56 ###reference_###) further implies, for any ,\nwith probability at least , as desired.\nProof of Corollary A.3 ###reference_theorem3###\nWe\u2019ll adopt the notation from the setup of Lemma A.2 ###reference_theorem2### and Corollary A.3 ###reference_theorem3###. Observe that, if we were to take the union of every element of the partition with measure , the result would be a subset of the input space with measure at most . 
Note that this is merely an analytical device; we need not identify which subsets these are.\nThus, with probability at least , a newly sampled test observation will lie in some element of the partition with measure at least . Conditional on this event, we can directly apply the result of Lemma A.2 ###reference_theorem2###, plugging in a lower bound of for . This yields, for any and , lies in a subset such that,\nwith probability at least over the distribution of the training data and a test observation , as desired.\n\u220e\n\u220e\nProof of Lemma B.1 ###reference_theorem1###\nWe want to show for all and some such that .\nFix any . We then have:\nWhere (60 ###reference_###) is the Cauchy-Schwarz inequality, (61 ###reference_###) is Popoviciu\u2019s inequality and makes use of the fact that is bounded in by assumption, and (62 ###reference_###) uses the assumption that .\n\u220e\nProof of Corollary B.2 ###reference_theorem2###\nWe want to show that :\nBy assumption, is bounded in a range of within . From this it follows by Popoviciu\u2019s inequality for variances that :\nThe proof then follows from Lemma B.1 ###reference_theorem1###.\n\u220e\nProof of Corollary B.3 ###reference_theorem3###\nWe want to show that :\nBecause is part of a -net, there exists some such that ; that is, is bounded almost surely in some interval of length . From this it follows by Popoviciu\u2019s inequality for variances that :\nThe remainder of the proof follows from Lemma B.1 ###reference_theorem1###.\n\u220e\nProof of Lemma B.4 ###reference_theorem4###\nThe result follows Lemma 3.3 and Lemma 6.8 in [60 ###reference_b60###]. We provide a simplified proof below, adapted to our notation. We\u2019ll use to denote the expectation conditional on the event that for each . We use analogously.\nOur proof will proceed in two steps. 
First we\u2019ll show that:\nThis condition states that if there does not exist some in the range of where the best improves on the squared error incurred by by more than , then the predictor is -multicalibrated in the sense of [60 ###reference_b60###] with respect to the constrained class . We then show that the level sets of a predictor which satisfies (69 ###reference_###) form a multicalibrated partition (Definition 3.2 ###reference_theorem2###). That is:\nThat is, the level sets form a -multicalibrated partition with respect to .\nFirst, we\u2019ll prove the contrapositive of (69 ###reference_###). This proof is adapted from that of Lemma 3.3 in [60 ###reference_b60###]. Suppose there exists some and such that\nThen there exists such that:\nProof: let and . Then:\nWhere the last step follows because we took , the subset of the function class which only takes values in . This implies that if instead for all , then for all and . Next we prove (70 ###reference_###); that is, for all and implies for all .\nThe proof is adapted from that of Lemma 6.8 in [60 ###reference_b60###]; our proof differs beginning at (88 ###reference_###). Fix some and . By assumption we have, for all and ,\nThen we can show:\nWhere the last step follows from the assumption (81 ###reference_###). Now, let be the constant function which takes the value . We can write (88 ###reference_###) as follows:\nBecause is closed under affine transformations, it contains all constant functions, and thus, . , by definition, is the subset of whose range lies in . Because , it must be that and thus, . So, we can again invoke (81 ###reference_###) to show:\nWhich completes the proof.\n\u220e"
|
| 78 |
+
},
|
| 79 |
+
{
|
| 80 |
+
"section_id": "Appendix 4",
|
| 81 |
+
"parent_section_id": null,
|
| 82 |
+
"section_name": "Appendix D Relating Lemma\u00a0C.2 to Omnipredictors [16]",
|
| 83 |
+
"text": "In this section we compare Lemma C.2 ###reference_theorem2### to the main result of [16 ###reference_b16###]. While the main result of [16 ###reference_b16###] applies broadly to convex, Lipschitz loss functions, we focus on the special case of minimizing squared error. In this case, we show that Lemma C.2 ###reference_theorem2### extends the main result of [16 ###reference_b16###] to cover real-valued outcomes under somewhat weaker and more natural conditions. We proceed in three steps: first, to provide a self-contained exposition, we state the result of [16 ###reference_b16###] for real-valued outcomes in the special case of squared error (Lemma D.1 ###reference_theorem1### and Lemma D.2 ###reference_theorem2### below). Second, we derive a matching bound using Lemma C.2 ###reference_theorem2### (our result), which we do by demonstrating that the conditions of Lemma D.2 ###reference_theorem2### imply the conditions of Lemma C.2 ###reference_theorem2###. Finally, we show that Lemma C.2 ###reference_theorem2### applies in more generality than Lemma D.2 ###reference_theorem2###, under conditions which match those of Definition 3.2 ###reference_theorem2###.\nWe first state the main result of [16 ###reference_b16###] (adapted to our notation) below, which holds for binary outcomes .999As discussed in Section 1 ###reference_###, we also continue to elide the distinction between the \u2018approximate\u2019 multicalibration of [16 ###reference_b16###] and our focus on individual indistinguishable subsets. The results in this section can again be interpreted as holding for the \u2018typical\u2019 element of an approximately multicalibrated partition.\nLet be a subset which is -indistinguishable with respect to a real-valued function class and a binary target outcome . Then, for all ,\nThis result makes use of the fact that for any fixed , the squared error function is -Lipschitz with respect to over the interval . 
This is similar to Lemma C.2 ###reference_theorem2###, but requires that is binary-valued. In contrast, Lemma C.2 ###reference_theorem2### allows for real-valued , and gains a factor of on the RHS.101010Note that Lemma C.2 ###reference_theorem2### also requires that each takes values in , but this is without loss of generality when the outcome is bounded in ; projecting each onto can only reduce squared error. [16 ###reference_b16###] provide an alternate extension of Lemma D.1 ###reference_theorem1### to bounded, real-valued , which we present below for comparison to Lemma C.2 ###reference_theorem2###.\nExtending Lemma D.1 ###reference_theorem1### to real-valued . Fix some , and let . Let be a random variable which represents a discretization of into bins of size . That is, . Let denote the range of . Observe that the following holds for any function :\nWhere (93 ###reference_###) follows because the function is -Lipschitz with respect to over for all . We now work with the discretization of , and provide an analogue to Lemma D.1 ###reference_theorem1### under a modified indistinguishability condition for discrete-valued , which we\u2019ll show is stronger than Definition 3.1 ###reference_theorem1###.\nLet denote the range of a function , and let denote the indicator function. Let be a subset of the input space which satisfies the following condition with respect to a function class and discretized target :\nFor all and , if:\nThen:\nTo interpret this result, observe that (95 ###reference_###) yields a bound which is similar to Lemma D.1 ###reference_theorem1### under a modified \u2018pointwise\u2019 indistinguishability condition (94 ###reference_###) for any discretization of . 
Combining (95 ###reference_###) with (93 ###reference_###) further implies:\nDeriving Lemma D.2 ###reference_theorem2### using Lemma C.2 ###reference_theorem2###\nWe show next that the \u2018pointwise\u2019 condition (94 ###reference_###) for implies our standard indistinguishability condition (Definition 3.1 ###reference_theorem1###) for . This will allow us to apply Lemma C.2 ###reference_theorem2### to obtain a bound which is identical to (96 ###reference_###). Thus, we show that Lemma C.2 ###reference_theorem2### is at least as general as Lemma D.2 ###reference_theorem2###.\nLet be a subset satisfying (94 ###reference_###). Then, for all ,\nWe provide a proof in Appendix E ###reference_###. Thus, combining assumption (94 ###reference_###) with Lemma C.2 ###reference_theorem2### and (93 ###reference_###) recovers a result which is identical to Lemma D.2 ###reference_theorem2###. That is, for all :\nWhere (99 ###reference_###) follows from Lemma D.3 ###reference_theorem3###, (100 ###reference_0###) follows from Lemma C.2 ###reference_theorem2### and (101 ###reference_1###) follows from (93 ###reference_###).\nExtending Lemma C.2 ###reference_theorem2### beyond Lemma D.2 ###reference_theorem2###\nFinally, to show that Lemma C.2 ###reference_theorem2### extends Lemma D.2 ###reference_theorem2###, it suffices to provide a distribution over for some and a discrete-valued taking values such that Definition 3.1 ###reference_theorem1### is satisfied at level , but (94 ###reference_###) is not satisfied at (though in fact that taking also suffices for the following counterexample).\nConsider the joint distribution in which the events , and occur with equal probability conditional on for some . We suppress the conditioning event for clarity. Then:\nOn the other hand we have:\nThat is, we have . 
Thus, Lemma C.2 ###reference_theorem2### establishes a result which is similar to (D.2 ###reference_theorem2###) for real-valued under the weaker and more natural condition that is bounded, which remains well-defined for real-valued , rather than requiring the stronger pointwise bound (94 ###reference_###) for some discretization .\nFinally, we briefly compare Lemma C.2 ###reference_theorem2### to Theorem 8.3 in [16 ###reference_b16###], which generalizes Lemma D.2 ###reference_theorem2### to hold for linear combinations of the functions and to further quantify the gap between the \u2018canonical predictor\u2019 and any (or linear combinations thereof). These extensions are beyond the scope of our work, but we briefly remark that the apparently sharper bound of Theorem 8.3 is due to an incorrect assumption that the squared loss is -Lipschitz with respect to over the interval , for any . Correcting this to a Lipschitz constant of recovers the same bound as (101 ###reference_1###)."
|
| 84 |
+
},
|
| 85 |
+
{
|
| 86 |
+
"section_id": "Appendix 5",
|
| 87 |
+
"parent_section_id": null,
|
| 88 |
+
"section_name": "Appendix E Proofs of auxiliary lemmas",
|
| 89 |
+
"text": "Proof of Lemma C.1 ###reference_theorem1###\nWe\u2019ll first prove (18 ###reference_###).\nAs desired. To prove (19 ###reference_###), let . Applying the prior result yields:\nBecause , it follows that:\nFinally, because covariance is a bilinear function, . Chaining this identity with (113 ###reference_3###) yields the result.\n\u220e\nProof of Lemma C.2 ###reference_theorem2###\nThe result we want to prove specializes Theorem 6.3 in [16 ###reference_b16###] to the case of squared error, but our result allows rather than . The first few steps of our proof thus follow that of Theorem 6.3 in [16 ###reference_b16###]; our proof diverges starting at (118 ###reference_8###). We provide a detailed comparison of these two results in Appendix D ###reference_### above.\nFix any . We want to prove the following bound:\nIt suffices to show instead that:\nFrom this the result follows, as for any constant . To simplify notation, we drop the subscript and instead let the conditioning event be implicit throughout. We first show:\nWhere the second inequality is an application of Jensen\u2019s inequality (the squared loss is convex in ). From this it follows that:\nWhere each step until (124 ###reference_4###) follows by simply grouping terms and applying linearity of expectation. (124 ###reference_4###) follows by the multicalibration condition and the fact that the variance of any random variable is nonnegative.\n\u220e\nProof of Lemma C.3 ###reference_theorem3###\nFor any , assumption (40 ###reference_###) gives us . We\u2019ll expand the LHS to show the result.\nWhere (126 ###reference_6###) is the application of the law of total covariance. Observe now that is exactly what we want to bound. To do so, we now focus on expanding . 
This is:\nBecause is a binary valued function, we can apply Lemma C.1 ###reference_theorem1### to write\nPlugging in this identity yields:\nPlugging (133 ###reference_3###) into (128 ###reference_8###) yields:\nWhere (137 ###reference_7###) is the application of the reverse triangle inequality. Combining the initial assumption that is indistinguishable with respect to (40 ###reference_###) and (138 ###reference_8###) yields:\nWhich further implies:\nWhich finally implies , as desired. (141 ###reference_1###) and (142 ###reference_2###) follow from the assumption that , and (143 ###reference_3###) follows from the initial assumption that is -indistinguishable with respect to every (39 ###reference_###).\n\u220e\nProof of Lemma D.3 ###reference_theorem3###\nRecall that is a discrete random variable taking values . We again use to denote the range of . Our analysis below proceeds conditional on the event , which we suppress for clarity. We can show\nWhere (149 ###reference_9###) makes use of the fact that , (150 ###reference_0###) makes use of the identity (this is a straightforward analogue of Lemma C.1 ###reference_theorem1###), (151 ###reference_1###) applies assumption (94 ###reference_###), and (152 ###reference_2###) makes use of the fact that .\n\u220e"
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"section_id": "Appendix 6",
|
| 93 |
+
"parent_section_id": null,
|
| 94 |
+
"section_name": "Appendix F Finding multicalibrated partitions: chest X-ray diagnosis",
|
| 95 |
+
"text": "In this section we provide additional details related to the chest X-ray diagnosis task studied in Section 5 ###reference_###. As discussed in Section 5 ###reference_###, the relevant class is the set of 8 predictive models studied in [13 ###reference_b13###] for diagnosing various pathologies using chest X-ray imaging data. A key insight for learning a multicalibrated partition with respect to this class is that, because the class of models is finite, we can apply Lemma B.1 ###reference_theorem1### to discover indistinguishable subsets without training new models or even accessing the training data. Instead, we can think of the relevant input space as : the 8-dimensional vector containing the classifications output by the 8 models in for a given X-ray. Then, per Lemma Lemma C.3 ###reference_theorem3### and Corollary A.3, any subset of X-rays for which the Chebyshev distance (i.e., the maximum coordinatewise difference) in this 8-dimensional space is bounded must be approximately indistinguishable. Thus, to find approximately indistinguishable subsets, we simply apply an off-the-shelf clustering algorithm to minimize the intracluster Chebyshev distance. Code and instructions to replicate this procedure are available at https://github.com/ralur/heap-repl ###reference_###."
|
| 96 |
+
},
|
| 97 |
+
{
|
| 98 |
+
"section_id": "Appendix 7",
|
| 99 |
+
"parent_section_id": null,
|
| 100 |
+
"section_name": "Appendix G Additional experimental results: chest X-ray diagnosis",
|
| 101 |
+
"text": "In this section we provide results which are analagous to those presented in Section 5 ###reference_### for the four additional pathologies studied in [13 ###reference_b13###]. For each pathology\nwe first present a figure comparing the accuracy of the benchmark radiologists to that of the eight leaderboard algorithms, as in Figure 2 ###reference_### for atelectasis. We then present a figure which plots the conditional performance of each radiologist within a pair of indistinguishable subsets, as in Figure 3 ###reference_###.\nResults for diagnosing a pleural effusion are presented in Figure 5 ###reference_### and Figure 6 ###reference_###. Results for diagnosing cardiomegaly are presented in Figure 7 ###reference_### and Figure 8 ###reference_###. Results for diagnosing consolidation are presented in Figure 9 ###reference_### and Figure 10 ###reference_###. Finally, results for diagnosing edema are presented in Figure 11 ###reference_### and Figure 12 ###reference_###.\n###figure_6### ###figure_7### ###figure_8### ###figure_9### ###figure_10### ###figure_11### ###figure_12### ###figure_13###"
|
| 102 |
+
},
|
| 103 |
+
{
|
| 104 |
+
"section_id": "Appendix 8",
|
| 105 |
+
"parent_section_id": null,
|
| 106 |
+
"section_name": "Appendix H Additional experimental details: prediction from visual features",
|
| 107 |
+
"text": "In this section we provide additional details related to the escape the room task studied in Section 5 ###reference_###. Our work focuses on study 2 in [67 ###reference_b67###]; study 1 analyzes the same task with only two treatment arms. As discussed in Section 5 ###reference_###, the dimension of the feature space is 33, and encodes the mean/median/standard deviation (for numerical features) or one-hot encoding (for categorical features) of the following: the location in which the puzzle was attempted (Boston, Arizona, NYC etc.), the type of escape the room puzzle (theater, office, home etc.), the number of people in the photo, summary demographic information (age, gender, race, racial diversity), whether participants are smiling, and whether (and what type) of glasses they are wearing. This is exactly the set of features considered in [67 ###reference_b67###]; for additional detail on the data collection process we refer to their work.\nTo learn a partition of the input space, we apply the boosting algorithm proposed in [60 ###reference_b60###]. We discuss this algorithm and its connection to multicalibration in Appendix B ###reference_###."
|
| 108 |
+
},
|
| 109 |
+
{
|
| 110 |
+
"section_id": "Appendix 9",
|
| 111 |
+
"parent_section_id": null,
|
| 112 |
+
"section_name": "Appendix I Additional experimental results: prediction from visual features",
|
| 113 |
+
"text": "In this section we present additional experimental results for the visual prediction task studied in [67 ###reference_b67###].\nHumans fail to outperform algorithms. As in the X-ray diagnosis task in Section 5 ###reference_###, we first directly compare the performance of human subjects to that of the five off-the-shelf learning algorithms studied in [67 ###reference_b67###]. We again use the Matthew\u2019s Correlation Coefficient (MCC) as a measure of binary classification accuracy [66 ###reference_b66###]. Our results confirm one of the basic findings in [67 ###reference_b67###], which is that humans fail to outperform the best algorithmic predictors. We present these results in Figure 13 ###reference_###.\n###figure_14### Although these results indicate that humans fail to outperform algorithms on average in this visual prediction task, we now apply the results of Section 4 ###reference_### to investigate whether humans subjects can refine algorithmic predictions on specific instances.\nResolving indistinguishability via human judgment. As in Section 5 ###reference_###, we first form a partition of the set of input images which is multicalibrated with respect to the five predictors considered in Figure 13 ###reference_###. As indicated by Lemma B.1 ###reference_theorem1### and Corollary B.2 ###reference_theorem2###, we do this by partitioning the space of observations to minimize the variance of each of the five predictors within each subset.111111We describe this procedure in Appendix F ###reference_###. Because the outcome is binary, it is natural to partition the space of images into two clusters. 
We now examine the conditional correlation between each prediction and the true binary outcome within each of these subsets, which we plot in Figure 14 ###reference_###.\n###figure_15### As we can see, the human subjects\u2019 predictions perform comparably to the algorithms within subset , but add substantial additional signal when all five models predict a positive label (subset ). Thus, although the human subjects fail to outperform the algorithmic predictors on average (Figure 13 ###reference_###), there is substantial heterogeneity in their relative performance that can be identified ex-ante by partitioning the observations into two approximately indistinguishable subsets. In particular, as in the X-ray classification task studied in Section 5 ###reference_###, we find that human subjects can identify negative instances which are incorrectly classified as positive by all five algorithmic predictors."
|
| 114 |
+
}
|
| 115 |
+
],
|
| 116 |
+
"tables": {},
|
| 117 |
+
"image_paths": {
|
| 118 |
+
"1(a)": {
|
| 119 |
+
"figure_path": "2402.00793v3_figure_1(a).png",
|
| 120 |
+
"caption": "(a)\nFigure 1: Partitions which are approximately multicalibrated with respect to the class of hyperplane classifiers (we consider the empirical distribution placing equal probability on each observation). In both panels, no hyperplane classifier has significant discriminatory power within each subset.",
|
| 121 |
+
"url": "http://arxiv.org/html/2402.00793v3/x1.png"
|
| 122 |
+
},
|
| 123 |
+
"1(b)": {
|
| 124 |
+
"figure_path": "2402.00793v3_figure_1(b).png",
|
| 125 |
+
"caption": "(b)\nFigure 1: Partitions which are approximately multicalibrated with respect to the class of hyperplane classifiers (we consider the empirical distribution placing equal probability on each observation). In both panels, no hyperplane classifier has significant discriminatory power within each subset.",
|
| 126 |
+
"url": "http://arxiv.org/html/2402.00793v3/x2.png"
|
| 127 |
+
},
|
| 128 |
+
"2": {
|
| 129 |
+
"figure_path": "2402.00793v3_figure_2.png",
|
| 130 |
+
"caption": "Figure 2: The relative performance of radiologists and predictive algorithms for detecting atelectasis. Each bar plots the Matthews Correlation Coefficient between the corresponding prediction and the ground truth label. Point estimates are reported with 95%percent9595\\%95 % bootstrap confidence intervals.",
|
| 131 |
+
"url": "http://arxiv.org/html/2402.00793v3/x3.png"
|
| 132 |
+
},
|
| 133 |
+
"3": {
|
| 134 |
+
"figure_path": "2402.00793v3_figure_3.png",
|
| 135 |
+
"caption": "Figure 3: Conditional performance for atelectasis. Within subset 00 (n\ud835\udc5bnitalic_n = 148148148148), all algorithms predict Y\ud835\udc4cYitalic_Y=1111, thus achieving true positive rate (TPR) 1111, true negative rate (TNR) 00, and an MCC of 00. Radiologists achieve a corresponding (TPR, TNR) of (84.0%,42.9%)percent84.0percent42.9(84.0\\%,42.9\\%)( 84.0 % , 42.9 % ), (72.6%,47.6%)percent72.6percent47.6(72.6\\%,47.6\\%)( 72.6 % , 47.6 % ) and (93.4%,19.0%)percent93.4percent19.0(93.4\\%,19.0\\%)( 93.4 % , 19.0 % ), respectively. Subset 1111 (n\ud835\udc5bnitalic_n = 352352352352) contains the remaining patients. The baseline is a random permutation of the labels. Confidence intervals for algorithmic performance are not strictly valid (subsets are chosen conditional on the predictions), but are included for reference. All else is as in Figure 2.",
|
| 136 |
+
"url": "http://arxiv.org/html/2402.00793v3/x4.png"
|
| 137 |
+
},
|
| 138 |
+
"4": {
|
| 139 |
+
"figure_path": "2402.00793v3_figure_4.png",
|
| 140 |
+
"caption": "Figure 4: Human performance within the approximate level sets of a predictor h\u210ehitalic_h which is multicalibrated over \u2131RT\u20625superscript\u2131RT5\\mathcal{F}^{\\text{RT}5}caligraphic_F start_POSTSUPERSCRIPT RT 5 end_POSTSUPERSCRIPT. Level sets 0,1,010,1,0 , 1 , and 10101010 are the sets {x\u2223h\u2062(x)=0}conditional-set\ud835\udc65\u210e\ud835\udc650\\{x\\mid h(x)=0\\}{ italic_x \u2223 italic_h ( italic_x ) = 0 }, {x\u2223h\u2062(x)\u2208(0,.1]}conditional-set\ud835\udc65\u210e\ud835\udc650.1\\{x\\mid h(x)\\in(0,.1]\\}{ italic_x \u2223 italic_h ( italic_x ) \u2208 ( 0 , .1 ] }, and {x\u2223h\u2062(x)\u2208[.9,1]}conditional-set\ud835\udc65\u210e\ud835\udc65.91\\{x\\mid h(x)\\in[.9,1]\\}{ italic_x \u2223 italic_h ( italic_x ) \u2208 [ .9 , 1 ] }, and contain 259,309259309259,309259 , 309 and 292292292292 observations, respectively. All other level sets are empty in our test set. A random permutation of the labels is included as a baseline.",
|
| 141 |
+
"url": "http://arxiv.org/html/2402.00793v3/x5.png"
|
| 142 |
+
},
|
| 143 |
+
"5": {
|
| 144 |
+
"figure_path": "2402.00793v3_figure_5.png",
|
| 145 |
+
"caption": "Figure 5: The relative performance of radiologists and predictive algorithms for detecting a pleural effusion. Each bar plots the Matthews Correlation Coefficient between the corresponding prediction and the ground truth label. Point estimates are reported with 95%percent9595\\%95 % bootstrap confidence intervals.",
|
| 146 |
+
"url": "http://arxiv.org/html/2402.00793v3/x6.png"
|
| 147 |
+
},
|
| 148 |
+
"6": {
|
| 149 |
+
"figure_path": "2402.00793v3_figure_6.png",
|
| 150 |
+
"caption": "Figure 6: The conditional performance of radiologists and predictive algorithms for detecting a pleural effusion within two approximately indistinguishable subsets. A random permutation of the true labels is included as a baseline. The confidence intervals for the algorithmic predictors are not strictly valid (the subsets are chosen conditional on the predictions themselves), but are included for reference against radiologist performance. All else is as in Figure 5.",
|
| 151 |
+
"url": "http://arxiv.org/html/2402.00793v3/x7.png"
|
| 152 |
+
},
|
| 153 |
+
"7": {
|
| 154 |
+
"figure_path": "2402.00793v3_figure_7.png",
|
| 155 |
+
"caption": "Figure 7: The relative performance of radiologists and predictive algorithms for detecting cardiomegaly. Each bar plots the Matthews Correlation Coefficient between the corresponding prediction and the ground truth label. Point estimates are reported with 95%percent9595\\%95 % bootstrap confidence intervals.",
|
| 156 |
+
"url": "http://arxiv.org/html/2402.00793v3/x8.png"
|
| 157 |
+
},
|
| 158 |
+
"8": {
|
| 159 |
+
"figure_path": "2402.00793v3_figure_8.png",
|
| 160 |
+
"caption": "Figure 8: The conditional performance of radiologists and predictive algorithms for detecting cardiomegaly within two approximately indistinguishable subsets. A random permutation of the true labels is included as a baseline. The confidence intervals for the algorithmic predictors are not strictly valid (the subsets are chosen conditional on the predictions themselves), but are included for reference against radiologist performance. All else is as in Figure 7.",
|
| 161 |
+
"url": "http://arxiv.org/html/2402.00793v3/x9.png"
|
| 162 |
+
},
|
| 163 |
+
"9": {
|
| 164 |
+
"figure_path": "2402.00793v3_figure_9.png",
|
| 165 |
+
"caption": "Figure 9: The relative performance of radiologists and predictive algorithms for detecting consolidation. Each bar plots the Matthews Correlation Coefficient between the corresponding prediction and the ground truth label. Point estimates are reported with 95%percent9595\\%95 % bootstrap confidence intervals.",
|
| 166 |
+
"url": "http://arxiv.org/html/2402.00793v3/x10.png"
|
| 167 |
+
},
|
| 168 |
+
"10": {
|
| 169 |
+
"figure_path": "2402.00793v3_figure_10.png",
|
| 170 |
+
"caption": "Figure 10: The conditional performance of radiologists and predictive algorithms for detecting consolidation within two approximately indistinguishable subsets. A random permutation of the true labels is included as a baseline. The confidence intervals for the algorithmic predictors are not strictly valid (the subsets are chosen conditional on the predictions themselves), but are included for reference against radiologist performance. All else is as in Figure 9.",
|
| 171 |
+
"url": "http://arxiv.org/html/2402.00793v3/x11.png"
|
| 172 |
+
},
|
| 173 |
+
"11": {
|
| 174 |
+
"figure_path": "2402.00793v3_figure_11.png",
|
| 175 |
+
"caption": "Figure 11: The relative performance of radiologists and predictive algorithms for detecting edema. Each bar plots the Matthews Correlation Coefficient between the corresponding prediction and the ground truth label. Point estimates are reported with 95%percent9595\\%95 % bootstrap confidence intervals.",
|
| 176 |
+
"url": "http://arxiv.org/html/2402.00793v3/x12.png"
|
| 177 |
+
},
|
| 178 |
+
"12": {
|
| 179 |
+
"figure_path": "2402.00793v3_figure_12.png",
|
| 180 |
+
"caption": "Figure 12: The conditional performance of radiologists and predictive algorithms for detecting edema within two approximately indistinguishable subsets. A random permutation of the true labels is included as a baseline. The confidence intervals for the algorithmic predictors are not strictly valid (the subsets are chosen conditional on the predictions themselves), but are included for reference against radiologist performance. All else is as in Figure 11.",
|
| 181 |
+
"url": "http://arxiv.org/html/2402.00793v3/x13.png"
|
| 182 |
+
},
|
| 183 |
+
"13": {
|
| 184 |
+
"figure_path": "2402.00793v3_figure_13.png",
|
| 185 |
+
"caption": "Figure 13: Comparing the accuracy of human subjects\u2019 predictions to those made by off-the-shelf learning algorithms across four treatment conditions. Subjects in the control condition are given no training, while subjects in each of the three remaining conditions are presented with a small number of labeled examples before beginning the task. Each bar plots the Matthews correlation coefficient between the corresponding prediction and the true binary outcome; point estimates are reported with 95%percent9595\\%95 % bootstrap confidence intervals.",
|
| 186 |
+
"url": "http://arxiv.org/html/2402.00793v3/x14.png"
|
| 187 |
+
},
|
| 188 |
+
"14": {
|
| 189 |
+
"figure_path": "2402.00793v3_figure_14.png",
|
| 190 |
+
"caption": "Figure 14: The conditional performance of human and algorithmic predictions within two approximately indistinguishable subsets. Subset 1111 (n\ud835\udc5bnitalic_n = 189189189189) is the set in which all five predictors predict a positive label; Subset 00 (n\ud835\udc5bnitalic_n = 671671671671) contains the remaining observations. All else is as in Figure 13. The confidence intervals for the algorithmic predictors are not strictly valid (the subsets are chosen conditional on the predictions themselves), but are included for reference against human performance.",
|
| 191 |
+
"url": "http://arxiv.org/html/2402.00793v3/x15.png"
|
| 192 |
+
}
|
| 193 |
+
},
|
| 194 |
+
"validation": true,
|
| 195 |
+
"references": [],
|
| 196 |
+
"url": "http://arxiv.org/html/2402.00793v3"
|
| 197 |
+
}
|
20241030/2402.01607v3.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2402.02042v3.json
ADDED
|
@@ -0,0 +1,389 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"title": "Learning General Parameterized Policies for Infinite Horizon Average Reward Constrained MDPs via Primal-Dual Policy Gradient Algorithm",
|
| 3 |
+
"abstract": "This paper explores the realm of infinite horizon average reward Constrained Markov Decision Processes (CMDPs). To the best of our knowledge, this work is the first to delve into the regret and constraint violation analysis of average reward CMDPs with a general policy parametrization. To address this challenge, we propose a primal dual-based policy gradient algorithm that adeptly manages the constraints while ensuring a low regret guarantee toward achieving a global optimal policy. In particular, our proposed algorithm achieves objective regret and constraint violation bounds.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "The framework of Reinforcement Learning (RL) is concerned with a class of problems where an agent learns to yield the maximum cumulative reward in an unknown environment via repeated interaction. RL finds applications in diverse areas, such as wireless communication, transportation, and epidemic control [1 ###reference_b1###, 2 ###reference_b2###, 3 ###reference_b3###]. RL problems are mainly categorized into three setups: episodic, infinite horizon discounted reward, and infinite horizon average reward. Among them, the infinite horizon average reward setup is particularly significant for real-world applications. It aligns with most of the practical scenarios and captures their long-term goals. Some applications in real life require the learning procedure to respect the boundaries of certain constraints. In an epidemic control setup, for example, vaccination policies must take the supply shortage (budget constraint) into account. Such restrictive decision-making routines are described by constrained Markov Decision Processes (CMDP) [4 ###reference_b4###, 5 ###reference_b5###, 6 ###reference_b6###]. Existing papers on CMDPs utilize either a tabular or a linear MDP structure. This work provides the first algorithm for an infinite horizon average reward CMDP with general parametrization and proves its sub-linear regret and constraint violation bounds.\nThere are two primary ways to solve a CMDP problem in the infinite horizon average reward setting. The first one, known as the model-based approach, involves constructing estimates of the transition probabilities of the underlying CMDP, which are subsequently utilized to derive policies [6 ###reference_b6###, 7 ###reference_b7###, 5 ###reference_b5###]. The caveat of this approach is the large memory requirement to store the estimated parameters, which effectively curtails its applicability to CMDPs with large state spaces. 
The alternative strategy, known as the model-free approach, either directly estimates the policy function or maintains an estimate of the function, which is subsequently used for policy generation [8 ###reference_b8###]. Model-free algorithms typically demand lower memory and computational resources than their model-based counterparts. Although the CMDP has been solved in a model-free manner in the tabular [8 ###reference_b8###] and linear [9 ###reference_b9###] setups, its exploration with the general parameterization is still open and is the goal of this paper.\nGeneral parameterization indexes the policies by finite-dimensional parameters (e.g., weights of neural networks) to accommodate large state spaces. The learning is manifested by updating these parameters using policy gradient (PG)-type algorithms. Note that PG algorithms are primarily studied in discounted reward setups. For example, [10 ###reference_b10###] characterizes the sample complexities of the PG and the Natural PG (NPG) algorithms with softmax and direct parameterization. Similar results for general parameterization are obtained by [11 ###reference_b11###, 12 ###reference_b12###].\nThe regret analysis of a PG algorithm with the general parameterization has been recently performed for an infinite horizon average reward MDP without constraints [13 ###reference_b13###]. Similar regret and constraint violation analysis for the average reward CMDP is still missing in the literature. In this paper, we bridge this gap.\nChallenges and Contribution: We propose a PG-based algorithm with general parameterized policies for the average reward CMDP and establish its sublinear regret and constraint violation bounds. In particular, assuming the underlying CMDP to be ergodic, we demonstrate that our PG algorithm achieves an average optimality rate of and average constraint violation rate of . Invoking this convergence result, we establish that our algorithm achieves regret and constraint violation bounds of . 
Apart from providing the first sublinear regret guarantee for the average reward CMDP with general parameterization, our work also improves the state-of-the-art regret guarantee, in the model-free tabular setup [8 ###reference_b8###].\nDespite the availability of sample complexity analysis of PG algorithms with constraints in the discounted reward setup [14 ###reference_b14###, 4 ###reference_b4###] and PG algorithms without constraint in average reward setup [13 ###reference_b13###], obtaining sublinear regret and constraint violation bounds for their average reward counterpart is challenging.\n[14 ###reference_b14###, 4 ###reference_b4###] solely needs an estimate of the value function while we additionally need the estimate of the gain function, .\n[14 ###reference_b14###, 4 ###reference_b4###] assume access to a simulator to generate unbiased value estimates. In contrast, our algorithm uses a sample trajectory of length to estimate the values and gains and does not assume the availability of a simulator.\nThe first-order convergence analysis (Lemma 6) differs from that in [13 ###reference_b13###]. Note that both of these papers use an ascent-like inequality. In [13 ###reference_b13###], this bounds the term . The final result is obtained by calculating a sum over which cancels the intermediate terms and leaves us with . We would like to emphasize that the cancellation of the intermediate terms is crucial to establishing the result. However, a similar effort in our case only leads to a bound of . Note that directly performing a sum over this difference does not lead to the cancellation of intermediate terms. We had to take a different route and apply the bounds of the Lagrange multipliers and the estimate of the constraint function to achieve that goal.\nAfter solving the problems mentioned above, we prove convergence rate of the Lagrange function. 
Unfortunately, the strong duality property, which is central to proving convergence results of CMDPs for tabular and softmax policies, does not hold under the general parameterization. As a result, the convergence result for the dual problem does not automatically translate to that for the primal problem, which is a main difference from [13 ###reference_b13###]. We overcome this barrier by introducing a novel constraint violation analysis and a series of intermediate results (Lemma 16 ###reference_ma16###-18 ###reference_ma18###) that help disentangle the regret and constraint violation rates from the Lagrange convergence. It is important to mention that although the techniques applied are inspired by the [14 ###reference_b14###], those techniques cannot be directly adopted for average reward MDPs. This is primarily because the estimate is biased in the average case. To the best of our knowledge, constraint violation analysis with a biased estimate of the cost value is not available in the literature and is performed for the first time in our paper.\nDue to the presence of the Lagrange multiplier, the convergence analysis of a CMDP is much more convoluted than its unconstrained counterpart. The learning rate of the Lagrange update, , turns out to be pivotal in determining the growth rate of regret and constraint violation. Low values of push the regret down while simultaneously increasing the constraint violation. Finding the optimal value of that judiciously balances these two competing goals is one of the cornerstones of our analysis.\nRelated work for unconstrained average reward RL: In the absence of constraints, both model-based and model-free tabular setups have been widely studied for infinite horizon average reward MDPs. For example, the model-based algorithms proposed by [15 ###reference_b15###, 16 ###reference_b16###] achieve the optimal regret bound of . 
Similarly, the model-free algorithm proposed by [17 ###reference_b17###] for tabular MDP results in regret. Regret analysis for average reward MDP with general parametrization has been recently studied in [13 ###reference_b13###], where a regret bound of is derived.\nRelated work for constrained RL: The constrained reinforcement learning problem has been extensively studied both for infinite horizon discounted reward and episodic MDPs. For example, discounted reward CMDPs have been recently studied in the tabular setup [18 ###reference_b18###], with both softmax [14 ###reference_b14###, 19 ###reference_b19###], and general policy parameterization [14 ###reference_b14###, 19 ###reference_b19###, 4 ###reference_b4###, 12 ###reference_b12###]. Moreover, [20 ###reference_b20###, 21 ###reference_b21###, 22 ###reference_b22###] investigated episodic CMDPs in the tabular setting.\nRecently, the infinite horizon average reward CMDPs have been investigated in model-based setups [5 ###reference_b5###, 6 ###reference_b6###, 7 ###reference_b7###], tabular model-free setting [8 ###reference_b8###] and linear CMDP setting [9 ###reference_b9###]. For model-based CMDP setup, [6 ###reference_b6###] proposed a model-based online mirror descent algorithm in the ergodic setting which achieves for regret and violation at the same time. [7 ###reference_b7###] proposed algorithms based on the posterior sampling and the optimism principle that achieve regret with zero constraint violations in the ergodic setting. However, the above model-based algorithms cannot be extended to large state space. In the tabular model-free setup, the algorithm proposed by [8 ###reference_b8###] achieves a regret of with zero constraint violations. Finally, in the linear CMDP setting, [9 ###reference_b9###] achieves regret bound with zero constraint violation. 
Note that the linear CMDP setting assumes that the transition probability has a certain linear structure with a known feature map which is not realistic. Table 1 ###reference_### summarizes all relevant works. Unfortunately, none of these papers study the infinite horizon average reward CMDPs with general parametrization which is the main focus of our article.\nAdditionally, for the weakly communicating setting, [6 ###reference_b6###] proposed a model-based algorithm achieving for both regret and violation in tabular case. [9 ###reference_b9###] further extends such result to linear MDP setting with regret and violation. In general, it is difficult to propose a model-free algorithm with provable guarantees for Constrained MDPs (CMDPs) without considering the ergodic model. [6 ###reference_b6###] pointed out several extra challenges in Weakly communicating MDP compared to the ergodic case. For example, there is no uniform bound for the span of the value function for all stationary policies. It is also unclear how to estimate a policy\u2019s bias function accurately without the estimated model, which is an important step for estimating the policy gradient."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Formulation",
|
| 15 |
+
"text": "This paper analyzes an infinite-horizon average reward constrained Markov Decision Process (CMDP) denoted as where denotes the state space, is the action space of size , is the reward function, is the constraint cost function, is the state transition function where denotes a probability simplex with dimension , and is the initial distribution of states. A policy maps the current state to an action distribution. The average reward and cost of a policy, , is,\nwhere for average reward and cost respectively. The expectation is calculated over the distribution of all sampled trajectories where , , . For notational convenience, we shall drop the dependence on whenever there is no confusion. Our goal is to maximize the average reward function while ensuring that the average cost is above a given threshold. Without loss of generality, we can mathematically write this problem as,\nHowever, the above problem becomes difficult to handle when the underlying state space, is large. Therefore, we consider a class of parametrized policies, whose elements are indexed by a -dimensional parameter, where . Thus, the original problem in Eq (2 ###reference_###) can be reformulated as the following parameterized problem.\nWe denote , for notational convenience. Let, be a transition function induced by and defined as, , . If is such that for every policy , the function, is irreducible and aperiodic, then is called ergodic.\nThe CMDP is ergodic.\nErgodicity is a common assumption in the literature [23 ###reference_b23###, 24 ###reference_b24###]. If is ergodic, then , there exists a unique stationary distribution, given as follows.\nErgodicity implies that is independent of the initial distribution, , and obeys . Hence, the average reward and cost functions can be expressed as,\nwhere . Note that the functions , are also independent of the initial distribution, . 
Furthermore, , there exist a function such that the following Bellman equation is satisfied .\nwhere and is given as\n.\nNote that if satisfies , then it is also satisfied by for any arbitrary, . To uniquely define the value functions, we assume that . In this case, is given by,\nwhere the expectation is computed over all -induced trajectories. In a similar way, , one can uniquely define , as follows.\nMoreover, the advantage function is defined such that , , .\nAssumption 1 ###reference_umption1### also implies the existence of a finite mixing time. Specifically, for an ergodic MDP, , the mixing time is defined as follows.\nThe mixing time, , of the CMDP for a parameterized policy, , is defined as, .\nThe overall mixing time is . In this paper, is finite due to ergodicity.\nMixing time characterizes how fast a CMDP converges to its stationary state distribution, , under a given policy, . We also define the hitting time as follows.\nThe hitting time of an ergodic CMDP with respect to a policy, , is defined as .\nThe overall hitting time is defined as . In this paper, is finite due to ergodicity as well.\nDefine as the optimal solution to the unparameterized problem . For a given CMDP , and a time horizon , the regret and constraint violation of any algorithm is defined as follows.\nwhere the algorithm, , executes the actions, , based on the trajectory observed up to time, , and the state, is decided according to the state transition function, . For simplicity, we shall denote the regret and constraint violation as and respectively. Our goal is to design an algorithm that achieves low regret and constraint violation bounds."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "Proposed Algorithm",
|
| 21 |
+
"text": "We solve via a primal-dual algorithm based on the following problem.\nwhere . The function, , is called the Lagrange function and the Lagrange multiplier. Our algorithm updates the pair following the policy gradient iteration as shown below with an initial point , .\nwhere and are learning parameters and is the Slater parameter introduced in the following assumption. Finally, for any set, , denotes projection onto . The assumption stated below ensures that we have at least one feasible interior point solution to (2 ###reference_###).\nThere exists a and such that .\nNote that in (11 ###reference_###), the dual update is projected onto the set because the optimal dual variable for the parameterized problem is bounded in Lemma 16 ###reference_ma16###. The gradient of can be computed by invoking a variant of the well-known policy gradient theorem [25 ###reference_b25###].\nThe gradient of is computed as,\nwhere , , and are the advantage functions corresponding to reward and cost. In typical RL scenarios, learners do not have access to the state transition function, , and thereby to the functions and . This makes computing the exact gradient a difficult task. In Algorithm 1 ###reference_###, we demonstrate how one can still obtain good estimates of the gradient using sampled trajectories.\nAlgorithm 1 ###reference_### runs epochs, each of duration where defines a constant whose value is specified later. Clearly, . Note that the learner is assumed to know the horizon length, . This can be relaxed utilizing the well-known doubling trick [26 ###reference_b26###]. Additionally, it is assumed that the algorithm is aware of the mixing time and the hitting time. This assumption is common in the literature [13 ###reference_b13###, 17 ###reference_b17###]. The first step in obtaining a gradient estimate is estimating the advantage value for a given pair . This can be accomplished via Algorithm 2 ###reference_###. 
At the th epoch, a -induced trajectory, is obtained and subsequently passed to Algorithm 2 ###reference_### that searches for subtrajectories within it that start with a given state , are of length , and are at least distance apart from each other. Assume that there are such subtrajectories. Let the total reward and cost of the th subtrajectory be respectively and be its starting time. The value function estimates for the th epoch are\nThis leads to the following advantage estimator.\nwhere , . Finally, the gradient estimator is,\nwhere is the starting time of the th epoch. The parameters are updated following (12 ###reference_###). To update the Lagrange multiplier, we need an estimation of , which is obtained as the average cost of the th epoch. It should be noted that we remove the first samples from the th epoch because we require the state distribution emanating from the remaining samples to be close enough to the stationary distribution , which is the key to make close to . The following lemma demonstrates that is a good estimator of .\nThe following inequality holds , and sufficiently large .\nLemma 2 ###reference_ma2### shows that the error of our proposed advantage estimator can be bounded above as . We later utilize the above result to prove the goodness of the gradient estimator. It is to be clarified that our Algorithm 2 ###reference_### is inspired by Algorithm 2 of [17 ###reference_b17###]. However, while the authors of [17 ###reference_b17###] choose , we adapt . This subtle change is important in proving a sublinear regret for general parametrization."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "4",
|
| 25 |
+
"parent_section_id": null,
|
| 26 |
+
"section_name": "Global Convergence Analysis",
|
| 27 |
+
"text": "This section first shows that the sequence produced by Algorithm 1 ###reference_### is such that their associated Lagrange sequence converges globally. By expanding the Lagrange function, we then exhibit convergence of each of its components , . This is later used for regret and constraint violation analysis. Before delving into the details, we would like to state a few necessary assumptions.\nThe score function (stated below) is -Lipschitz and -smooth. Specifically, , and , the following inequalities hold.\nThe Lipschitz and smoothness properties of the score function are commonly assumed for policy gradient analyses [27 ###reference_b27###, 28 ###reference_b28###, 29 ###reference_b29###]. These assumptions hold for simple parameterization classes such as Gaussian policies.\nNote that by combining Assumption 3 ###reference_umption3### with Lemma 2 ###reference_ma2### and using the gradient estimator as given in , one can deduce the following result.\nThe following inequality holds provided that assumptions 1 ###reference_umption1### and 3 ###reference_umption3### are true.\nLemma 3 ###reference_ma3### claims that the gradient estimation error can be bounded as . We will use this result later to prove the global convergence of our algorithm.\nLet the transferred compatible function approximation error be defined as follows.\nwhere is the optimal solution of unparameterized problem in and\nWe assume that , and where is a positive constant.\nThe transferred compatible function approximation error quantifies the expressivity of the parameterized policy class. We can show that for softmax parameterization [10 ###reference_b10###] and linear MDPs [30 ###reference_b30###]. If the policy class is restricted, i.e., it does not contain all stochastic policies, turns out to be strictly positive. However, if the policy class is parameterized by a rich neural network, then can be assumed to be negligibly small [31 ###reference_b31###]. 
Such assumptions are common [29 ###reference_b29###, 10 ###reference_b10###].\nNote that defined in (19 ###reference_###) can be written as,\nwhere is the Moore-Penrose pseudoinverse and is the Fisher information matrix defined as,\nThere exists a constant such that is positive semidefinite where is an identity matrix of dimension, .\nAssumption 5 ###reference_umption5### is also called Fisher-non-degenerate policy assumption and is quite common in the literature [29 ###reference_b29###, 32 ###reference_b32###, 33 ###reference_b33###] in the policy gradient analysis. [29 ###reference_b29###][Assumption 2.1] provided a detailed discussion on the requirement of policy class to satisfy the assumption 5 ###reference_umption5###. Moreover, [34 ###reference_b34###] describes a class of policies that obeys assumptions simultaneously. The Lagrange difference lemma stated below is important in establishing global convergence.\nWith a slight abuse of notation, let . For any two policies , , the following result holds .\nWe now present a general framework for the convergence analysis of Algorithm 1 ###reference_###.\nIf the policy parameters, are updated via and assumptions 3 ###reference_umption3###, 4 ###reference_umption4###,and 5 ###reference_umption5### hold, then we have the following inequality for any ,\nwhere , is defined in (19 ###reference_###), and is the optimal solution to the problem .\nLemma 5 ###reference_ma5### proves that the optimality error of the Lagrange sequence can be bounded by the average first-order and second-order norms of the intermediate gradients. Note the presence of in the result. If the policy class is severely restricted, the optimality bound loses its importance. Consider the expectation of the second term in (5 ###reference_###). Note that,\nwhere follows from Assumption 5 ###reference_umption5###. The expectation of the third term in (5 ###reference_###) can be bounded as\nIn both and , is bounded above by Lemma 3 ###reference_ma3###. 
To bound the term, , the following lemma is applied.\nLet be -smooth, and . Then the following holds.\nNote the presence of in (20 ###reference_###). To ensure convergence, must be a function of .\nInvoking Lemma 3 ###reference_ma3###, we get the following relation under the same set of assumptions and the choice of parameters as in Lemma 6 ###reference_ma6###.\nApplying Lemma 3 ###reference_ma3### and (21 ###reference_###) in (4 ###reference_2###), we arrive at,\nSimilarly, using , we deduce the following.\nInequalities (22 ###reference_###) and (23 ###reference_###) lead to the following global convergence of the Lagrange function.\nLet be as described in Lemma 5 ###reference_ma5###. If assumptions 1 ###reference_umption1###5 ###reference_umption5### hold, are -smooth functions, , , and , then\nLemma 7 ###reference_ma7### establishes that the average difference between and is . Expanding the function, , and utilizing the update rule of the Lagrange multiplier, we achieve the global convergence for the objective and the constraint in Theorem 1 ###reference_orem1### (stated below). In its proof, Lemma 18 ###reference_ma18### (stated in the appendix) serves as an important tool in disentangling the convergence rates of regret and constraint violation. Interestingly, Lemma 18 is built upon the strong duality property of the unparameterized optimization (2 ###reference_###) and has no apparent direct connection with the parameterized setup.\nConsider the same parameters as in Lemma 7 ###reference_ma7### and set , . We have,\nwhere is a solution to (2 ###reference_###). In the above bounds, we write only the dominating terms of .\nTheorem 1 ###reference_orem1### establishes convergence rates for both the objective and the constraint violation."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "5",
|
| 31 |
+
"parent_section_id": null,
|
| 32 |
+
"section_name": "Regret and Violation Analysis",
|
| 33 |
+
"text": "In this section, we use the convergence result of the previous section to bound the expected regret and constraint violation of Algorithm 1 ###reference_###. Note that the regret and constraint violation decompose as,\nwhere . Observe that the expectation of the first terms in regret and violation can be bounded by Theorem 1 ###reference_orem1###. The expectation of the second term in regret and violation can be expanded as follows,\nwhere . Equality uses the Bellman equation and follows from the definition of . The first term in the last line of Eq. (24 ###reference_###) can be upper bounded by Lemma 8 ###reference_ma8### (stated below). On the other hand, the second term can be upper bounded as using Lemma 9 ###reference_ma9###.\nIf assumptions 1 ###reference_umption1### and 3 ###reference_umption3### hold, then for where , the following inequalities hold , and sufficiently large :\nwhere , and is an arbitrary sequence of states.\nLemma 8 ###reference_ma8### states that the obtained policy parameters are such that the average consecutive difference in the sequence , decreases with time horizon, . We would like to emphasize that Lemma 8 ###reference_ma8### works for both reward and constraint functions. Hence, we can prove our regret guarantee and constraint violation as shown below.\nIf assumptions 1 ###reference_umption1###5 ###reference_umption5### hold, \u2019s are -smooth, and are sufficiently large, then our proposed Algorithm 1 ###reference_### achieves the following expected regret and constraint violation bounds with learning rates and .\nThe detailed expressions of these bounds are provided in the Appendix. Here, we keep only those terms that emphasize the order of . Note that our result outperforms the state-of-the-art model-free tabular result in average-reward CMDP [8 ###reference_b8###]. However, our regret bound is worse than that achievable in average reward unconstrained MDP with general parameterization [13 ###reference_b13###]. 
Interestingly, the gap between the convergence results of constrained and unconstrained setups is a common observation across the literature. For example, in the tabular model-free average reward MDP, the state-of-the-art regret bound for unconstrained setup, [17 ###reference_b17###], is better than that in the constrained setup, [8 ###reference_b8###]."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "6",
|
| 37 |
+
"parent_section_id": null,
|
| 38 |
+
"section_name": "Conclusions",
|
| 39 |
+
"text": "This paper establishes the first sublinear regret and constraint violation bounds in the average reward CMDP setup with general parametrization (and do not assume the underlying constrained Markov Decision Process (CMDP) to be tabular or linear). We show that our proposed algorithm achieves regret and constraint violation bounds where is the time horizon. Note that the state of the art in unconstrained counterpart is . Closing this gap by designing more efficient algorithms is an open question in the average reward CMDP literature with the general parametrization. Moreover, our current algorithm requires the knowledge of mixing time. Relaxing such assumptions is another important future direction in realistic settings. For further discussions on future work directions, the readers are referred to [35 ###reference_b35###]."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "7",
|
| 43 |
+
"parent_section_id": null,
|
| 44 |
+
"section_name": "Acknowledgement",
|
| 45 |
+
"text": "This research was supported in part by the National Science Foundation under grant CCF-2149588 and Cisco, Inc."
|
| 46 |
+
}
|
| 47 |
+
],
|
| 48 |
+
"appendix": [
|
| 49 |
+
{
|
| 50 |
+
"section_id": "Appendix 1",
|
| 51 |
+
"parent_section_id": null,
|
| 52 |
+
"section_name": "Appendix A Proofs for Lemmas in Section 3",
|
| 53 |
+
"text": "Since the first step of the proof works in the same way for functions and , we use the generic notations where and derive the following.\nwhere the step (a) is a consequence of and the Bellman equation. Multiplying both sides by , taking a sum over , and rearranging the terms, we obtain the following.\nwhere uses the fact that is a stationary distribution. Note that,\nWe can, therefore, replace the function in the policy gradient with the advantage function , . Thus,\nThe proof is completed using the definitions of and .\nThe proof is similar to the proof of [17 ###reference_b17###, Lemma 6]. Consider the th epoch and assume that is denoted as for notational convenience. Let, be the number of disjoint sub-trajectories of length that start with the state and are at least distance apart (found by Algorithm 2 ###reference_###). Let, be the sum of rewards or constraint ( accordingly) observed in the th sub-trajectory and denote its starting time. The advantage function estimate is,\nNote the following,\nwhere follows from the definition of as given in , is an application of the definition of given in , and follows from the Bellman equation. Define the following quantity.\nUsing Lemma 10 ###reference_ma10###, we get which implies, . Observe that,\nwhere . Using the bound on , we derive, , which implies,\nNote that (35 ###reference_###) cannot be directly used to bound the bias of . This is because the random variable is correlated with the variables . To decorrelate them, imagine a CMDP where the state distribution resets to the stationary distribution, after exactly time steps since the completion of a sub-trajectory. In other words, if a sub-trajectory starts at , and ends at , then the system \u2018rests\u2019 for additional steps before rejuvenating with the state distribution, at . Clearly, the wait time between the reset after the th sub-trajectory and the start of the th sub-trajectory is, , . 
Let be the difference between the start time of the th epoch and the start time of the first sub-trajectory. Note that,\nonly depends on the initial state, and the induced transition function, ,\n, where , depends on the stationary distribution, , and the induced transition function, ,\nonly depends on as other segments of the epoch have fixed length, .\nClearly, in this imaginary CMDP, the sequence, , and hence, is independent of . Let, denote the expectation operation and denote the probability of events in this imaginary system. Define the following.\nwhere is given in . Note that we have suppressed the dependence on , , and while defining to remove clutter. Using , one can write . Moreover,\nwhere uses the bound derived in , and the fact that are zero mean independent random variables conditioned on . Note that almost surely, via Lemma 9 ###reference_ma9###, and as shown in . Combining, we get, (see the definition of in (36 ###reference_###)). Invoking this bound into , we get the following result.\nNote that, one can use Lemma 11 ###reference_ma11### to bound the following violation probability.\nwhere is a consequence of the fact that for sufficiently large . Finally, note that, if , where is defined as,\nthen there exists at least one that exceeds which can happen with the following maximum probability according to Lemma 11 ###reference_ma11###.\nThe above probability bound can be used to obtain the following result,\nInjecting and into , we finally obtain the following.\nEq. demonstrates that our desired inequality is obeyed in the imaginary system. We now need a mechanism to translate this result to our actual CMDP. Note that where , and . We have,\nThe last inequality uses the non-negativity of . Observe that, for a fixed sequence, , we have,\nThe difference between and arises because , . Note that the ratio of these two terms can be bounded as follows,\nwhere is a consequence of Lemma 10 ###reference_ma10###. We have,\nwhere uses the fact that . 
Combining and , we get,\nwhere follows from . Using the definition of , we get,\nThis concludes the proof.\n\u220e"
|
| 54 |
+
},
|
| 55 |
+
{
|
| 56 |
+
"section_id": "Appendix 2",
|
| 57 |
+
"parent_section_id": null,
|
| 58 |
+
"section_name": "Appendix B Proofs for the Section of Global Convergence Analysis",
|
| 59 |
+
"text": "Recall from Eq. (15 ###reference_###) that,\nDefine the following quantity,\nwhere is the starting time of the th epoch. Note that the true gradient is given by,\nUsing Assumption 3 ###reference_umption3###, Lemma 9 ###reference_ma9###, and , one can exhibit that , which implies . Applying Lemma 14 ###reference_ma14###, one, therefore, arrives at\nFinally, the difference, can be bounded as follows.\nwhere follows from Assumption 3 ###reference_umption3### and Jensen\u2019s inequality whereas follows from Lemma 2 ###reference_ma2###. Combining, and , we conclude the result.\n\u220e\nUsing the Lemma 12 ###reference_ma12###, it is obvious to see that\nWe conclude the lemma using the definition of and .\n\u220e\nWe start with the definition of KL divergence.\nwhere the step (a) holds by Assumption 3 ###reference_umption3### and step (b) holds by Lemma 4 ###reference_ma4###. Step (c) uses the convexity of the function . Finally, step (d) comes from the Assumption 4 ###reference_umption4###. Rearranging items, we have\nSumming from to , using the non-negativity of KL divergence and dividing the resulting expression by , we get the desired result.\n\u220e\nBy the -smooth property of the objective function and constraint function, we know that is a -smooth function. Thus,\nwhere step (a) follows from the fact that and inequality (b) holds due to the Cauchy-Schwarz inequality. Now, adding on both sides, we have\nwhere (a) holds by the definition of and step (b) is true because and where the last inequality uses the fact that . Summing over , we have,\nwhich leads to the following.\nRearranging the terms and using due to the dual update, we arrive at the following.\nChoosing and dividing both sides by , we conclude the result.\nRecall that . Thus,\nThis completes the proof.\n\u220e"
|
| 60 |
+
},
|
| 61 |
+
{
|
| 62 |
+
"section_id": "Appendix 3",
|
| 63 |
+
"parent_section_id": null,
|
| 64 |
+
"section_name": "Appendix C Proofs for the Regret and Violation Analysis",
|
| 65 |
+
"text": "Using Taylor\u2019s expansion, we can write the following , .\nwhere is some convex combination111Note that, in general, is dependent on . of and and results from Assumption 3 ###reference_umption3###. This concludes the first statement. Applying (80 ###reference_###) and Lemma 12 ###reference_ma12###, we obtain the following for .\nInequality uses Lemma 9 ###reference_ma9### and the update rule . Step holds by the Cauchy inequality and Jensen inequality whereas can be derived using and substituting . This establishes the second statement. Next, recall from that for any policy , .\nNote that, for any policy parameter , and any state , the following holds.\nDefine the following quantity.\nLemma 10 ###reference_ma10### states that for sufficiently large , we have for any policy and state . Combining this result with the fact that the function is absolutely bounded in , we obtain,\nwhere follows from (81 ###reference_###) and substituting . For the first term, note that,\nInequality holds since every row of sums to and . Moreover, invoking (80 ###reference_###), and the parameter update rule , we get,\nPlugging the above result into and using a recursive argument, we get,\nFinally, we have\nwhere follows from (22 ###reference_###). Moreover, notice that,\nwhere follows from (80 ###reference_###) and the update rule whereas is a consequence of (22 ###reference_###). Combining (84 ###reference_###), (86 ###reference_###), and (87 ###reference_###), we establish the third statement.\n\u220e\nRecall the decomposition of the regret in section 5 ###reference_### and take the expectation.\nUsing the result in (78 ###reference_###), Lemma 8 ###reference_ma8### and Lemma 9 ###reference_ma9###, we get,\nSimilarly, for the constraint violation, we have\nUsing the result in (79 ###reference_###), Lemma 8 ###reference_ma8### and Lemma 9 ###reference_ma9###, we get,\nThis concludes the theorem.\n\u220e"
|
| 66 |
+
},
|
| 67 |
+
{
|
| 68 |
+
"section_id": "Appendix 4",
|
| 69 |
+
"parent_section_id": null,
|
| 70 |
+
"section_name": "Appendix D Some Auxiliary Lemmas for the Proofs",
|
| 71 |
+
"text": "[17 ###reference_b17###, Lemma 14] For any ergodic MDP with mixing time , the following holds , any policy and .\n[17 ###reference_b17###, Corollary 13.2] Let be defined as written below for an arbitrary policy .\nIf , we have the following inequality : .\n[17 ###reference_b17###, Lemma 16] Let be a certain period of an epoch of Algorithm 2 ###reference_### with length . Then for any , the probability that the algorithm never visits in is upper bounded by\n[17 ###reference_b17###, Lemma 15] The difference of the values of the function , at policies and , is\n[6 ###reference_b6###, Lemma 7] The term for any is a good estimator of , which means\n[36 ###reference_b36###, Lemma A.6] Let be a policy parameter. Fix a trajectory generated by following the policy starting from some initial state . Let, be the gradient that we wish to estimate over , and is a function such that . Assume that , , . Define . If , then the following holds as long as ,\n[37 ###reference_b37###, Lemma 3]\n\nFor convenience, we rewrite the unparameterized problem (2 ###reference_###).\nDefine as the optimal solution to the above problem. Define the associated dual function as\nand denote . We have the following strong duality property for the unparameterized problem whenever Assumption 2 ###reference_umption2### holds.\nAlthough the strong duality holds for the unparameterized problem, the same is not true for parameterized class . To formalize this statement, define the dual function associated with the parameterized problem as follows.\nand denote . The lack of strong duality states that, in general, where is a solution of the parameterized constrained optimization (3 ###reference_###). 
However, the parameter , as we demonstrate below, must obey some restrictions.\nUnder Assumption 2 ###reference_umption2###, the optimal dual variable for the parameterized problem is bounded as\nThe proof follows the approach in [37 ###reference_b37###, Lemma 3], but is revised to the general parameterization setup. Let be a sublevel set of the dual function for . If is non-empty, then for any ,\nwhere is a Slater point in Assumption 2 ###reference_umption2###. Thus, . If we take , then we have , which proves the Lemma. The last inequality holds since for any policy, .\n\u220e\nSince the above inequality holds for arbitrary , we also have, . Define . Using the strong duality property of the unparameterized problem (97 ###reference_###), we establish the following property of the function, .\nAssume that the Assumption 2 ###reference_umption2### holds, we have for any ,\nBy the definition of , we have . With a slight abuse of notation, denote . By the strong duality stated in Lemma 15 ###reference_ma15###, we have the following for any .\nThus, for any ,\nMaximizing the right-hand side of this inequality over yields\nThis completes the proof of the lemma.\n\u220e\nWe note that a similar result was shown in [38 ###reference_b38###, Lemma 15]. However, the setup of the stated paper is different from that of ours. Specifically, [38 ###reference_b38###] considers a tabular setup with peak constraints. Note that Lemma 17 ###reference_ma17### has no direct connection with the parameterized setup since its proof uses strong duality and the function, , is defined via a constrained optimization over the entire policy set, , rather than the parameterized policy set. Interestingly, however, the relationship between and leads to the lemma stated below which turns out to be pivotal in establishing regret and constraint violation bounds in the parameterized setup.\nLet Assumption 2 ###reference_umption2### hold. For any constant , if there exists a and such that , then\nLet . 
Using the definition of , one can write,\nCombining Eq. (106 ###reference_6###) and (108 ###reference_8###), we obtain the following.\nThe condition in the Lemma leads to,\nFinally, we have,\nwhich completes the proof.\n\u220e"
|
| 72 |
+
}
|
| 73 |
+
],
|
| 74 |
+
"tables": {
|
| 75 |
+
"1": {
|
| 76 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S1.T1\">\n<div class=\"ltx_inline-block ltx_align_center ltx_transformed_outer\" id=\"S1.T1.14\" style=\"width:433.6pt;height:117.6pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(-48.6pt,13.2pt) scale(0.816959367327185,0.816959367327185) ;\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S1.T1.14.14\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S1.T1.14.14.15.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_l ltx_border_r ltx_border_t\" id=\"S1.T1.14.14.15.1.1\">Algorithm</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S1.T1.14.14.15.1.2\">Regret</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S1.T1.14.14.15.1.3\">Violation</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S1.T1.14.14.15.1.4\">Model-free</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S1.T1.14.14.15.1.5\">Setting</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S1.T1.2.2.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S1.T1.2.2.2.3\">Algorithm 1 in <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2402.02042v3#bib.bib6\" title=\"\">6</a>]</cite>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.1.1.1.1\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.2.2.2.2\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.2.2.2.4\">No</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.2.2.2.5\">Tabular</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.4.4.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r 
ltx_border_t\" id=\"S1.T1.4.4.4.3\">Algorithm 2 in <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2402.02042v3#bib.bib6\" title=\"\">6</a>]</cite>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.3.3.3.1\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.4.4.4.2\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.4.4.4.4\">No</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.4.4.4.5\">Tabular</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.6.6.6\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S1.T1.6.6.6.3\">UC-CURL and PS-CURL <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2402.02042v3#bib.bib5\" title=\"\">5</a>]</cite>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.5.5.5.1\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.6.6.6.2\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.6.6.6.4\">No</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.6.6.6.5\">Tabular</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.8.8.8\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S1.T1.8.8.8.3\">Algorithm 2 in <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2402.02042v3#bib.bib9\" title=\"\">9</a>]</cite>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.7.7.7.1\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.8.8.8.2\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.8.8.8.4\">No</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.8.8.8.5\">Linear MDP</td>\n</tr>\n<tr 
class=\"ltx_tr\" id=\"S1.T1.10.10.10\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S1.T1.10.10.10.3\">Algorithm 3 in <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2402.02042v3#bib.bib9\" title=\"\">9</a>]</cite>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.9.9.9.1\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.10.10.10.2\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.10.10.10.4\">No</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.10.10.10.5\">Linear MDP</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.12.12.12\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S1.T1.12.12.12.3\">Triple-QA <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2402.02042v3#bib.bib8\" title=\"\">8</a>]</cite>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.11.11.11.1\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.12.12.12.2\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.12.12.12.4\">Yes</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S1.T1.12.12.12.5\">Tabular</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S1.T1.14.14.14\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_l ltx_border_r ltx_border_t\" id=\"S1.T1.14.14.14.3\">This paper</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S1.T1.13.13.13.1\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S1.T1.14.14.14.2\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S1.T1.14.14.14.4\">Yes</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" 
id=\"S1.T1.14.14.14.5\">General Parameterization</td>\n</tr>\n</tbody>\n</table>\n</span></div>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span><span class=\"ltx_text\" id=\"S1.T1.16.1\" style=\"font-size:90%;\">This table summarizes the different model-based and model-free state-of-the-art algorithms available in the literature for average reward CMDPs. We note that our proposed algorithm is the first to analyze the regret and constraint violation for average reward CMDP with general parametrization. Here, the parameter refers to the dimension of the feature map for linear MDPs.</span></figcaption>\n</figure>",
|
| 77 |
+
"capture": "Table 1: This table summarizes the different model-based and model-free state-of-the-art algorithms available in the literature for average reward CMDPs. We note that our proposed algorithm is the first to analyze the regret and constraint violation for average reward CMDP with general parametrization. Here, the parameter refers to the dimension of the feature map for linear MDPs."
|
| 78 |
+
}
|
| 79 |
+
},
|
| 80 |
+
"image_paths": {},
|
| 81 |
+
"validation": true,
|
| 82 |
+
"references": [
|
| 83 |
+
{
|
| 84 |
+
"1": {
|
| 85 |
+
"title": "Cmix: Deep multi-agent reinforcement learning with peak and average constraints.",
|
| 86 |
+
"author": "Liu, C., N. Geng, et al.",
|
| 87 |
+
"venue": "In Machine Learning and Knowledge Discovery in Databases. Research Track: European Conference, ECML PKDD 2021, Bilbao, Spain, September 13\u201317, 2021, Proceedings, Part I 21, pages 157\u2013173. Springer, 2021.",
|
| 88 |
+
"url": null
|
| 89 |
+
}
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"2": {
|
| 93 |
+
"title": "Deeppool: Distributed model-free algorithm for ride-sharing using deep reinforcement learning.",
|
| 94 |
+
"author": "Al-Abbasi, A. O., A. Ghosh, V. Aggarwal.",
|
| 95 |
+
"venue": "IEEE Transactions on Intelligent Transportation Systems, 20(12):4714\u20134727, 2019.",
|
| 96 |
+
"url": null
|
| 97 |
+
}
|
| 98 |
+
},
|
| 99 |
+
{
|
| 100 |
+
"3": {
|
| 101 |
+
"title": "Cooperating graph neural networks with deep reinforcement learning for vaccine prioritization.",
|
| 102 |
+
"author": "Ling, L., W. U. Mondal, S. V. Ukkusuri.",
|
| 103 |
+
"venue": "arXiv preprint arXiv:2305.05163, 2023.",
|
| 104 |
+
"url": null
|
| 105 |
+
}
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"4": {
|
| 109 |
+
"title": "Achieving zero constraint violation for constrained reinforcement learning via conservative natural policy gradient primal-dual algorithm.",
|
| 110 |
+
"author": "Bai, Q., A. S. Bedi, V. Aggarwal.",
|
| 111 |
+
"venue": "In Proceedings of the AAAI Conference on Artificial Intelligence, pages 6737\u20136744. 2023.",
|
| 112 |
+
"url": null
|
| 113 |
+
}
|
| 114 |
+
},
|
| 115 |
+
{
|
| 116 |
+
"5": {
|
| 117 |
+
"title": "Concave utility reinforcement learning with zero-constraint violations.",
|
| 118 |
+
"author": "Agarwal, M., Q. Bai, V. Aggarwal.",
|
| 119 |
+
"venue": "Transactions on Machine Learning Research, 2022.",
|
| 120 |
+
"url": null
|
| 121 |
+
}
|
| 122 |
+
},
|
| 123 |
+
{
|
| 124 |
+
"6": {
|
| 125 |
+
"title": "Learning infinite-horizon average-reward markov decision process with constraints.",
|
| 126 |
+
"author": "Chen, L., R. Jain, H. Luo.",
|
| 127 |
+
"venue": "In International Conference on Machine Learning, pages 3246\u20133270. PMLR, 2022.",
|
| 128 |
+
"url": null
|
| 129 |
+
}
|
| 130 |
+
},
|
| 131 |
+
{
|
| 132 |
+
"7": {
|
| 133 |
+
"title": "Regret guarantees for model-based reinforcement learning with long-term average constraints.",
|
| 134 |
+
"author": "Agarwal, M., Q. Bai, V. Aggarwal.",
|
| 135 |
+
"venue": "In Uncertainty in Artificial Intelligence, pages 22\u201331. PMLR, 2022.",
|
| 136 |
+
"url": null
|
| 137 |
+
}
|
| 138 |
+
},
|
| 139 |
+
{
|
| 140 |
+
"8": {
|
| 141 |
+
"title": "A provably-efficient model-free algorithm for infinite-horizon average-reward constrained markov decision processes.",
|
| 142 |
+
"author": "Wei, H., X. Liu, L. Ying.",
|
| 143 |
+
"venue": "In Proceedings of the AAAI Conference on Artificial Intelligence, pages 3868\u20133876. 2022.",
|
| 144 |
+
"url": null
|
| 145 |
+
}
|
| 146 |
+
},
|
| 147 |
+
{
|
| 148 |
+
"9": {
|
| 149 |
+
"title": "Achieving sub-linear regret in infinite horizon average reward constrained mdp with linear function approximation.",
|
| 150 |
+
"author": "Ghosh, A., X. Zhou, N. Shroff.",
|
| 151 |
+
"venue": "In The Eleventh International Conference on Learning Representations. 2023.",
|
| 152 |
+
"url": null
|
| 153 |
+
}
|
| 154 |
+
},
|
| 155 |
+
{
|
| 156 |
+
"10": {
|
| 157 |
+
"title": "On the theory of policy gradient methods: Optimality, approximation, and distribution shift.",
|
| 158 |
+
"author": "Agarwal, A., S. M. Kakade, J. D. Lee, G. Mahajan.",
|
| 159 |
+
"venue": "The Journal of Machine Learning Research, 22(1):4431\u20134506, 2021.",
|
| 160 |
+
"url": null
|
| 161 |
+
}
|
| 162 |
+
},
|
| 163 |
+
{
|
| 164 |
+
"11": {
|
| 165 |
+
"title": "Improved sample complexity analysis of natural policy gradient algorithm with general parameterization for infinite horizon discounted reward markov decision processes.",
|
| 166 |
+
"author": "Mondal, W. U., V. Aggarwal.",
|
| 167 |
+
"venue": "In International Conference on Artificial Intelligence and Statistics (AISTATS). 2024.",
|
| 168 |
+
"url": null
|
| 169 |
+
}
|
| 170 |
+
},
|
| 171 |
+
{
|
| 172 |
+
"12": {
|
| 173 |
+
"title": "Sample-efficient constrained reinforcement learning with general parameterization.",
|
| 174 |
+
"author": "\u2014.",
|
| 175 |
+
"venue": "arXiv preprint arXiv:2405.10624, 2024.",
|
| 176 |
+
"url": null
|
| 177 |
+
}
|
| 178 |
+
},
|
| 179 |
+
{
|
| 180 |
+
"13": {
|
| 181 |
+
"title": "Regret analysis of policy gradient algorithm for infinite horizon average reward markov decision processes.",
|
| 182 |
+
"author": "Bai, Q., W. U. Mondal, V. Aggarwal.",
|
| 183 |
+
"venue": "In Proceedings of the AAAI Conference on Artificial Intelligence. 2024.",
|
| 184 |
+
"url": null
|
| 185 |
+
}
|
| 186 |
+
},
|
| 187 |
+
{
|
| 188 |
+
"14": {
|
| 189 |
+
"title": "Natural policy gradient primal-dual method for constrained markov decision processes.",
|
| 190 |
+
"author": "Ding, D., K. Zhang, T. Basar, M. Jovanovic.",
|
| 191 |
+
"venue": "Advances in Neural Information Processing Systems, 33:8378\u20138390, 2020.",
|
| 192 |
+
"url": null
|
| 193 |
+
}
|
| 194 |
+
},
|
| 195 |
+
{
|
| 196 |
+
"15": {
|
| 197 |
+
"title": "Optimistic posterior sampling for reinforcement learning: worst-case regret bounds.",
|
| 198 |
+
"author": "Agrawal, S., R. Jia.",
|
| 199 |
+
"venue": "Advances in Neural Information Processing Systems, 30, 2017.",
|
| 200 |
+
"url": null
|
| 201 |
+
}
|
| 202 |
+
},
|
| 203 |
+
{
|
| 204 |
+
"16": {
|
| 205 |
+
"title": "Near-optimal regret bounds for reinforcement learning.",
|
| 206 |
+
"author": "Auer, P., T. Jaksch, R. Ortner.",
|
| 207 |
+
"venue": "Advances in neural information processing systems, 21, 2008.",
|
| 208 |
+
"url": null
|
| 209 |
+
}
|
| 210 |
+
},
|
| 211 |
+
{
|
| 212 |
+
"17": {
|
| 213 |
+
"title": "Model-free reinforcement learning in infinite-horizon average-reward markov decision processes.",
|
| 214 |
+
"author": "Wei, C.-Y., M. J. Jahromi, H. Luo, H. Sharma, R. Jain.",
|
| 215 |
+
"venue": "In International conference on machine learning, pages 10170\u201310180. PMLR, 2020.",
|
| 216 |
+
"url": null
|
| 217 |
+
}
|
| 218 |
+
},
|
| 219 |
+
{
|
| 220 |
+
"18": {
|
| 221 |
+
"title": "Achieving zero constraint violation for constrained reinforcement learning via primal-dual approach.",
|
| 222 |
+
"author": "Bai, Q., A. S. Bedi, M. Agarwal, A. Koppel, V. Aggarwal.",
|
| 223 |
+
"venue": "In Proceedings of the AAAI Conference on Artificial Intelligence, pages 3682\u20133689. 2022.",
|
| 224 |
+
"url": null
|
| 225 |
+
}
|
| 226 |
+
},
|
| 227 |
+
{
|
| 228 |
+
"19": {
|
| 229 |
+
"title": "Crpo: A new approach for safe reinforcement learning with convergence guarantee.",
|
| 230 |
+
"author": "Xu, T., Y. Liang, G. Lan.",
|
| 231 |
+
"venue": "In International Conference on Machine Learning, pages 11480\u201311491. PMLR, 2021.",
|
| 232 |
+
"url": null
|
| 233 |
+
}
|
| 234 |
+
},
|
| 235 |
+
{
|
| 236 |
+
"20": {
|
| 237 |
+
"title": "Exploration-exploitation in constrained mdps.",
|
| 238 |
+
"author": "Efroni, Y., S. Mannor, M. Pirotta.",
|
| 239 |
+
"venue": "arXiv preprint arXiv:2003.02189, 2020.",
|
| 240 |
+
"url": null
|
| 241 |
+
}
|
| 242 |
+
},
|
| 243 |
+
{
|
| 244 |
+
"21": {
|
| 245 |
+
"title": "Upper confidence primal-dual reinforcement learning for cmdp with adversarial loss.",
|
| 246 |
+
"author": "Qiu, S., X. Wei, Z. Yang, J. Ye, Z. Wang.",
|
| 247 |
+
"venue": "Advances in Neural Information Processing Systems, 33:15277\u201315287, 2020.",
|
| 248 |
+
"url": null
|
| 249 |
+
}
|
| 250 |
+
},
|
| 251 |
+
{
|
| 252 |
+
"22": {
|
| 253 |
+
"title": "A best-of-both-worlds algorithm for constrained mdps with long-term constraints.",
|
| 254 |
+
"author": "Germano, J., F. E. Stradi, G. Genalti, M. Castiglioni, A. Marchesi, N. Gatti.",
|
| 255 |
+
"venue": "arXiv preprint arXiv:2304.14326, 2023.",
|
| 256 |
+
"url": null
|
| 257 |
+
}
|
| 258 |
+
},
|
| 259 |
+
{
|
| 260 |
+
"23": {
|
| 261 |
+
"title": "Imed-rl: Regret optimal learning of ergodic markov decision processes.",
|
| 262 |
+
"author": "Pesquerel, F., O.-A. Maillard.",
|
| 263 |
+
"venue": "In NeurIPS 2022-Thirty-sixth Conference on Neural Information Processing Systems. 2022.",
|
| 264 |
+
"url": null
|
| 265 |
+
}
|
| 266 |
+
},
|
| 267 |
+
{
|
| 268 |
+
"24": {
|
| 269 |
+
"title": "A duality approach for regret minimization in average-award ergodic markov decision processes.",
|
| 270 |
+
"author": "Gong, H., M. Wang.",
|
| 271 |
+
"venue": "In Learning for Dynamics and Control, pages 862\u2013883. PMLR, 2020.",
|
| 272 |
+
"url": null
|
| 273 |
+
}
|
| 274 |
+
},
|
| 275 |
+
{
|
| 276 |
+
"25": {
|
| 277 |
+
"title": "Policy gradient methods for reinforcement learning with function approximation.",
|
| 278 |
+
"author": "Sutton, R. S., D. McAllester, S. Singh, Y. Mansour.",
|
| 279 |
+
"venue": "Advances in neural information processing systems, 12, 1999.",
|
| 280 |
+
"url": null
|
| 281 |
+
}
|
| 282 |
+
},
|
| 283 |
+
{
|
| 284 |
+
"26": {
|
| 285 |
+
"title": "Bandit algorithms.",
|
| 286 |
+
"author": "Lattimore, T., C. Szepesv\u00e1ri.",
|
| 287 |
+
"venue": "Cambridge University Press, 2020.",
|
| 288 |
+
"url": null
|
| 289 |
+
}
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"27": {
|
| 293 |
+
"title": "Optimality and approximation with policy gradient methods in markov decision processes.",
|
| 294 |
+
"author": "Agarwal, A., S. M. Kakade, J. D. Lee, G. Mahajan.",
|
| 295 |
+
"venue": "In Conference on Learning Theory, pages 64\u201366. PMLR, 2020.",
|
| 296 |
+
"url": null
|
| 297 |
+
}
|
| 298 |
+
},
|
| 299 |
+
{
|
| 300 |
+
"28": {
|
| 301 |
+
"title": "On the convergence and sample efficiency of variance-reduced policy gradient method.",
|
| 302 |
+
"author": "Zhang, J., C. Ni, C. Szepesvari, M. Wang.",
|
| 303 |
+
"venue": "Advances in Neural Information Processing Systems, 34:2228\u20132240, 2021.",
|
| 304 |
+
"url": null
|
| 305 |
+
}
|
| 306 |
+
},
|
| 307 |
+
{
|
| 308 |
+
"29": {
|
| 309 |
+
"title": "An improved analysis of (variance-reduced) policy gradient and natural policy gradient methods.",
|
| 310 |
+
"author": "Liu, Y., K. Zhang, T. Basar, W. Yin.",
|
| 311 |
+
"venue": "Advances in Neural Information Processing Systems, 33:7624\u20137636, 2020.",
|
| 312 |
+
"url": null
|
| 313 |
+
}
|
| 314 |
+
},
|
| 315 |
+
{
|
| 316 |
+
"30": {
|
| 317 |
+
"title": "Provably efficient reinforcement learning with linear function approximation.",
|
| 318 |
+
"author": "Jin, C., Z. Yang, Z. Wang, M. I. Jordan.",
|
| 319 |
+
"venue": "In J. Abernethy, S. Agarwal, eds., Proceedings of Thirty Third Conference on Learning Theory, vol. 125 of Proceedings of Machine Learning Research, pages 2137\u20132143. PMLR, 2020.",
|
| 320 |
+
"url": null
|
| 321 |
+
}
|
| 322 |
+
},
|
| 323 |
+
{
|
| 324 |
+
"31": {
|
| 325 |
+
"title": "Neural policy gradient methods: Global optimality and rates of convergence.",
|
| 326 |
+
"author": "Wang, L., Q. Cai, Z. Yang, Z. Wang.",
|
| 327 |
+
"venue": "In International Conference on Learning Representations. 2019.",
|
| 328 |
+
"url": null
|
| 329 |
+
}
|
| 330 |
+
},
|
| 331 |
+
{
|
| 332 |
+
"32": {
|
| 333 |
+
"title": "A general sample complexity analysis of vanilla policy gradient.",
|
| 334 |
+
"author": "Yuan, R., R. M. Gower, A. Lazaric.",
|
| 335 |
+
"venue": "In International Conference on Artificial Intelligence and Statistics, pages 3332\u20133380. PMLR, 2022.",
|
| 336 |
+
"url": null
|
| 337 |
+
}
|
| 338 |
+
},
|
| 339 |
+
{
|
| 340 |
+
"33": {
|
| 341 |
+
"title": "Stochastic policy gradient methods: Improved sample complexity for fisher-non-degenerate policies.",
|
| 342 |
+
"author": "Fatkhullin, I., A. Barakat, A. Kireeva, N. He.",
|
| 343 |
+
"venue": "In International Conference on Machine Learning, pages 9827\u20139869. PMLR, 2023.",
|
| 344 |
+
"url": null
|
| 345 |
+
}
|
| 346 |
+
},
|
| 347 |
+
{
|
| 348 |
+
"34": {
|
| 349 |
+
"title": "Mean-field control based approximation of multi-agent reinforcement learning in presence of a non-decomposable shared global state.",
|
| 350 |
+
"author": "Mondal, W. U., V. Aggarwal, S. V. Ukkusuri.",
|
| 351 |
+
"venue": "Transactions on Machine Learning Research, 2023.",
|
| 352 |
+
"url": null
|
| 353 |
+
}
|
| 354 |
+
},
|
| 355 |
+
{
|
| 356 |
+
"35": {
|
| 357 |
+
"title": "Constrained reinforcement learning with average reward objective: Model-based and model-free algorithms.",
|
| 358 |
+
"author": "Aggarwal, V., W. U. Mondal, Q. Bai.",
|
| 359 |
+
"venue": "Found. Trends Optim., 6(4):193\u2013298, 2024.",
|
| 360 |
+
"url": null
|
| 361 |
+
}
|
| 362 |
+
},
|
| 363 |
+
{
|
| 364 |
+
"36": {
|
| 365 |
+
"title": "Adapting to mixing time in stochastic optimization with markovian data.",
|
| 366 |
+
"author": "Dorfman, R., K. Y. Levy.",
|
| 367 |
+
"venue": "In International Conference on Machine Learning, pages 5429\u20135446. PMLR, 2022.",
|
| 368 |
+
"url": null
|
| 369 |
+
}
|
| 370 |
+
},
|
| 371 |
+
{
|
| 372 |
+
"37": {
|
| 373 |
+
"title": "Convergence and sample complexity of natural policy gradient primal-dual methods for constrained mdps.",
|
| 374 |
+
"author": "Ding, D., K. Zhang, J. Duan, T. Ba\u015far, M. R. Jovanovi\u0107.",
|
| 375 |
+
"venue": "arXiv preprint arXiv:2206.02346, 2022.",
|
| 376 |
+
"url": null
|
| 377 |
+
}
|
| 378 |
+
},
|
| 379 |
+
{
|
| 380 |
+
"38": {
|
| 381 |
+
"title": "Provably sample-efficient model-free algorithm for mdps with peak constraints.",
|
| 382 |
+
"author": "Bai, Q., V. Aggarwal, A. Gattami.",
|
| 383 |
+
"venue": "Journal of Machine Learning Research, 24(60):1\u201325, 2023.",
|
| 384 |
+
"url": null
|
| 385 |
+
}
|
| 386 |
+
}
|
| 387 |
+
],
|
| 388 |
+
"url": "http://arxiv.org/html/2402.02042v3"
|
| 389 |
+
}
|
20241030/2402.02518v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2402.03492v3.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2402.04646v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2402.05369v3.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2402.05379v3.json
ADDED
|
@@ -0,0 +1,645 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"title": "Trade-Offs of Diagonal Fisher Information Matrix Estimators (footnote: The current article is a digital reprint of Soen and Sun (2024).)",
|
| 3 |
+
"abstract": "The Fisher information matrix can be used to characterize the local geometry of\nthe parameter space of neural networks. It elucidates insightful theories and\nuseful tools to understand and optimize neural networks. Given its high\ncomputational cost, practitioners often use random estimators and evaluate only\nthe diagonal entries. We examine two popular estimators whose accuracy and sample\ncomplexity depend on their associated variances. We derive bounds of the\nvariances and instantiate them in neural networks for regression and\nclassification. We navigate trade-offs for both estimators based on analytical\nand numerical studies. We find that the variance quantities depend on the\nnon-linearity w.r.t. different parameter groups and should not be neglected when\nestimating the Fisher information.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Settings",
|
| 9 |
+
"text": "In the parameter space of neural networks (NNs),\ni.e. the neuromanifold Amari (2016 ###reference_b1###),\nthe network weights and biases play the role of a coordinate system and the local metric tensor\ncan be described by the Fisher Information Matrix (FIM).\nAs a result, empirical estimation of the FIM helps reveal the geometry of the loss landscape\nand the intrinsic structure of the neuromanifold.\nUtilizing these insights has lead to efficient optimization\nalgorithms, e.g., the natural gradient Amari (2016 ###reference_b1###) and Adam Kingma and Ba (2015 ###reference_b16###).\nA NN with inputs and stochastic outputs \ncan be specified by a conditional p.d.f. , where \nis the NN\u2019s weights and biases. This paper considers the general\nparametric form\nwhere maps -dimensional inputs to -dimensional exponential family parameters, is a vector of sufficient statistics, is a base measure, and is the log-partition function (normalizing the exponential).\nFor example, if denotes class labels and maps to its corresponding one-hot vectors, then Eq. 1 ###reference_### is associated with a multi-class classification network.\nAssuming that the marginal distribution is parameter-free,\nwe define parametric joint distributions .\nThe (joint) FIM is defined as ,\nwhere\nis the \u2018conditional FIM\u2019.\nThe second equality (*) holds if \u2019s activation functions are in (i.e., is a sufficiently smooth NN).\n does not have this equivalent expression (*) for NNs with ReLU activation functions Soen and Sun (2021 ###reference_b37###).\nBoth and define positive semi-definite (PSD) matrices.\nThe distinction in notation is to emphasize that the joint FIM (depending only on ) is simply the average over individual conditional FIMs (depending on both and ).\nIn practice, the FIM is typically computationally expensive and needs to be estimated.\nGiven and a NN with weights and biases parameterizing , as per Eq. 
1 ###reference_###,\nwe consider two commonly used estimators of the FIM Guo and Spall (2019 ###reference_b11###); Soen and Sun (2021 ###reference_b37###) given by\nwhere and are i.i.d. sampled from . A\nconditional variant of the estimators, denoted as and , can be defined by\nfixing and sampling\n independently from in\nEq. 3 ###reference_### \u2014 details omitted for brevity.\nBoth estimators, and ,\nare random matrices with the same shape as . By Eq. 2 ###reference_###, they are unbiased \u2014 for , this only holds if activations functions are in .\nFollowing Eq. 1 ###reference_###\u2019s setting,\nthe estimation variances of and \ncan be expressed in closed form and upper bounded Soen and Sun (2021 ###reference_b37###). This provides an important, yet not widely discussed, tool for quantifying the estimators\u2019 accuracy Guo and Spall (2019 ###reference_b11###) and hence insights for where / when different estimators should be used.\nDespite this, for deep NNs, neither these variances nor their bounds can be computed efficiently due to the huge dimensionality of .\nThis work focuses on estimating the diagonal entries of the FIM and their associated variances.\nOur results \u2014 including estimators of the FIM, their variances, and their variance bounds \u2014 can be implemented through automatic differentiation.\nThese computational tools empower us to practically explore the trade-offs between the two estimators.\nFor example, Fig. 1 ###reference_### shows natural gradient descent Amari (2016 ###reference_b1###) for generalized linear models on a toy dataset, where is preferable (especially for regression) and suffers from high variance and an unstable learning curve.\nOur analytical results reveal how moments of the output exponential family and gradients of the NN in Eq. 1 ###reference_### affects the FIM estimators. We discover a general decomposition of the estimators\u2019 variances corresponding to the samples of and . 
We investigate different scenarios where each FIM estimator is the preferred one and then connect our analysis to the empirical FIM.\n###figure_1###"
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Related Work",
|
| 15 |
+
"text": "Prior efforts aim to analyze the structure of the FIM of NNs with random weights Pennington and Worah (2018 ###reference_b34###); Karakida et al. (2019 ###reference_b14###; 2021 ###reference_b15###); Amari et al. (2019 ###reference_b3###); Papyan (2020 ###reference_b31###).\nThis body of work hinges on utilizing tools from random matrix theory and spectral analysis,\ncharacterizing the behavior and statistics of the FIM.\nOne insight is that randomly weighted NNs have FIMs with a majority of eigenvalues close to zero; with the other eigenvalues taking large values Karakida et al. (2019 ###reference_b14###; 2021 ###reference_b15###).\nIn our work, the randomness stems from sampling from data distributions \u2014 which follows the principle of Monte Carlo (MC) information geometry Nielsen and Hadjeres (2019 ###reference_b29###) that approximates information geometric quantities via MC estimation. We examine a different subject on how the distribution of the FIM on a matrix manifold is affected by finite sampling of the data distribution.\nIn the literature of NN optimization, a main focus is on deriving a computationally friendly proxy for the FIM.\nOne can consider the unit-wise FIM Ollivier (2015 ###reference_b30###); Le Roux et al. (2007 ###reference_b20###); Sun and Nielsen (2017 ###reference_b40###); Amari et al. (2019 ###reference_b3###)\n(also known as quasi-diagonal FIM Ollivier (2015 ###reference_b30###)), where a block-diagonal approximation of the FIM is taken to capture intra-neuron curvature information.\nOr one can consider the block-diagonal layer-wise FIM where each block corresponds to parameters within a layer\n Kurita (1994 ###reference_b19###); Martens et al. 
(2010 ###reference_b27###); Pascanu and Bengio (2014 ###reference_b32###); Martens and Grosse (2015 ###reference_b26###); Heskes (2000 ###reference_b12###); Ren and Goldfarb (2021 ###reference_b35###); Karakida and Osawa (2020 ###reference_b13###).\nNN optimizers can approximate the inverse FIM Singh and Alistarh (2020 ###reference_b36###) or approximate the product of the inverse FIM and the gradient vector Ren and Goldfarb (2021 ###reference_b35###).\nMuch less attention is paid to how related approximations deviate from the true FIM Guo and Spall (2019 ###reference_b11###); Soen and Sun (2021 ###reference_b37###) or how optimization is affected by such deviation Sun and Spall (2021 ###reference_b42###).\nFor the univariate case, one can study the asymptotic variance of the Fisher\ninformation Guo and Spall (2019 ###reference_b11###) with the central limit theorem. In deep NNs,\nthe estimation variance of the FIM can be derived in closed form and bounded Soen and Sun (2021 ###reference_b37###).\nHowever, our former analysis Soen and Sun (2021 ###reference_b37###) has two limitations:\n(1) the variance tensors are 4D and can not be easily computed;\n(2) only the norm of these tensors are bounded, and\nit is not clear how the variance is distributed among individual parameters.\nThe current work tackles these limitations by focusing on the diagonal elements of the FIM.\nOur results can be computed numerically at a reasonable cost in typical learning settings.\nWe provide novel bounds so that one can quantify\nthe accuracy of the FIM computation w.r.t. individual parameters or subgroup of parameters.\nIssues of utilizing the empirical FIM to approximate the FIM have been highlighted Pascanu and Bengio (2014 ###reference_b32###); Martens (2020 ###reference_b25###). For example, estimators of the FIM do not in general capture any second-order information about the log-likelihood Kunstner et al. 
(2020 ###reference_b18###).\nThe empirical FIM is a biased estimator and can be connected with our unbiased estimators\nvia a generalized definition of the Fisher matrix in Section 6 ###reference_###.\nAlternative to the FIM, the Generalized Gauss-Newton (GGN) matrix \u2014 a Hessian approximator \u2014 was originally motivated through the squared loss for non-linear models Martens (2020 ###reference_b25###). The GGN is equivalent to the FIM when a loss function is taken to be the empirical expectation of the negative log-likelihood of Eq. 1 ###reference_### Heskes (2000 ###reference_b12###); Pascanu and Bengio (2014 ###reference_b32###); Martens (2020 ###reference_b25###)."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "Variance of Diagonal FIM Estimators",
|
| 21 |
+
"text": "In our notations, all vectors such as , , and are column vectors.\nWe use to index random samples and \nand use and to index the NN weights and biases .\nWe shorthand , , and whenever the parameters is clear from context.\nTo be consistent, we use \u2018 conditioning\u2019 to distinguish between jointly calculated values versus conditioned values with fixed .\nBy default, the derivatives are w.r.t. . For example,\n and\n.\nWe adopt Einstein notation to express tensor summations,\nso that an index appearing as both a subscript and a superscript in the same term indicates a summation.\nFor example, denotes .\nFor clarity, we mix standard -sum and Einstein notation.\nWe denote the variance and covariance of random variables by and , respectively.\nBased on the parametric form of the model in Eq. 1 ###reference_###, the diagonal entries of the FIM estimators in Eq. 3 ###reference_### can be written as111This and subsequent derivations can be found in the appendix.:\nCorrespondingly, the \u2019th diagonal entry of the FIM , which\nis the expected value of and ,\nis denoted as .\nNotation is abused in\n, , and \nas they depend on the whole vector rather than solely on .\nClearly , while there is no guarantee for\n which can be negative.\nOur results will be expressed in terms of the (central) moments of :\nwhere \u201c\u201d denotes the tensor product.\nWe denote the covariance of w.r.t. to as \u2014 noting that .\nThe 4D tensor denotes the\n central moment of w.r.t. .\nThese central moments correspond to the cumulants of ,\ni.e. the derivatives of w.r.t. the natural parameters \nof the exponential family.\nTherefore, the derivatives of in and can further be\nwritten in terms of and \nfollowing the chain rule.\nPractically, and involves computing the Jacobian and the Hessian , respectively.\nIn practice, both estimators can be computed via automatic differentiation Paszke et al. (2019 ###reference_b33###); Dangel et al. 
(2020 ###reference_b6###).\nIn terms of complexity, by restricting to just the diagonal elements ,\nwe need to calculate elements (originally for the full FIM). Although the log-partition function for general exponential family distributions can be complicated, for the ones used in NNs\n(determined by the loss functions used in optimization) Soen and Sun (2021 ###reference_b37###)\nthe log-partition function is usually in closed-form; and thus the cumulants and \ncan be calculated efficiently.\nIndeed, the primary cost of the estimators comes from evaluating the gradient information of\nthe NN, given by \nand .\nThe former can be calculated easily. The latter is costly even when restricted to the diagonal elements\nof the FIM. With the Hessian\u2019s quadratic complexity, in practice approximations are used to reduce\nthe computational overhead Becker et al. (1988 ###reference_b4###); Yao et al. (2020 ###reference_b46###; 2021 ###reference_b47###); Elsayed and Mahmood (2022 ###reference_b8###).\nIn this case, additional error and (potentially) variance may be introduced as a result of the Hessian approximation.\nNote, the computational cost of the Hessian can still be manageable for the last few layers close to the output.\nBy the chain rule, we only require a sub-computational graph from the output\nlayer to a certain layer to compute the Hessian of that layer.\nDespite this, there is still a memory cost that scales quadratically with the number of parameters for non-linear activation functions (Dangel et al., 2020 ###reference_b6###).\nThe high cost of Hessian computation does not justify refraining from using .\nDepending on the setting (chosen loss function), an estimator\u2019s variance can outweigh the benefits of lower computational costs Soen and Sun (2021 ###reference_b37###).\nThis is especially true when the FIM is used in an offline setting \u2014 where the Hessian\u2019s cost can be tolerated \u2014 to\nstudy, e.g., the singular structure of the 
neuromanifold Amari et al. (2018 ###reference_b2###); Sun and Nielsen (2019 ###reference_b41###),\nthe curvature of the loss Efron (2018 ###reference_b7###), to quantify model\nsensitivity Nickl et al. (2023 ###reference_b28###), and to evaluate the quality of the local\noptimum Karakida et al. (2019 ###reference_b14###; 2021 ###reference_b15###), etc.\nTo study the quality of and ,\nit is natural to examine the variance of the estimators Soen and Sun (2021 ###reference_b37###):\n,\nwhere \n() is the \u2019th diagonal element of .\nSimilar to and ,\n and \ndepend on the vector and are abuses of notation.\nAn estimator with a smaller variance indicates that it is more accurate and more likely to be close to the true FIM.\nBased on the variance, one can derive sample complexity bounds\nof the diagonal FIM via Chebyshev\u2019s inequality, see for instance Soen and Sun (2021 ###reference_b37###, Section 3.4).\nBy its definition, has a simple closed form,\nwhich was proved in Soen and Sun (2021 ###reference_b37###) and is restated below.\n,\n,\nGiven a fixed , both \nand have an order of ,\nwith denoting the number of samples of .\nThey further depend on two factors: \u2460 the derivatives of the\nparameter-output mapping stored in a \nmatrix, either or ,\nwhere the latter can be expensive to calculate;\nand \u2461 the central moments of ,\nwhose computation only scales with (the number of output units)\nand is independent to .\nFrom an information geometry Amari (2016 ###reference_b1###) perspective, , ,\nand are all pullback tensors of different orders.\nFor example, is the pullback tensor of \nand the singular semi-Riemannian metric Sun and Nielsen (2019 ###reference_b41###).\nThey induce the geometric structures of the neuromanifold (parameterized by\n) based on the corresponding low dimensional structures of the\nexponential family (parameterized by )."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "4",
|
| 25 |
+
"parent_section_id": null,
|
| 26 |
+
"section_name": "Practical Variance Estimation",
|
| 27 |
+
"text": "To further understand the dependencies of the derivative and central moment terms,\nthe FIM and variances of estimators \ncan be bounded to strengthen intuition and to provide a computationally\nconvenient proxy of the interested quantities.\n,\nwhere\n;\n / denotes the minimum / maximum matrix eigenvalue;\nand are defined as\nTo help ground Theorem 4.1 ###reference_theorem1###, we summarize different sufficient statistics quantities for common learning settings in Table 1 ###reference_### \u2014 with further learning setting implications presented in Section 5 ###reference_###.\nNote that Eqs. 8 ###reference_### and 9 ###reference_### (and many subsequent results) can be further generalized for off-diagonal elements. See Appendix C ###reference_### for details.\nCompared to prior work Soen and Sun (2021 ###reference_b37###), Theorem 4.1 ###reference_theorem1###\nprovides bounds for individual elements of the variance tensors, where\nthe NN weights (the derivatives) and sufficient statistics (the eigenvalues)\nare neatly disentangled into a product.\nFrom a technical point of view, this comes from a difference in proof technique:\nwe utilize variational definitions and computations of eigenvalues\nto establish bounds whereas Soen and Sun (2021 ###reference_b37###) primarily applies H\u00f6lder\u2019s inequality.\nWe stress that and in Eq. 10 ###reference_### correspond to tensor eigenvalues iff is a supersymmetric tensor Lim (2005 ###reference_b23###) (a.k.a. totally symmetric tensor), i.e., indices are permutation invariant. In this case, Eq. 10 ###reference_### is exactly the maximum and minimum Z-eigenvalues. These variational forms mirror the Courant-Fischer min-max theorem for symmetric matrices Tao (2012 ###reference_b43###). In the case of Eq. 8 ###reference_###, with , the tensor is not a supersymmetric tensor in general.\nDespite this, we note that the lower bound of Eq. 8 ###reference_### is non-trivial.\nA weaker bound than Eq. 
8 ###reference_###\ncan be established based on the Z-eigenvalue of the\nsupersymmetric tensor .\n,\nThe tensor eigenvalue is typically expensive to calculate.\nHowever in our case, the eigenvalues and on the RHS of\nEqs. 11 ###reference_### and 12 ###reference_### can be calculated via Kolda and Mayo (2014 ###reference_b17###)\u2019s method with complexity.\nIn this paper, we assume is reasonably bounded\nand are mainly concerned with the complexity w.r.t. . From this perspective, all our bounds scale linearly w.r.t. , and thus can be computed efficiently.\nWhen is bounded (e.g. in classification), we can upper bound with , which is easier to calculate.\nSuppose . Then,\nAs long as the sufficient statistics has bounded norm ,\nwe have that . A similar lower bound\ncan be established for the minimum tensor eigenvalue , but this ends up being trivial when applying Corollary 4.2 ###reference_theorem2###\u2019s lower bound, Eq. 11 ###reference_###.\nExamining Theorem 4.1 ###reference_theorem1### reveals several trade-offs.\nAn immediate observation is that the first order gradients of \ncorrespond to the robustness of to parameter misspecification (w.r.t. an input ).\nAs such, from the bounds in Eqs. 7 ###reference_### and 8 ###reference_###, the scale of and will be large when small shifts in parameter space yield large changes in the output .\nAnother observation is how the spectrum of affects the scale of and the estimator variances.\nIn particular, when increases, the scale of decreases but the scale of and increases.\nWhen decreases, then the opposite scaling occurs.\nWith these two observations, there is a tension in how the scale of follows the different variances and .\nThe element-wise FIM follows in terms of the scale of NN derivatives ; at the same time, follows in terms of the spectrum of sufficient statistics moment .\nTypically, is the linear output units: ,\nwhere is the weights of the last layer, and is\nthe second last layer\u2019s output. 
We have\n for\nany in . A smaller variance \nis guaranteed for the last layer regardless of the choice of the exponential family in Eq. 1 ###reference_###.\ndefines the NN mapping w.r.t. the \u2019th neuron in the second last layer,\nwhere and are\nincoming and outgoing links of the interested neuron, respectively;\n is the output of the third last layer;\nand is the activation function.\nThe \u2018constants\u2019 and denote an aggregation of all terms which are independent of and in their respective layers.\nThe Hessian of w.r.t. is\n.\nBy Theorem 4.1 ###reference_theorem1###, \ncan be arbitrarily small depending on\n.\nFor example, if , then\n.\nIn this case, for a neuron in the second last layer, a sufficient condition for \n(and having favored against ) is for\nthe neuron\u2019s pre-activation.\nWhen the pre-activation value is saturated ( or ), we also have that .\nAlternatively, suppose that , a continuous relaxation of , then where .\nThen a sufficient condition for with for a neuron in the second last layer is .\nThese observations are further clarified by looking at related quantities over multiple parameters.\nSo far we have only examined the variance of the FIM element-wise w.r.t. parameters . To study all parameters jointly, we consider the trace variances of the FIM estimators: for any ,\n\ndenotes the trace of the covariance matrix of\n, where extracts a matrix\u2019s diagonal elements into a column vector.\nWe present upper bounds of these joint quantities.\nFor any ,\nwhere\n\nand\n is the Frobenius norm.\nThis upper bound comes from integrating the parameter-wise variances in Theorem 4.1 ###reference_theorem1### and incorporating a trace variance bound which utilizes the full spectrum of the NN derivatives and sufficient statistics quantities. 
This is fully depicted in Theorem D.1 ###reference_theorem1###.\nLower bounds can also be derived in terms of singular values (deferred to the Appendix).\nNote the upper bounds in Corollary 4.6 ###reference_theorem6### can be improved by expressing the function\u2019s first term with singular value quantities.\nHaving the function in Corollary 4.6 ###reference_theorem6### is helpful as it shows a trade-off between two upper bounds: the scale of NN derivatives and versus the spectrum of the sufficient statistic terms. In the case of Eqs. 13 ###reference_### and 15 ###reference_###, the trace of is exactly the sum of all eigenvalues, including .\nThis can be helpful when the scale of the NN derivatives are not bounded by a small value.\nIt should be noted that, by the chain rule, these NN derivatives scale with the overall sharpness / flatness (Li et al., 2018 ###reference_b22###) of the landscape of the loss,\ni.e., the log-likelihood of Eq. 1 ###reference_###.\nFor NNs with large derivatives, the first term of the could yield tight bounds\nof the variance, and one can therefore\navoid dealing with the quadratic scaling of in the second term.\nOn the other hand, if the sharpness of the NN can be controlled, e.g. via sharpness aware minimization (Foret et al., 2021 ###reference_b10###), then one can benefit from the second term of the \nand avoid computing the full spectrum of in the first term."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "5",
|
| 31 |
+
"parent_section_id": null,
|
| 32 |
+
"section_name": "Case Studies",
|
| 33 |
+
"text": "To make our theoretic results more concrete, we consider regression and classification settings,\nwhich correspond to specifying the exponential family in Eq. 1 ###reference_### to an isotropic Gaussian distribution and a\ncategorical distribution, respectively. We include an empirical analysis of NNs trained on MNIST.\nNotably, our analysis considers general multi-dimensional NN output.\nThis extends the case studies of Soen and Sun (2021 ###reference_b37###) which was limited to 1D\ndistributions due to the limitations of their bounds (and their associated\ncomputational costs of dealing with a 4D tensor of the full covariance)."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "6",
|
| 37 |
+
"parent_section_id": null,
|
| 38 |
+
"section_name": "Relationship with the \u201cEmpirical Fisher\u201d",
|
| 39 |
+
"text": "In some scenarios, even the estimators of the diagonal FIM and can be prohibitively expensive.\nPart of the cost comes from requiring label samples for each , as per Eq. 3 ###reference_###. For example, when the FIM is used in an iterative optimization procedure, \u2019s need to be re-sampled at each learning step w.r.t. the current \nalongside their backpropagation (accounting for sampling).\nAs such, alternative \u2018FIM-like\u2019 objects have been explored which replace the samples from with samples from an underlying true (but unknown) data distribution Le Roux et al. (2007 ###reference_b20###); Martens et al. (2010 ###reference_b27###). We define the data\u2019s joint distribution as .\nAnalogous to the FIM, the data Fisher information matrix (DFIM) can be defined as the PSD tensor , with\nwhere denotes the 2nd (non-central) moment of w.r.t. ,\nand \nis the Jacobian of the map .\nIn the special case that , then\n becomes exactly .\nThe DFIM in Eq. 17 ###reference_### is a more general definition.\nCompared to the FIM , it yields a different PSD\ntensor on the parameter space (the neuromanifold) depending on a\ndistribution , which is neither necessarily on the same\nneuromanifold\nnor necessarily parametric at all.\nThe asymmetry in the true data distribution and the empirical one results in different geometric structures (Critchley et al., 1993 ###reference_b5###).\nBy definition, we have\n,\nwhere is the Kullback-Leibler (KL) divergence, or the loss in a parameter learning scenario.\nThe DFIM can be regarded as a surrogate function of the squared gradient of the KL divergence.\nIt is a symmetric covariant tensor and satisfies the same rule w.r.t. reparameterization as the FIM.\nConsider the reparameterization , the DFIM becomes\n.\nNotice that in general. 
As such, there will be a miss-match when utilizing as a substitute for .\nHowever, as learning progresses and becomes more similar to the data\u2019s true labeling posterior , the DFIM will become closer to the FIM.\nIf is defined by the observed samples,\nDFIM gives the widely used \u201cEmpirical Fisher\u201d (Martens, 2020 ###reference_b25###),\nwhose diagonal entries are\nwhere are i.i.d. sampled from . Similar to , an estimator with a fixed input can be considered, denoted as .\nGiven the computational benefits of using the data directly \u2014 bypassing a separate sampling routine \u2014 many popular optimization methods employ the empirical Fisher or its approximation. For instance, the Adam optimizer Kingma and Ba (2015 ###reference_b16###)\nuses the empirical Fisher to approximate the diagonal FIM.\nHowever, switching from sampling to is anything but superficial (Martens, 2020 ###reference_b25###, Chapter 11) \u2014 is not an unbiased estimator of as\n is different from .\nThe biased nature of the empirical Fisher affects the other moments as well. In particular, we do not have the same equivalence of covariance and the metric being pulled back by Sun (2020 ###reference_b39###).\nGiven the conditional data distribution , the covariance of given is given by\nwhere .\nAs a result, although the variance of the estimator takes a similar form to (i.e., Eq. 8 ###reference_###), its sufficient statistic terms do not exclusively consist of central moments. Noting the miss-match in , Lemma 6.1 ###reference_theorem1### reveals an additional term which shifts away from the 2nd central moment of (w.r.t. ). Instead, these sufficient statistic terms correspond to non-central moments of .\nSome corresponding empirical Fisher / DFIM bounds are characterized in Appendix G ###reference_###."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "7",
|
| 43 |
+
"parent_section_id": null,
|
| 44 |
+
"section_name": "Conclusion",
|
| 45 |
+
"text": "We have analyzed two different estimators and for the diagonal entries of the FIM. The variances of\nthese estimators are determined by both the non-linearity of the neural\nnetwork and the moments of the exponential family.\nWe have identified distinct scenarios on which estimator is preferable.\nFor example, ReLU networks can only apply due to a lack of smoothness. As another\nexample, has zero variance in the last layer and thus is\nalways preferable to . Similarly, in the second last\nlayer, has a simple closed form and is potentially preferable\nfor neurons in their linear regions (see Remark 4.5 ###reference_theorem5###).\nIn general, one has to apply Theorem 4.1 ###reference_theorem1### based on the\nspecific neural network and settings and choose the estimator with the smaller\nvariance.\nOur results suggest that, from a variance perspective, uniformly\nutilizing one of the FIM estimators is often suboptimal in NNs.\nOur work has further extended from analyzing the conditional FIM estimators to the joint FIM estimators ; and we have examined the relationship between the investigated estimators and the empirical Fisher.\nFuture directions include extending the analysis of the variance of FIM estimators to block\ndiagonals (e.g. Martens and Grosse (2015 ###reference_b26###); Ren and Goldfarb (2021 ###reference_b35###))\nand adapting current NN optimizers (e.g. Kingma and Ba (2015 ###reference_b16###)) to incorporate the variance of FIM estimators."
|
| 46 |
+
}
|
| 47 |
+
],
|
| 48 |
+
"appendix": [
|
| 49 |
+
{
|
| 50 |
+
"section_id": "Appendix x1",
|
| 51 |
+
"parent_section_id": null,
|
| 52 |
+
"section_name": "Table of Contents",
|
| 53 |
+
"text": "Additional Results\n Appendix A ###reference_###: Natural Gradient Toy Data Example ###reference_### Pg A ###reference_###\n Appendix B ###reference_###: The Conditional Variances in Closed Form ###reference_### Pg B ###reference_###\n Appendix C ###reference_###: Off-Diagonal Variance ###reference_### Pg C ###reference_###\n Appendix D ###reference_###: Bounding the Trace Variance by Full Spectrum ###reference_### Pg D ###reference_###\n Appendix E ###reference_###: Second Central Moment of Categorical Distribution ###reference_### Pg E ###reference_###\n Appendix F ###reference_###: Empirical Results Continued ###reference_### Pg F ###reference_###\n Appendix G ###reference_###: \u201cEmpirical Fisher\u201d Continued ###reference_### Pg G ###reference_###\nProof\n Appendix H ###reference_###: Derivation of Eq. 3 Using Log-Partition Function Derivatives ###reference_### Pg H ###reference_###\n Appendix I ###reference_###: Proof of Eq. 7 ###reference_### Pg I ###reference_###\n Appendix J ###reference_###: Proof of Eq. 8 ###reference_### Pg J ###reference_###\n Appendix K ###reference_###: Proof of Eq. 
9 ###reference_### Pg K ###reference_###\n Appendix L ###reference_###: Proof of Corollary 4.2 ###reference_### Pg L ###reference_###\n Appendix M ###reference_###: Proof of Proposition 4.3 ###reference_### Pg M ###reference_###\n Appendix N ###reference_###: Proof of Corollary 4.6 ###reference_### Pg N ###reference_###\n Appendix O ###reference_###: Proof of Theorem 4.7 ###reference_### Pg O ###reference_###\n Appendix P ###reference_###: Proof of Lemma 4.8 ###reference_### Pg P ###reference_###\n Appendix Q ###reference_###: Proof of Proposition 5.1 ###reference_### Pg Q ###reference_###\n Appendix R ###reference_###: Proof of Theorem 5.2 ###reference_### Pg R ###reference_###\n Appendix S ###reference_###: Proof of Lemma 6.1 ###reference_### Pg S ###reference_###\n Appendix T ###reference_###: Proof of Corollary G.1 ###reference_### Pg T ###reference_###\n Appendix U ###reference_###: Proof of Corollary G.2 ###reference_### Pg U ###reference_###"
|
| 54 |
+
},
|
| 55 |
+
{
|
| 56 |
+
"section_id": "Appendix 1",
|
| 57 |
+
"parent_section_id": null,
|
| 58 |
+
"section_name": "Appendix A Natural Gradient Toy Data Example",
|
| 59 |
+
"text": "The following section describes the data and models of Fig. 1 ###reference_###.\nIn general, the toy data and models constructed consists of taking 1D output\nsetting presented by Section 5 ###reference_###, where the NN \nis a linear function.\nThe 2D input data is sampled from a simple isotropic centered\nGaussian .\nA linear response variable is defined by the following:\nThe outputs of for the cases of regression and classification are\ndifferentiated by how is used in sampling:\nwhere is the logistic function.\nThe model consists of a linear\nfunction; and the exponential family Eq. 1 ###reference_### is chosen to be a 1D\nisotropic Gaussian and binary multinomial distribution (Bernoulli) for\nregression and classification, respectively. This corresponds to\nSection 5 ###reference_### for 1D outputs. Notice that the model exactly\nmatches the data generating function.\nNatural gradient descent (NGD) is taken using both and\n. The estimated FIM utilize only a single \nsample for each input .\nWe use a learning rate of over \nepochs. A training set of data points are sampled. At each iteration of\nNGD, we sample random points from the training set for the update.\nThe test loss is evaluated on a test set of data points sampled.\nLarger version of Fig. 1 ###reference_### with additional variance sum plotted over\ntime is given by Fig. I ###reference_###.\nNote that variance sum is including off diagonals. Further note that the\nvariance is calculated over joint sample in .\n###figure_2### We further present other random seed of the teaser plot in Figs. II ###reference_###, III ###reference_### and IV ###reference_###.\n###figure_3### ###figure_4### ###figure_5###"
|
| 60 |
+
},
|
| 61 |
+
{
|
| 62 |
+
"section_id": "Appendix 2",
|
| 63 |
+
"parent_section_id": null,
|
| 64 |
+
"section_name": "Appendix B The Conditional Variances in Closed Form",
|
| 65 |
+
"text": "We consider the diagonal entries of the conditional FIM and the conditional variances of its estimators in closed form.\nThe proof directly follows from Soen and Sun [2021 ###reference_b37###, Equation 6], Soen and Sun [2021 ###reference_b37###, Theorem 4], and Soen and Sun [2021 ###reference_b37###, Theorem 6].\nIn what follows, we provide a proof of the Lemma utilizing the notation of this paper for completeness.\nWe prove the statement one equation at a time.\nFor Eq. 4 ###reference_###, we consider the following computation.\nUsing Einstein notation and restricting the partial derivative to a component of yields the desired result.\nFor Eq. 5 ###reference_###, we shorthand .\nNote that the estimator can be written as follows:\nThus, we have\nLet us compute each of these terms.\nAnd,\nSimplifying all term yields the result as required.\nFinally, for Eq. 6 ###reference_### we consider the following simplification of the estimator.\nwhere the last line follows from Soen and Sun [2021 ###reference_b37###, Lemma 2] (a result of following an exponential family, see Amari [2016 ###reference_b1###]).\nNotice that the first quantity is a constant w.r.t. the randomness of . As such, we can simplify the variance calculation as follows.\nwhere the second last line follows from the fact that and thus .\nThis yields the desired result.\n\u220e\nLemma 3.1 ###reference_theorem1### shows that, for the former, only depends on 1st order derivatives; while only depends on the 2nd order derivatives. For the latter, depends on both the 2nd and 4th central moments of ; while only depends on the 2nd central moments.\nGiven and , the computational complexity of all diagonal entries is .\nIf and are given,\nthen the computational complexity of the variances in Eqs. 
5 ###reference_### and 6 ###reference_### is respectively and .\nEach requires to evaluate a matrix, either \nor \u2014 which can be expensive to calculate for the latter.\nThis is why we need efficient estimators and / or bounds for the tensors on the LHS of Eqs. 4 ###reference_###, 5 ###reference_### and 6 ###reference_###."
|
| 66 |
+
},
|
| 67 |
+
{
|
| 68 |
+
"section_id": "Appendix 3",
|
| 69 |
+
"parent_section_id": null,
|
| 70 |
+
"section_name": "Appendix C Off-Diagonal Variance",
|
| 71 |
+
"text": "We consider an off-diagonal version of the bound given by Theorem 4.1 ###reference_theorem1###. Notice that in terms of the dependence on neural network weights, the only change is splitting the \u201cresponsibility\u201d of the \u2019th and \u2019th parameter norms.\n,\nwhere\nThe proof follows similarly to Appendices J ###reference_### and K ###reference_###, where the primary difference is just swapping the regular eigenvalue-like quantities with the variational forms.\n\u220e\nIt should be noted that the corresponding lower bounds become trivial as the additional degree of freedom of having an over both and causes the corresponding definition to have negative quantities.\nAlthough it is unclear what the \u201ctensor-like\u201d variational quantity will be, for a matrix, we have the following equivalence.\n, where is the maximum singular value of .\nThe proof follows from optimizing over and separately:\nThis is equivalent to the square root of the maximal eigenvalue of , which is exactly the maximum singular value.\n\u220e\nHence for the we have the following.\n,"
|
| 72 |
+
},
|
| 73 |
+
{
|
| 74 |
+
"section_id": "Appendix 4",
|
| 75 |
+
"parent_section_id": null,
|
| 76 |
+
"section_name": "Appendix D Bounding the Trace Variance by Full Spectrum",
|
| 77 |
+
"text": "For any ,\nwhere denotes the -th singular values, is the \u201creshaped\u201d matrix of defined in Theorem 4.1 ###reference_theorem1### \u2014 i.e. there exists such that for all ,\nand\nThe proof follows from a generalized Ruhe\u2019s trace inequality [Marshall et al., 2011 ###reference_b24###]:\nFor Hermitian matrices, we have that\nWe prove the result for each equations.\nFor readability, we let .\nFor Eq. 22 ###reference_###:\nOne can notice that the trace of the FIM can exactly be expressed as the trace of two matrices.\nThus, noting that the eigenvalue of the \u201csquared\u201d matrix is the matrix\u2019s singular value , with Theorem D.2 ###reference_theorem2###, we have that:\nFor Eq. 23 ###reference_###:\nNoting that .\nFurthermore, we have that\nLet us define the following 3D tensor with .\nThus, again simplifying the eigenvalue of the \u201csquared\u201d matrix, with Theorem D.2 ###reference_theorem2###, we have that:\nFor Eq. 24 ###reference_###:\nSimilar to Eq. 22 ###reference_###, we only need to rearrange the summation.\nNotice that\nthus .\nThus, again simplifying the eigenvalue of the \u201csquared\u201d matrix, with Theorem D.2 ###reference_theorem2###, we have that:\n\u220e"
|
| 78 |
+
},
|
| 79 |
+
{
|
| 80 |
+
"section_id": "Appendix 5",
|
| 81 |
+
"parent_section_id": null,
|
| 82 |
+
"section_name": "Appendix E Second Central Moment of Categorical Distribution",
|
| 83 |
+
"text": "We first notice that the exponential family density is given by,\nand thus also have\nThe first order derivative follows as,\nAs such, the second order derivatives also follow,\nAs such, we have that\n\u220e"
|
| 84 |
+
},
|
| 85 |
+
{
|
| 86 |
+
"section_id": "Appendix 6",
|
| 87 |
+
"parent_section_id": null,
|
| 88 |
+
"section_name": "Appendix F Empirical Results Continued",
|
| 89 |
+
"text": "In the following section we present additional details and results for the experimental verification we conduct in Section 5 ###reference_###.\nWe note that to calculate the diagonal Hessians required for the bounds and empirical FIM calculations, we utilize the BackPACK [Dangel et al., 2020 ###reference_b6###] for PyTorch.\nAdditionally, to calculate the sufficient statistics moment\u2019s spectrum, we explicitly solve the minimum and maximum eigenvalues via their optimization problems.\nFor 2D tensors / matrices, we utilize numpy.linalg.eig. For 4D tensors, we utilize PyTorch Minimize [Feinman, 2021 ###reference_b9###], a wrapper for SciPy\u2019s optimize function.\nWe present Figs. V ###reference_###, VI ###reference_###, VII ###reference_### and VIII ###reference_### which are the exact same experiment run in Section 5 ###reference_###, but with different initial NN weights and random inputs.\nFigures XI ###reference_###, XI ###reference_###, XI ###reference_###, XII ###reference_### and XIII ###reference_### show the experimental results on a 5-layer MLP and log-sigmoid activation function.\nIn most of the cases, the FIM and its associated variances quickly go to zero in the first few epochs.\n###figure_6### ###figure_7### ###figure_8### ###figure_9### ###figure_10### ###figure_11### ###figure_12### ###figure_13### ###figure_14###"
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"section_id": "Appendix 7",
|
| 93 |
+
"parent_section_id": null,
|
| 94 |
+
"section_name": "Appendix G \u201cEmpirical Fisher\u201d Continued",
|
| 95 |
+
"text": "Noting Lemma 6.1 ###reference_theorem1###\u2019s characterization of the covariance, we are able to characterize the variance of the diagonal elements of , denoted as .\nFor any ,\nwhere the 4th (non-central) moment of w.r.t. .\nAs a result of the similarity of the functional forms of the empirical Fisher and the FIM estimator , it is not surprising that Corollary G.1 ###reference_theorem1### is similar to the variance of . Indeed, applying Lemma 6.1 ###reference_theorem1### will give the exact same functional form with the 2nd central moments\nof \nw.r.t. exchanged with 2nd non-central moments\nof\n\nw.r.t. .\n is therefore determined by\nthe 2nd and the 4th\nmoment of\n up to the parameter transformation .\nSubsequently, the bounds presented for (Eqs. 8 ###reference_### and 4.6 ###reference_theorem6###) can be similarly adapted for .\nThe extension of to can also be proven in a similar manner to Theorem 4.7 ###reference_theorem7###.\nGiven samples of and samples of for each sampled,\nwhere is the variance of w.r.t. .\nIf for a set of observations ,\nthen one can directly evaluate the DFIM without sampling and achieve zero\nvariance, i.e., . In this\nscenario, there is a clear trade-off between the estimators of the FIM in\nEq. 3 ###reference_### and the DFIM. The estimators of the FIM are unbiased,\nbut have a variance; while the DFIM has zero variance, but is a biased\napproximation of the FIM."
|
| 96 |
+
},
|
| 97 |
+
{
|
| 98 |
+
"section_id": "Appendix 8",
|
| 99 |
+
"parent_section_id": null,
|
| 100 |
+
"section_name": "Appendix H Derivation of Eq.\u00a03 Using Log-Partition Function Derivatives",
|
| 101 |
+
"text": "In what follows, we derive the alternative equations for and presented in Section 3 ###reference_###.\nThat is, we seek to derive the following equations:\nWe calculate the equations separately.\nFor Eq. 26 ###reference_###, we note that\nwhere we note that which follows from the connection to expected parameters and partition functions of exponential families, see e.g. Soen and Sun [2021 ###reference_b37###].\nThen Eq. 26 ###reference_### follows immediately.\n\u220e\nFor Eq. 27 ###reference_###, we also calculate the derivative:\nThen\nThen Eq. 27 ###reference_### follows immediately.\n\u220e\nAlthough Eq. 27 ###reference_### is useful in practice, i.e., it states an equation which can be calculated via automatic differentiation, in the appendix and proofs we use an alternative equation. In particular, we use\nwhich follows from taking the derivative of in the proof of Eq. 26 ###reference_### (above)."
|
| 102 |
+
},
|
| 103 |
+
{
|
| 104 |
+
"section_id": "Appendix 9",
|
| 105 |
+
"parent_section_id": null,
|
| 106 |
+
"section_name": "Appendix I Proof of Eq.\u00a07",
|
| 107 |
+
"text": "We first begin by proving the follow lemma to bound an matrix.\nLet and , then\nThe proof follows immediately from the Courant-Fischer min-max theorem Tao [2012 ###reference_b43###]. That is,\nThus it follows that:\nThe lower bound follows identically.\nWe note that this can be similarly proven via trace bounds, e.g., Wang et al. [1986 ###reference_b44###].\n\u220e\nNow we can prove Eq. 7 ###reference_###.\nThe proof follows from Lemma 3.1 ###reference_theorem1###, Eq. 4 ###reference_###, and directly applying Lemma I.1 ###reference_theorem1###.\n\u220e"
|
| 108 |
+
},
|
| 109 |
+
{
|
| 110 |
+
"section_id": "Appendix 10",
|
| 111 |
+
"parent_section_id": null,
|
| 112 |
+
"section_name": "Appendix J Proof of Eq.\u00a08",
|
| 113 |
+
"text": "Let us first define the maximum and minimum Z-eigenvalues of a 4-dimensional tensor .\nNow We first prove the following lemma regarding the Z-eigenvalues.\nSuppose is 4-dimensional tensor. Then we have\nThe proof follows similarly to Lemma I.1 ###reference_theorem1###.\nWe simple use the following calculation:\nThe minimum case is proven identically (with the opposite inequality).\n\u220e\nNow we can prove the bounds of Eq. 8 ###reference_###\nFrom Lemma 3.1 ###reference_theorem1###, we have that\nwhere we shorthand\nWe bound two terms.\nwhich follows directly from Lemma J.1 ###reference_mtheorem1###\nWe now bound the second term in a similar way, taking and noting that\nwhich directly gives us,\nwhich follows from Lemma I.1 ###reference_theorem1###.\nThus, together these bounds prove Eq. 8 ###reference_###.\n\u220e"
|
| 114 |
+
},
|
| 115 |
+
{
|
| 116 |
+
"section_id": "Appendix 11",
|
| 117 |
+
"parent_section_id": null,
|
| 118 |
+
"section_name": "Appendix K Proof of Eq.\u00a09",
|
| 119 |
+
"text": "From Lemma 3.1 ###reference_theorem1### we have that,\nThus we get\nwhich follows from Lemma I.1 ###reference_theorem1###.\nThis immediately gives the bound as required."
|
| 120 |
+
},
|
| 121 |
+
{
|
| 122 |
+
"section_id": "Appendix 12",
|
| 123 |
+
"parent_section_id": null,
|
| 124 |
+
"section_name": "Appendix L Proof of Corollary\u00a04.2",
|
| 125 |
+
"text": "The corollary holds from distributing the or and examining how the variational definition of the generalized \u2018eigenvalue\u2018 simplifies under tensor products.\nIndeed, for the minimum case,\nwhere the last line holds from the fact that is PSD (thus the inner Einstein summation is always positive).\nTaking definitions of the types of eigenvalues, gives the statement.\nWe note that the \u2018max\u2019 case follows identically.\nAdditionally, for the lower bound, we can show the non-triviallity of the non-negativity of the minimum eigenvalue.\nWe note that , where .\nThus we have that\nEquality holds from simply looking at the definition of and (as moments).\n\u220e"
|
| 126 |
+
},
|
| 127 |
+
{
|
| 128 |
+
"section_id": "Appendix 13",
|
| 129 |
+
"parent_section_id": null,
|
| 130 |
+
"section_name": "Appendix M Proof of Proposition\u00a04.3",
|
| 131 |
+
"text": "Letting , we note that the maximum eigenvalue is given by,\n\u220e"
|
| 132 |
+
},
|
| 133 |
+
{
|
| 134 |
+
"section_id": "Appendix 14",
|
| 135 |
+
"parent_section_id": null,
|
| 136 |
+
"section_name": "Appendix N Proof of Corollary\u00a04.6",
|
| 137 |
+
"text": "We split up the proof into the two arguments of the various -function.\nFor the right term:\nSuppose that we have a bound such that . Then,\nThus we have,\nTaking the appropriate and from Eqs. 8 ###reference_### and 9 ###reference_### proves the case for Eqs. 13 ###reference_### and 15 ###reference_###.\nFor Eq. 14 ###reference_###, that is taking\nWhere we note that\nFor Eq. 15 ###reference_###, that is taking\nWhere we note that\nFor the left term:\nWe take the largest singular value of the network derivative term. We then further notice that from norm ordering (of the matrix 2-norm).\nTo further elaborate on the Eq. 14 ###reference_### case, we further need to simplify the following:\nwhere the last inequality follows from the norm ordering .\n\u220e"
|
| 138 |
+
},
|
| 139 |
+
{
|
| 140 |
+
"section_id": "Appendix 15",
|
| 141 |
+
"parent_section_id": null,
|
| 142 |
+
"section_name": "Appendix O Proof of Theorem\u00a04.7",
|
| 143 |
+
"text": "To prove the Theorem, we will utilize the law of total variances.\nWe note, that by the premise of the Theorem, we are sampling many samples from and many samples from for each initially sampled.\nTo make this clear, the samples and sampling will be notated by:\nNote that using these samples, our empirical estimators for the FIM (for either estimator) will be of the form:\nfor an appropriately chosen .\nThis also gives:\nNow, we simplify the variance as follows:\nAs required."
|
| 144 |
+
},
|
| 145 |
+
{
|
| 146 |
+
"section_id": "Appendix 16",
|
| 147 |
+
"parent_section_id": null,
|
| 148 |
+
"section_name": "Appendix P Proof of Lemma\u00a04.8",
|
| 149 |
+
"text": "The lower bound holds from just considering the non-negativity of variance. For the upper bound, we utilize the bound directly consider the bounds of Eq. 7 ###reference_###,\n\u220e"
|
| 150 |
+
},
|
| 151 |
+
{
|
| 152 |
+
"section_id": "Appendix 17",
|
| 153 |
+
"parent_section_id": null,
|
| 154 |
+
"section_name": "Appendix Q Proof of Proposition\u00a05.1",
|
| 155 |
+
"text": "We first derive the statistics and presented in \u201cRegression: Isotropic Gaussian Distribution\u201d Section 5 ###reference_###.\nIt follows that from the regression setting, we have that,\nwhere notably, by definition, is independent of learned parameter .\nAs such, we have that:\nNow we note that is exactly as the parameter specifies the mean of the (isotropic) multivariate normal distribution.\nAs such we have that,\nFurthermore, by [Soen and Sun, 2021 ###reference_b37###, Lemma 5], we have that,\nIn summary, we have,\nThe minimum and maximum eigenvalues of follows directly noting that the trace of a matrix is the sum of eigenvalues. As such, from the statistics presented above we have that the minimum and eigenvalue must be .\nThe tensor eigenvalues of follows from the variational definition Eq. 10 ###reference_###. For instance, for the minimum eigenvalue,\nThe maximum eigenvalue is proven identically.\n\u220e"
|
| 156 |
+
},
|
| 157 |
+
{
|
| 158 |
+
"section_id": "Appendix 18",
|
| 159 |
+
"parent_section_id": null,
|
| 160 |
+
"section_name": "Appendix R Proof of Theorem\u00a05.2",
|
| 161 |
+
"text": "We first prove the following corollary which connects the maximum eigenvalues of to the maximum eigenvalues of .\nSuppose that the exponential family in Eq. 1 ###reference_### is specified by a categorical distribution. Then,\nAs we are consider a categorical distribution we have that\nThus we have that . Furthermore, note that the maximum -norm that we can have is . Note that this is tight when the positive and negative mass are placed only two distinct coordinates, i.e., .\nThus using Proposition 4.3 ###reference_theorem3###, the result follows.\n\u220e\nNow by using Corollaries 4.2 ###reference_theorem2### and R.1 ###reference_mtheorem1###, the remainder of the proof, all we require is the bounding of .\nThe first term in the maximum eigenvalue follows from,\nThe second term in the maximum follows from the trace of being the sum of total eigenvalues.\n\u220e"
|
| 162 |
+
},
|
| 163 |
+
{
|
| 164 |
+
"section_id": "Appendix 19",
|
| 165 |
+
"parent_section_id": null,
|
| 166 |
+
"section_name": "Appendix S Proof of Lemma\u00a06.1",
|
| 167 |
+
"text": "The proof follows from the standard definition of covariance.\nDenoting , we have:\nAlso expanding :\nThus we have\nAs required.\n\u220e"
|
| 168 |
+
},
|
| 169 |
+
{
|
| 170 |
+
"section_id": "Appendix 20",
|
| 171 |
+
"parent_section_id": null,
|
| 172 |
+
"section_name": "Appendix T Proof of Corollary\u00a0G.1",
|
| 173 |
+
"text": "We calculate the variance:\nEach of the terms can be calculated:\nLet .\nAnd:\nTogether with Lemma 6.1 ###reference_theorem1### proves the theorem.\n\u220e"
|
| 174 |
+
},
|
| 175 |
+
{
|
| 176 |
+
"section_id": "Appendix 21",
|
| 177 |
+
"parent_section_id": null,
|
| 178 |
+
"section_name": "Appendix U Proof of Corollary\u00a0G.2",
|
| 179 |
+
"text": "The proof follows identically to that of Theorem 4.7 ###reference_theorem7### with densities changed.\n\u220e"
|
| 180 |
+
}
|
| 181 |
+
],
|
| 182 |
+
"tables": {
|
| 183 |
+
"1": {
|
| 184 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S3.T1\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S3.T1.20.3.1\" style=\"font-size:90%;\">Table 1</span>: </span><span class=\"ltx_text\" id=\"S3.T1.4.2\" style=\"font-size:90%;\">Exponential family statistics with eigenvalue upper bounds for moments. For classification, denotes the softmax of logit . \u2020\u00a0denotes exact eigenvalues rather than upper bounds.</span></figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S3.T1.18\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S3.T1.8.4\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_r ltx_border_tt\" id=\"S3.T1.8.4.5\">Setting</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_tt\" id=\"S3.T1.8.4.6\">Exp. Family</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S3.T1.5.1.1\">Output \n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S3.T1.6.2.2\">Sufficient Statistic \n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S3.T1.7.3.3\">UB \n</th>\n<th class=\"ltx_td ltx_nopad_r ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S3.T1.8.4.4\">UB \n</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S3.T1.12.8\">\n<td class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S3.T1.12.8.5\">Regression</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T1.12.8.6\">(Iso.) 
Gaussian</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.9.5.1\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.10.6.2\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.11.7.3\">\n\u2020</td>\n<td class=\"ltx_td ltx_nopad_r ltx_align_center ltx_border_t\" id=\"S3.T1.12.8.4\">\n\u2020</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.16.12\">\n<td class=\"ltx_td ltx_align_left ltx_border_bb ltx_border_r\" id=\"S3.T1.16.12.5\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S3.T1.16.12.5.1\">Classification</span></td>\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"S3.T1.16.12.6\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S3.T1.16.12.6.1\">Categorical</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T1.13.9.1\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S3.T1.13.9.1.1\"></span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T1.14.10.2\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S3.T1.14.10.2.1\"></span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T1.15.11.3\"></td>\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S3.T1.16.12.4\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.18.14\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T1.17.13.1\"></td>\n<td class=\"ltx_td ltx_nopad_r ltx_align_center ltx_border_bb\" id=\"S3.T1.18.14.2\"></td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 185 |
+
"capture": "Table 1: Exponential family statistics with eigenvalue upper bounds for moments. For classification, denotes the softmax of logit . \u2020\u00a0denotes exact eigenvalues rather than upper bounds."
|
| 186 |
+
}
|
| 187 |
+
},
|
| 188 |
+
"image_paths": {
|
| 189 |
+
"1": {
|
| 190 |
+
"figure_path": "2402.05379v3_figure_1.png",
|
| 191 |
+
"caption": "Figure 1: Natural gradient (NG) descent using \u2110^1\u2062(\ud835\udf3d)subscript^\u21101\ud835\udf3d\\hat{\\mathcal{I}}_{1}(\\bm{\\theta})over^ start_ARG caligraphic_I end_ARG start_POSTSUBSCRIPT 1 end_POSTSUBSCRIPT ( bold_italic_\u03b8 ) / \u2110^2\u2062(\ud835\udf3d)subscript^\u21102\ud835\udf3d\\hat{\\mathcal{I}}_{2}(\\bm{\\theta})over^ start_ARG caligraphic_I end_ARG start_POSTSUBSCRIPT 2 end_POSTSUBSCRIPT ( bold_italic_\u03b8 ) on a 2D toy dataset for regression (linear regression) and classification (logistic regression) (details in Appendix A).\nInset plot shows the parameter updates throughout training.\nHere, the variance of \u2110^2\u2062(\ud835\udf3d)subscript^\u21102\ud835\udf3d\\hat{\\mathcal{I}}_{2}(\\bm{\\theta})over^ start_ARG caligraphic_I end_ARG start_POSTSUBSCRIPT 2 end_POSTSUBSCRIPT ( bold_italic_\u03b8 ) is generally lower than \u2110^1\u2062(\ud835\udf3d)subscript^\u21101\ud835\udf3d\\hat{\\mathcal{I}}_{1}(\\bm{\\theta})over^ start_ARG caligraphic_I end_ARG start_POSTSUBSCRIPT 1 end_POSTSUBSCRIPT ( bold_italic_\u03b8 ).",
|
| 192 |
+
"url": "http://arxiv.org/html/2402.05379v3/x1.png"
|
| 193 |
+
},
|
| 194 |
+
"2": {
|
| 195 |
+
"figure_path": "2402.05379v3_figure_2.png",
|
| 196 |
+
"caption": "Figure 2: MNIST for a 4-layer MLP with sigmoid\nactivations. Top: The estimated Fisher information (FI), variances, and\nvariance bounds across 4 parameter groups and 20 training\nepochs. The FI (green line) is estimated\nusing \u2110^1subscript^\u21101\\hat{\\mathcal{I}}_{1}over^ start_ARG caligraphic_I end_ARG start_POSTSUBSCRIPT 1 end_POSTSUBSCRIPT (\u2110^2subscript^\u21102\\hat{\\mathcal{I}}_{2}over^ start_ARG caligraphic_I end_ARG start_POSTSUBSCRIPT 2 end_POSTSUBSCRIPT is almost identical and not shown for\nclarity). The s.t.d. (square root of variance) is shown\nfor variances and their bounds.\nBottom: the log-ratio of Theorem 4.1\u2019s upper bounds (UBs)\nand the true variances. The closer to 0, the better the UB.\nIn the right most column, the variance of \u2110^2subscript^\u21102\\hat{\\mathcal{I}}_{2}over^ start_ARG caligraphic_I end_ARG start_POSTSUBSCRIPT 2 end_POSTSUBSCRIPT vanishes: \ud835\udcb12\u2062(\u03b8i|\ud835\udc99)=0\u2264\ud835\udcb11\u2062(\u03b8i|\ud835\udc99)subscript\ud835\udcb12conditionalsubscript\ud835\udf03\ud835\udc56\ud835\udc990subscript\ud835\udcb11conditionalsubscript\ud835\udf03\ud835\udc56\ud835\udc99{\\mathcal{V}}_{2}(\\theta_{i}\\,|\\,\\bm{x})=0\\leq{\\mathcal{V}}_{1}(\\theta_{i}\\,|%\n\\,\\bm{x})caligraphic_V start_POSTSUBSCRIPT 2 end_POSTSUBSCRIPT ( italic_\u03b8 start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT | bold_italic_x ) = 0 \u2264 caligraphic_V start_POSTSUBSCRIPT 1 end_POSTSUBSCRIPT ( italic_\u03b8 start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT | bold_italic_x ). Thus related curves of \u2110^2subscript^\u21102\\hat{\\mathcal{I}}_{2}over^ start_ARG caligraphic_I end_ARG start_POSTSUBSCRIPT 2 end_POSTSUBSCRIPT are not shown.",
|
| 197 |
+
"url": "http://arxiv.org/html/2402.05379v3/x2.png"
|
| 198 |
+
},
|
| 199 |
+
"3": {
|
| 200 |
+
"figure_path": "2402.05379v3_figure_3.png",
|
| 201 |
+
"caption": "Figure I: Extended version of Fig. 1 with the sum of variance of FIM estimators over epochs.",
|
| 202 |
+
"url": "http://arxiv.org/html/2402.05379v3/x3.png"
|
| 203 |
+
},
|
| 204 |
+
"4": {
|
| 205 |
+
"figure_path": "2402.05379v3_figure_4.png",
|
| 206 |
+
"caption": "Figure II: Fig. I over different randomizations (a).",
|
| 207 |
+
"url": "http://arxiv.org/html/2402.05379v3/x4.png"
|
| 208 |
+
},
|
| 209 |
+
"5": {
|
| 210 |
+
"figure_path": "2402.05379v3_figure_5.png",
|
| 211 |
+
"caption": "Figure III: Fig. I over different randomizations (b).",
|
| 212 |
+
"url": "http://arxiv.org/html/2402.05379v3/x5.png"
|
| 213 |
+
},
|
| 214 |
+
"6": {
|
| 215 |
+
"figure_path": "2402.05379v3_figure_6.png",
|
| 216 |
+
"caption": "Figure IV: Fig. I over different randomizations (c).",
|
| 217 |
+
"url": "http://arxiv.org/html/2402.05379v3/x6.png"
|
| 218 |
+
},
|
| 219 |
+
"7": {
|
| 220 |
+
"figure_path": "2402.05379v3_figure_7.png",
|
| 221 |
+
"caption": "Figure V: The Fisher information, its variances and bounds of the variances\nw.r.t. a MLP trained with different initialization\nand a different input \ud835\udc99\ud835\udc99\\bm{x}bold_italic_x (a)",
|
| 222 |
+
"url": "http://arxiv.org/html/2402.05379v3/x7.png"
|
| 223 |
+
},
|
| 224 |
+
"8": {
|
| 225 |
+
"figure_path": "2402.05379v3_figure_8.png",
|
| 226 |
+
"caption": "Figure VI: The Fisher information, its variances and bounds of the variances\nw.r.t. a MLP trained with different initialization\nand a different input \ud835\udc99\ud835\udc99\\bm{x}bold_italic_x (b)",
|
| 227 |
+
"url": "http://arxiv.org/html/2402.05379v3/x8.png"
|
| 228 |
+
},
|
| 229 |
+
"9": {
|
| 230 |
+
"figure_path": "2402.05379v3_figure_9.png",
|
| 231 |
+
"caption": "Figure VII: The Fisher information, its variances and bounds of the variances\nw.r.t. a MLP trained with different initialization and a different input \ud835\udc99\ud835\udc99\\bm{x}bold_italic_x (c)",
|
| 232 |
+
"url": "http://arxiv.org/html/2402.05379v3/x9.png"
|
| 233 |
+
},
|
| 234 |
+
"10": {
|
| 235 |
+
"figure_path": "2402.05379v3_figure_10.png",
|
| 236 |
+
"caption": "Figure VIII: The Fisher information, its variances and bounds of the variances\nw.r.t. a MLP trained with different initialization and a different input \ud835\udc99\ud835\udc99\\bm{x}bold_italic_x (d)",
|
| 237 |
+
"url": "http://arxiv.org/html/2402.05379v3/x10.png"
|
| 238 |
+
},
|
| 239 |
+
"11(a)": {
|
| 240 |
+
"figure_path": "2402.05379v3_figure_11(a).png",
|
| 241 |
+
"caption": "Figure IX: The Fisher information, its variances and bounds of the variances\nw.r.t. a 5-layer MLP with log-sigmoid activation.",
|
| 242 |
+
"url": "http://arxiv.org/html/2402.05379v3/x11.png"
|
| 243 |
+
},
|
| 244 |
+
"11(b)": {
|
| 245 |
+
"figure_path": "2402.05379v3_figure_11(b).png",
|
| 246 |
+
"caption": "Figure IX: The Fisher information, its variances and bounds of the variances\nw.r.t. a 5-layer MLP with log-sigmoid activation.",
|
| 247 |
+
"url": "http://arxiv.org/html/2402.05379v3/x12.png"
|
| 248 |
+
},
|
| 249 |
+
"11(c)": {
|
| 250 |
+
"figure_path": "2402.05379v3_figure_11(c).png",
|
| 251 |
+
"caption": "Figure IX: The Fisher information, its variances and bounds of the variances\nw.r.t. a 5-layer MLP with log-sigmoid activation.",
|
| 252 |
+
"url": "http://arxiv.org/html/2402.05379v3/x13.png"
|
| 253 |
+
},
|
| 254 |
+
"12": {
|
| 255 |
+
"figure_path": "2402.05379v3_figure_12.png",
|
| 256 |
+
"caption": "Figure XII: The Fisher information, its variances and bounds of the variances\nw.r.t. a 5-layer MLP with log-sigmoid activation.",
|
| 257 |
+
"url": "http://arxiv.org/html/2402.05379v3/x14.png"
|
| 258 |
+
},
|
| 259 |
+
"13": {
|
| 260 |
+
"figure_path": "2402.05379v3_figure_13.png",
|
| 261 |
+
"caption": "Figure XIII: The Fisher information, its variances and bounds of the variances\nw.r.t. a 5-layer MLP with log-sigmoid activation.",
|
| 262 |
+
"url": "http://arxiv.org/html/2402.05379v3/x15.png"
|
| 263 |
+
}
|
| 264 |
+
},
|
| 265 |
+
"validation": true,
|
| 266 |
+
"references": [
|
| 267 |
+
{
|
| 268 |
+
"1": {
|
| 269 |
+
"title": "Information Geometry and Its Applications, volume 194 of\nApplied Mathematical Sciences.",
|
| 270 |
+
"author": "Shun-ichi Amari.",
|
| 271 |
+
"venue": "Springer-Verlag, Berlin, 2016.",
|
| 272 |
+
"url": null
|
| 273 |
+
}
|
| 274 |
+
},
|
| 275 |
+
{
|
| 276 |
+
"2": {
|
| 277 |
+
"title": "Dynamics of learning in MLP: Natural gradient and singularity\nrevisited.",
|
| 278 |
+
"author": "Shun-ichi Amari, Tomoko Ozeki, Ryo Karakida, Yuki Yoshida, and Masato\nOkada.",
|
| 279 |
+
"venue": "Neural Computation, 30(1):1\u201333, 2018.",
|
| 280 |
+
"url": null
|
| 281 |
+
}
|
| 282 |
+
},
|
| 283 |
+
{
|
| 284 |
+
"3": {
|
| 285 |
+
"title": "Fisher information and natural gradient learning in random deep\nnetworks.",
|
| 286 |
+
"author": "Shun-ichi Amari, Ryo Karakida, and Masafumi Oizumi.",
|
| 287 |
+
"venue": "In International Conference on Artificial Intelligence and\nStatistics, pages 694\u2013702. PMLR, 2019.",
|
| 288 |
+
"url": null
|
| 289 |
+
}
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"4": {
|
| 293 |
+
"title": "Improving the convergence of back-propagation learning with second\norder methods.",
|
| 294 |
+
"author": "Sue Becker, Yann Le Cun, et al.",
|
| 295 |
+
"venue": "In Proceedings of the 1988 connectionist models summer school,\npages 29\u201337, 1988.",
|
| 296 |
+
"url": null
|
| 297 |
+
}
|
| 298 |
+
},
|
| 299 |
+
{
|
| 300 |
+
"5": {
|
| 301 |
+
"title": "Preferred point geometry and statistical manifolds.",
|
| 302 |
+
"author": "Frank Critchley, Paul Marriott, and Mark Salmon.",
|
| 303 |
+
"venue": "The Annals of Statistics, pages 1197\u20131224, 1993.",
|
| 304 |
+
"url": null
|
| 305 |
+
}
|
| 306 |
+
},
|
| 307 |
+
{
|
| 308 |
+
"6": {
|
| 309 |
+
"title": "Backpack: Packing more into backprop.",
|
| 310 |
+
"author": "Felix Dangel, Frederik Kunstner, and Philipp Hennig.",
|
| 311 |
+
"venue": "In International Conference on Learning Representations, 2020.",
|
| 312 |
+
"url": null
|
| 313 |
+
}
|
| 314 |
+
},
|
| 315 |
+
{
|
| 316 |
+
"7": {
|
| 317 |
+
"title": "Curvature and inference for maximum likelihood estimates.",
|
| 318 |
+
"author": "Bradley Efron.",
|
| 319 |
+
"venue": "The Annals of Statistics, 46(4):1664\u20131692, 2018.",
|
| 320 |
+
"url": null
|
| 321 |
+
}
|
| 322 |
+
},
|
| 323 |
+
{
|
| 324 |
+
"8": {
|
| 325 |
+
"title": "Hesscale: Scalable computation of Hessian diagonals.",
|
| 326 |
+
"author": "Mohamed Elsayed and A Rupam Mahmood.",
|
| 327 |
+
"venue": "arXiv preprint arXiv:2210.11639, 2022.",
|
| 328 |
+
"url": null
|
| 329 |
+
}
|
| 330 |
+
},
|
| 331 |
+
{
|
| 332 |
+
"9": {
|
| 333 |
+
"title": "Pytorch-minimize: a library for numerical optimization with autograd,\n2021.",
|
| 334 |
+
"author": "Reuben Feinman.",
|
| 335 |
+
"venue": "URL https://github.com/rfeinman/pytorch-minimize.",
|
| 336 |
+
"url": null
|
| 337 |
+
}
|
| 338 |
+
},
|
| 339 |
+
{
|
| 340 |
+
"10": {
|
| 341 |
+
"title": "Sharpness-aware minimization for efficiently improving\ngeneralization.",
|
| 342 |
+
"author": "Pierre Foret, Ariel Kleiner, Hossein Mobahi, and Behnam Neyshabur.",
|
| 343 |
+
"venue": "In International Conference on Learning Representations, 2021.",
|
| 344 |
+
"url": null
|
| 345 |
+
}
|
| 346 |
+
},
|
| 347 |
+
{
|
| 348 |
+
"11": {
|
| 349 |
+
"title": "Relative accuracy of two methods for approximating observed Fisher\ninformation.",
|
| 350 |
+
"author": "Shenghan Guo and James C. Spall.",
|
| 351 |
+
"venue": "In Data-Driven Modeling, Filtering and Control: Methods and\napplications, pages 189\u2013211. IET Press, London, 2019.",
|
| 352 |
+
"url": null
|
| 353 |
+
}
|
| 354 |
+
},
|
| 355 |
+
{
|
| 356 |
+
"12": {
|
| 357 |
+
"title": "On \u201cnatural\u201d learning and pruning in multilayered perceptrons.",
|
| 358 |
+
"author": "Tom Heskes.",
|
| 359 |
+
"venue": "Neural Computation, 12(4):881\u2013901, 2000.",
|
| 360 |
+
"url": null
|
| 361 |
+
}
|
| 362 |
+
},
|
| 363 |
+
{
|
| 364 |
+
"13": {
|
| 365 |
+
"title": "Understanding approximate Fisher information for fast convergence\nof natural gradient descent in wide neural networks.",
|
| 366 |
+
"author": "Ryo Karakida and Kazuki Osawa.",
|
| 367 |
+
"venue": "Advances in neural information processing systems,\n33:10891\u201310901, 2020.",
|
| 368 |
+
"url": null
|
| 369 |
+
}
|
| 370 |
+
},
|
| 371 |
+
{
|
| 372 |
+
"14": {
|
| 373 |
+
"title": "Universal statistics of Fisher information in deep neural networks:\nMean field approach.",
|
| 374 |
+
"author": "Ryo Karakida, Shotaro Akaho, and Shun-ichi Amari.",
|
| 375 |
+
"venue": "In International Conference on Artificial Intelligence and\nStatistics, pages 1032\u20131041. PMLR, 2019.",
|
| 376 |
+
"url": null
|
| 377 |
+
}
|
| 378 |
+
},
|
| 379 |
+
{
|
| 380 |
+
"15": {
|
| 381 |
+
"title": "Pathological spectra of the Fisher information metric and its\nvariants in deep neural networks.",
|
| 382 |
+
"author": "Ryo Karakida, Shotaro Akaho, and Shun-ichi Amari.",
|
| 383 |
+
"venue": "Neural Computation, 33(8):2274\u20132307,\n2021.",
|
| 384 |
+
"url": null
|
| 385 |
+
}
|
| 386 |
+
},
|
| 387 |
+
{
|
| 388 |
+
"16": {
|
| 389 |
+
"title": "Adam: A method for stochastic optimization.",
|
| 390 |
+
"author": "Diederik P. Kingma and Jimmy Ba.",
|
| 391 |
+
"venue": "In International Conference on Learning Representations, 2015.",
|
| 392 |
+
"url": null
|
| 393 |
+
}
|
| 394 |
+
},
|
| 395 |
+
{
|
| 396 |
+
"17": {
|
| 397 |
+
"title": "An adaptive shifted power method for computing generalized tensor\neigenpairs.",
|
| 398 |
+
"author": "Tamara G Kolda and Jackson R Mayo.",
|
| 399 |
+
"venue": "SIAM Journal on Matrix Analysis and Applications, 35(4):1563\u20131581, 2014.",
|
| 400 |
+
"url": null
|
| 401 |
+
}
|
| 402 |
+
},
|
| 403 |
+
{
|
| 404 |
+
"18": {
|
| 405 |
+
"title": "Limitations of the empirical Fisher approximation for natural\ngradient descent.",
|
| 406 |
+
"author": "Frederik Kunstner, Lukas Balles, and Philipp Hennig.",
|
| 407 |
+
"venue": "In Advances in Neural Information Processing Systems, pages\n4133\u20134144. Curran Associates, Inc., 2020.",
|
| 408 |
+
"url": null
|
| 409 |
+
}
|
| 410 |
+
},
|
| 411 |
+
{
|
| 412 |
+
"19": {
|
| 413 |
+
"title": "Iterative weighted least squares algorithms for neural networks\nclassifiers.",
|
| 414 |
+
"author": "Takio Kurita.",
|
| 415 |
+
"venue": "New generation computing, 12:375\u2013394, 1994.",
|
| 416 |
+
"url": null
|
| 417 |
+
}
|
| 418 |
+
},
|
| 419 |
+
{
|
| 420 |
+
"20": {
|
| 421 |
+
"title": "Topmoumoute online natural gradient algorithm.",
|
| 422 |
+
"author": "Nicolas Le Roux, Pierre-Antoine Manzagol, and Yoshua Bengio.",
|
| 423 |
+
"venue": "Advances in neural information processing systems, 20, 2007.",
|
| 424 |
+
"url": null
|
| 425 |
+
}
|
| 426 |
+
},
|
| 427 |
+
{
|
| 428 |
+
"21": {
|
| 429 |
+
"title": "MNIST handwritten digit database.",
|
| 430 |
+
"author": "Yann LeCun, Corinna Cortes, and CJ Burges.",
|
| 431 |
+
"venue": "ATT Labs [Online], 2, 2010.",
|
| 432 |
+
"url": null
|
| 433 |
+
}
|
| 434 |
+
},
|
| 435 |
+
{
|
| 436 |
+
"22": {
|
| 437 |
+
"title": "Visualizing the loss landscape of neural nets.",
|
| 438 |
+
"author": "Hao Li, Zheng Xu, Gavin Taylor, Christoph Studer, and Tom Goldstein.",
|
| 439 |
+
"venue": "Advances in neural information processing systems, 31, 2018.",
|
| 440 |
+
"url": null
|
| 441 |
+
}
|
| 442 |
+
},
|
| 443 |
+
{
|
| 444 |
+
"23": {
|
| 445 |
+
"title": "Singular values and eigenvalues of tensors: a variational approach.",
|
| 446 |
+
"author": "Lek-Heng Lim.",
|
| 447 |
+
"venue": "In 1st IEEE International Workshop on Computational Advances in\nMulti-Sensor Adaptive Processing, pages 129\u2013132. IEEE, 2005.",
|
| 448 |
+
"url": null
|
| 449 |
+
}
|
| 450 |
+
},
|
| 451 |
+
{
|
| 452 |
+
"24": {
|
| 453 |
+
"title": "Inequalities: Theory of Majorization and its Applications,\nvolume 143 of Springer Series in Statistics (SSS).",
|
| 454 |
+
"author": "Albert W. Marshall, Ingram Olkin, and Barry C. Arnold.",
|
| 455 |
+
"venue": "Springer, second edition, 2011.",
|
| 456 |
+
"url": null
|
| 457 |
+
}
|
| 458 |
+
},
|
| 459 |
+
{
|
| 460 |
+
"25": {
|
| 461 |
+
"title": "New insights and perspectives on the natural gradient method.",
|
| 462 |
+
"author": "James Martens.",
|
| 463 |
+
"venue": "Journal of Machine Learning Research, 21(146):1\u201376, 2020.",
|
| 464 |
+
"url": null
|
| 465 |
+
}
|
| 466 |
+
},
|
| 467 |
+
{
|
| 468 |
+
"26": {
|
| 469 |
+
"title": "Optimizing neural networks with Kronecker-factored approximate\ncurvature.",
|
| 470 |
+
"author": "James Martens and Roger Grosse.",
|
| 471 |
+
"venue": "In International Conference on Machine Learning, pages\n2408\u20132417. PMLR, 2015.",
|
| 472 |
+
"url": null
|
| 473 |
+
}
|
| 474 |
+
},
|
| 475 |
+
{
|
| 476 |
+
"27": {
|
| 477 |
+
"title": "Deep learning via Hessian-free optimization.",
|
| 478 |
+
"author": "James Martens et al.",
|
| 479 |
+
"venue": "In ICML, volume 27, pages 735\u2013742, 2010.",
|
| 480 |
+
"url": null
|
| 481 |
+
}
|
| 482 |
+
},
|
| 483 |
+
{
|
| 484 |
+
"28": {
|
| 485 |
+
"title": "The memory-perturbation equation: Understanding model's sensitivity to data.",
|
| 486 |
+
"author": "Peter Nickl, Lu Xu, Dharmesh Tailor, Thomas M\u00f6llenhoff, and Mohammad\nEmtiyaz E Khan.",
|
| 487 |
+
"venue": "In Advances in Neural Information Processing Systems,\nvolume 36, pages 26923\u201326949. Curran Associates, Inc., 2023.",
|
| 488 |
+
"url": null
|
| 489 |
+
}
|
| 490 |
+
},
|
| 491 |
+
{
|
| 492 |
+
"29": {
|
| 493 |
+
"title": "Monte Carlo Information-Geometric Structures, pages 69\u2013103.",
|
| 494 |
+
"author": "Frank Nielsen and Ga\u00ebtan Hadjeres.",
|
| 495 |
+
"venue": "Springer International Publishing, 2019.",
|
| 496 |
+
"url": null
|
| 497 |
+
}
|
| 498 |
+
},
|
| 499 |
+
{
|
| 500 |
+
"30": {
|
| 501 |
+
"title": "Riemannian metrics for neural networks I: feedforward networks.",
|
| 502 |
+
"author": "Yann Ollivier.",
|
| 503 |
+
"venue": "Information and Inference: A Journal of the IMA, 4(2):108\u2013153, 2015.",
|
| 504 |
+
"url": null
|
| 505 |
+
}
|
| 506 |
+
},
|
| 507 |
+
{
|
| 508 |
+
"31": {
|
| 509 |
+
"title": "Traces of class/cross-class structure pervade deep learning spectra.",
|
| 510 |
+
"author": "Vardan Papyan.",
|
| 511 |
+
"venue": "Journal of Machine Learning Research, 21(252):1\u201364, 2020.",
|
| 512 |
+
"url": null
|
| 513 |
+
}
|
| 514 |
+
},
|
| 515 |
+
{
|
| 516 |
+
"32": {
|
| 517 |
+
"title": "Revisiting natural gradient for deep networks.",
|
| 518 |
+
"author": "Razvan Pascanu and Yoshua Bengio.",
|
| 519 |
+
"venue": "In International Conference on Learning Representations, 2014.",
|
| 520 |
+
"url": null
|
| 521 |
+
}
|
| 522 |
+
},
|
| 523 |
+
{
|
| 524 |
+
"33": {
|
| 525 |
+
"title": "Pytorch: An imperative style, high-performance deep learning library.",
|
| 526 |
+
"author": "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory\nChanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban\nDesmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan\nTejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith\nChintala.",
|
| 527 |
+
"venue": "In Advances in Neural Information Processing Systems, pages\n8024\u20138035. Curran Associates, Inc., 2019.",
|
| 528 |
+
"url": null
|
| 529 |
+
}
|
| 530 |
+
},
|
| 531 |
+
{
|
| 532 |
+
"34": {
|
| 533 |
+
"title": "The spectrum of the Fisher information matrix of a\nsingle-hidden-layer neural network.",
|
| 534 |
+
"author": "Jeffrey Pennington and Pratik Worah.",
|
| 535 |
+
"venue": "In Advances in Neural Information Processing Systems, pages\n5415\u20135424, 2018.",
|
| 536 |
+
"url": null
|
| 537 |
+
}
|
| 538 |
+
},
|
| 539 |
+
{
|
| 540 |
+
"35": {
|
| 541 |
+
"title": "Tensor normal training for deep learning models.",
|
| 542 |
+
"author": "Yi Ren and Donald Goldfarb.",
|
| 543 |
+
"venue": "In Advances in Neural Information Processing Systems,\nvolume 34, pages 26040\u201326052. Curran Associates, Inc., 2021.",
|
| 544 |
+
"url": null
|
| 545 |
+
}
|
| 546 |
+
},
|
| 547 |
+
{
|
| 548 |
+
"36": {
|
| 549 |
+
"title": "Woodfisher: Efficient second-order approximation for neural network\ncompression.",
|
| 550 |
+
"author": "Sidak Pal Singh and Dan Alistarh.",
|
| 551 |
+
"venue": "In Advances in Neural Information Processing Systems,\nvolume 33, pages 18098\u201318109. Curran Associates, Inc., 2020.",
|
| 552 |
+
"url": null
|
| 553 |
+
}
|
| 554 |
+
},
|
| 555 |
+
{
|
| 556 |
+
"37": {
|
| 557 |
+
"title": "On the variance of the Fisher information for deep learning.",
|
| 558 |
+
"author": "Alexander Soen and Ke Sun.",
|
| 559 |
+
"venue": "In Advances in Neural Information Processing Systems,\nvolume 34, pages 5708\u20135719. Curran Associates, Inc., 2021.",
|
| 560 |
+
"url": null
|
| 561 |
+
}
|
| 562 |
+
},
|
| 563 |
+
{
|
| 564 |
+
"38": {
|
| 565 |
+
"title": "Trade-offs of diagonal Fisher information matrix estimators.",
|
| 566 |
+
"author": "Alexander Soen and Ke Sun.",
|
| 567 |
+
"venue": "In Advances in Neural Information Processing Systems,\nvolume 37, 2024.",
|
| 568 |
+
"url": null
|
| 569 |
+
}
|
| 570 |
+
},
|
| 571 |
+
{
|
| 572 |
+
"39": {
|
| 573 |
+
"title": "Information geometry for data geometry through pullbacks.",
|
| 574 |
+
"author": "Ke Sun.",
|
| 575 |
+
"venue": "In Deep Learning through Information Geometry (Workshop at\nNeurIPS 2020), 2020.",
|
| 576 |
+
"url": null
|
| 577 |
+
}
|
| 578 |
+
},
|
| 579 |
+
{
|
| 580 |
+
"40": {
|
| 581 |
+
"title": "Relative Fisher information and natural gradient for learning large\nmodular models.",
|
| 582 |
+
"author": "Ke Sun and Frank Nielsen.",
|
| 583 |
+
"venue": "In International Conference on Machine Learning, pages\n3289\u20133298, 2017.",
|
| 584 |
+
"url": null
|
| 585 |
+
}
|
| 586 |
+
},
|
| 587 |
+
{
|
| 588 |
+
"41": {
|
| 589 |
+
"title": "A geometric modeling of Occam\u2019s razor in deep learning.",
|
| 590 |
+
"author": "Ke Sun and Frank Nielsen.",
|
| 591 |
+
"venue": "arXiv preprint arXiv:1905.11027, 2019.",
|
| 592 |
+
"url": null
|
| 593 |
+
}
|
| 594 |
+
},
|
| 595 |
+
{
|
| 596 |
+
"42": {
|
| 597 |
+
"title": "Connection of diagonal Hessian estimates to natural gradients in\nstochastic optimization.",
|
| 598 |
+
"author": "Shiqing Sun and James C. Spall.",
|
| 599 |
+
"venue": "In Proceedings of the 55th Annual Conference on Information\nSciences and Systems (CISS), 2021.",
|
| 600 |
+
"url": null
|
| 601 |
+
}
|
| 602 |
+
},
|
| 603 |
+
{
|
| 604 |
+
"43": {
|
| 605 |
+
"title": "Topics in random matrix theory, volume 132.",
|
| 606 |
+
"author": "Terence Tao.",
|
| 607 |
+
"venue": "American Mathematical Soc., 2012.",
|
| 608 |
+
"url": null
|
| 609 |
+
}
|
| 610 |
+
},
|
| 611 |
+
{
|
| 612 |
+
"44": {
|
| 613 |
+
"title": "Trace bounds on the solution of the algebraic matrix riccati and\nlyapunov equation.",
|
| 614 |
+
"author": "Sheng-De Wang, Te-Son Kuo, and Chen-Fa Hsu.",
|
| 615 |
+
"venue": "IEEE Transactions on Automatic Control, 31(7):654\u2013656, 1986.",
|
| 616 |
+
"url": null
|
| 617 |
+
}
|
| 618 |
+
},
|
| 619 |
+
{
|
| 620 |
+
"45": {
|
| 621 |
+
"title": "The spectral decomposition and inverse of multinomial and negative\nmultinomial covariances.",
|
| 622 |
+
"author": "Christopher S Withers and Saralees Nadarajah.",
|
| 623 |
+
"venue": "Brazilian Journal of Probability and Statistics, pages\n376\u2013380, 2014.",
|
| 624 |
+
"url": null
|
| 625 |
+
}
|
| 626 |
+
},
|
| 627 |
+
{
|
| 628 |
+
"46": {
|
| 629 |
+
"title": "Pyhessian: Neural networks through the lens of the Hessian.",
|
| 630 |
+
"author": "Zhewei Yao, Amir Gholami, Kurt Keutzer, and Michael W Mahoney.",
|
| 631 |
+
"venue": "In 2020 IEEE international conference on big data (Big data),\npages 581\u2013590. IEEE, 2020.",
|
| 632 |
+
"url": null
|
| 633 |
+
}
|
| 634 |
+
},
|
| 635 |
+
{
|
| 636 |
+
"47": {
|
| 637 |
+
"title": "Adahessian: An adaptive second order optimizer for machine learning.",
|
| 638 |
+
"author": "Zhewei Yao, Amir Gholami, Sheng Shen, Mustafa Mustafa, Kurt Keutzer, and\nMichael Mahoney.",
|
| 639 |
+
"venue": "In proceedings of the AAAI conference on artificial\nintelligence, volume 35, pages 10665\u201310673, 2021.",
|
| 640 |
+
"url": null
|
| 641 |
+
}
|
| 642 |
+
}
|
| 643 |
+
],
|
| 644 |
+
"url": "http://arxiv.org/html/2402.05379v3"
|
| 645 |
+
}
|
20241030/2402.06353v3.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2402.09299v4.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241030/2402.10360v3.json
ADDED
|
@@ -0,0 +1,304 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"title": "Transductive Learning Is Compact",
|
| 3 |
+
"abstract": "We demonstrate a\ncompactness result holding broadly across supervised learning with a general class of loss functions: Any hypothesis class is learnable with transductive sample complexity precisely when\nall of its finite projections are learnable with sample complexity .\nWe prove that this exact form of compactness holds for realizable and agnostic learning with respect to any proper metric loss function (e.g., any norm on ) and any continuous loss on a compact space (e.g., cross-entropy, squared loss).\nFor realizable learning with improper metric losses, we show that exact compactness of sample complexity can fail, and provide matching upper and lower bounds of a factor of 2 on the extent to which such sample complexities can differ. We conjecture that larger gaps are possible for the agnostic case. Furthermore, invoking the equivalence between sample complexities in the PAC and transductive models (up to lower order factors, in the realizable case) permits us to directly port our results to the PAC model, revealing an almost-exact form of compactness holding broadly in PAC learning.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Compactness results in mathematics describe the behavior by which, roughly speaking, an infinite system can be entirely understood by inspecting its finite subsystems: An infinite graph is -colorable precisely when its finite subgraphs are all -colorable (De Bruijn and Erd\u00f6s, 1951 ###reference_b12###), an infinite collection of compact sets in has non-empty intersection precisely when the same is true of its finite subcollections, etc. In each case, compactness reveals a profound and striking structure, by which local understanding of a problem immediately yields global understanding.\nWe demonstrate that supervised learning in the transductive model enjoys such structure. First, let us briefly review the transductive model, a close relative of the PAC model. In the realizable setting with a class of hypotheses , it is defined by the following sequence of steps:\nAn adversary selects unlabeled data and a hypothesis .\nThe unlabeled datapoints are displayed to the learner.\nOne datapoint is selected uniformly at random from . The remaining datapoints\nand their labels under are displayed to the learner.\nThe learner is prompted to predict the label of , i.e., .\nThe expected error incurred by the learner over the uniformly random choice of is its transductive error on this learning instance, from which one can easily define the transductive sample complexity of a learner and of a hypothesis class.\nNotably, transductive learning, originally introduced by Vapnik and Chervonenkis (1974 ###reference_b24###) and Vapnik (1982 ###reference_b23###), is a fundamental approach to learning with deep theoretical connections to the PAC model. We study the transductive model as employed by the pioneering work of Haussler et al. (1994 ###reference_b18###), who introduced the celebrated one-inclusion graph (OIG) to study transduction and used it to derive improved error bounds for VC classes. 
More recently, transductive learning and OIGs have been used to (among other work) establish the first characterizations of learnability for multiclass classification and realizable regression (Brukhim et al., 2022 ###reference_b10###; Attias et al., 2023 ###reference_b5###), to prove optimal PAC bounds across several learning settings (Aden-Ali et al., 2023b ###reference_b2###), and to understand regularization in multiclass learning (Asilis et al., 2024 ###reference_b4###). (See also Daniely and Shalev-Shwartz (2014 ###reference_b11###); Alon et al. (2022 ###reference_b3###); Montasser et al. (2022 ###reference_b20###); Aden-Ali et al. (2023a ###reference_b1###).) The transductive model also naturally generalizes to the agnostic setting, much like PAC learning, as articulated by Asilis et al. (2024 ###reference_b4###)."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "1.1",
|
| 13 |
+
"parent_section_id": "1",
|
| 14 |
+
"section_name": "Contributions",
|
| 15 |
+
"text": "Our results involve comparing a hypothesis class to its \u201cfinite projections.\u201d Formally, for a hypothesis class and any finite collection of unlabeled data , we refer to the finite subsets of as finite projections of . Note that is being \u201cmade finite\u201d at two levels: first by restricting its functions to a finite region of the domain, and second by passing to a finite subset of . Thus, any finite projection of , e.g. , is necessarily a finite set of behaviors, , regardless of whether in its totality is infinite (as may easily occur if is infinite).\nAs our cornerstone result, we demonstrate in Theorem 3.6 ###reference_theorem6### that for the case of supervised learning with a large class of proper111We warn that we will shortly be overloading the term \u201cproper\u201d, as we discuss proper metric\nspaces and proper functions between metric spaces. We also note that our notion of properness is unrelated to losses which incentivize predicting the true probability, from e.g. Blasiok et al. (2023 ###reference_b9###). (And unrelated to proper vs. improper learners; we consider improper learners throughout the paper, which can emit predictors outside the class .) metric loss functions\n(including any norm on or its closed subsets; see Definition 3.2 ###reference_theorem2###)\na class can be learned with transductive sample complexity precisely when the same is true of all its finite projections. 
In fact, in Theorem 3.7 ###reference_theorem7### we extend our results to arbitrary continuous losses on compact metric spaces, e.g., cross-entropy loss on finite-dimensional probability spaces and squared loss on compact subsets of .\nFor learning over arbitrary label spaces, we demonstrate in Theorems 3.8 ###reference_theorem8### and 3.9 ###reference_theorem9### that compactness fails: for realizable learning with metric losses, we provide matching upper and lower bounds of a factor of 2 on the extent to which such transductive sample complexities can differ. Our lower bound transfers directly to transductive learning in the agnostic case, for which we conjecture that larger gaps in sample complexity are possible.\nWe stress that our compactness results are exact in the transductive model, avoiding dilution by asymptotics or even by constants. In addition, there is a growing body of work relating sample complexities in the transductive and PAC models, by which our results directly transfer in a black-box manner (Asilis et al., 2024 ###reference_b4###; Aden-Ali et al., 2023b ###reference_b2###; Dughmi et al., 2024 ###reference_b13###). Notably, for realizable learning with any bounded loss, PAC sample complexities differ from their transductive counterparts by at most a logarithmic factor in , the confidence parameter. Combined with our results, this reveals an almost-exact form of compactness for realizable PAC learning, as we describe in Section 3.4 ###reference_###.222Note too that any future improvements to the connections between the PAC and transductive models, whether in the realizable or agnostic settings, will be automatically inherited by our results in a black-box manner.\nOur results hold for improper learners, i.e., learners that are permitted to emit a predictor outside the underlying class . Curiously, compactness of sample complexity can be seen to fail strongly when one requires that learners be proper, using the work of Ben-David et al. 
(2019 ###reference_b7###). This demonstrates a structural difference between proper and improper learning; see Appendix B ###reference_### for further detail.\nOur compactness results are underpinned by a generalization of the classic marriage theorems for bipartite graphs which may be of independent mathematical interest. The original marriage theorem, due to Philip Hall (Hall, 1935 ###reference_b14###), articulates a necessary and sufficient condition for the existence of a perfect matching from one side of a finite bipartite graph to the other. Subsequently, Marshall Hall (Hall Jr, 1948 ###reference_b15###) extended the same characterization, referencing only finite subgraphs, to infinite graphs of arbitrary cardinality, provided the side to be matched has finite degrees \u2014 the characterization being false otherwise, as can be seen by a simple countable example. This characterization therefore serves as a compactness result for matching on such infinite graphs. The proof of M. Hall features an involved analysis of the lattice of \u201cblocking sets\u201d, and invokes the axiom of choice through Zorn\u2019s lemma. Simpler proofs have since been discovered: a topological proof by Halmos and Vaughan (1950 ###reference_b16###) which invokes the axiom of choice through Tychonoff\u2019s theorem, and an algebraic proof by Rado (1967 ###reference_b21###) which also uses Zorn\u2019s lemma. At the heart of our paper is a compactness result (Theorem 3.3 ###reference_theorem3###) for a variable-assignment problem which generalizes both supervised learning and bipartite matching: one side of a bipartite graph indexes infinitely many variables, the other indexes infinitely many functions that depend on finitely many variables each, and the goal is to assign all the variables in a manner that maintains all functions below a target value. Our proof draws inspiration from all three of the aforementioned proofs of M. Hall\u2019s theorem, and goes through Zorn\u2019s lemma."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "1.2",
|
| 19 |
+
"parent_section_id": "1",
|
| 20 |
+
"section_name": "Related Work",
|
| 21 |
+
"text": "The transductive approach to learning dates to the work of Vapnik and Chervonenkis (1974 ###reference_b24###) and Vapnik (1982 ###reference_b23###), and has inspired a breadth of recent advances across regression, classification, and various other learning regimes; see our introduction for a brief overview.\nRegarding transductive sample complexities, Hanneke et al. (2023 ###reference_b17###) recently demonstrated a trichotomy result for optimal transductive error rates in the online setting of Ben-David et al. (1997 ###reference_b6###). In contrast, we focus on the classical (batch) setting, as described in Section 2.2 ###reference_###.\nPerhaps most related to the present work is Attias et al. (2023 ###reference_b5###), which introduces the -OIG dimension and demonstrates that it characterizes learnability for supervised learning problems with pseudometric losses. Notably, this is the first general dimension characterizing learnability across essentially the entirety of supervised learning. The -OIG dimension itself establishes a qualitative form of compactness \u2014 as it is defined using only the finite projections of a class \u2014 but we note that it has not been shown to tightly characterize the sample complexity of learning. Furthermore, it is analyzed only for realizable learning, which is in general not equivalent to agnostic learning (e.g., for regression). Our work, in contrast, establishes exact compactness for the sample complexity of transductive learning for both the realizable and agnostic settings, with respect to a general class of loss functions. Moreover, in Appendix B ###reference_### we extend our results to certain cases of distribution-family learning, including realizable learning of partial concept classes."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2",
|
| 25 |
+
"parent_section_id": null,
|
| 26 |
+
"section_name": "Preliminaries",
|
| 27 |
+
"text": ""
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "2.1",
|
| 31 |
+
"parent_section_id": "2",
|
| 32 |
+
"section_name": "Notation",
|
| 33 |
+
"text": "For a natural number , denotes the set . For a predicate , denotes the Iverson bracket of , i.e., when is true and 0 otherwise. When is a set, denotes the set of all finite sequences in , i.e., . For a tuple , we use to denote with its th entry removed, i.e., ."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "2.2",
|
| 37 |
+
"parent_section_id": "2",
|
| 38 |
+
"section_name": "Transductive Learning",
|
| 39 |
+
"text": "Let us recall the standard toolkit of supervised learning. A learning problem is determined by a domain , label space , and hypothesis class . The elements of are functions ; such functions are referred to as hypotheses or predictors. Learning also requires a loss function\n (or ) from to ,\nwhich often endows with the structure of a metric space. Throughout the paper, we permit to be arbitrary. A labeled datapoint is a pair and an unlabeled datapoint is an element . A training set, or training sample, is a tuple of labeled datapoints . A learner is a function from training sets to predictors, i.e., .\nRealizable transductive learning is defined as follows: An adversary selects and a hypothesis . The unlabeled datapoints are displayed to the learner. Then one datapoint is selected uniformly at random from , and the remaining datapoints and their labels under are displayed to the learner. Lastly,\nthe learner is prompted to predict the label of , i.e., .\nWe refer to the information of as in Definition 2.1 ###reference_theorem1### as an instance of transductive learning, and to as the (randomly selected) test datapoint and the (randomly selected) training datapoints. The transductive error incurred by a learner on an instance is its average error over the uniformly random choice of test datapoint, i.e.,\nwhere denotes the output of on the sample .\nHaving defined transductive error, it is natural to define error rates and sample complexity.\nThe transductive error rate of a learner for is the function defined by .\nThe transductive sample complexity of a learner for is the function .\nThe transductive error rate of a class is the minimal error rate attained by any of its learners, i.e., .\nThe transductive sample complexity of is the function mapping to the minimal for which for all . 
That is,\nWe say that is learnable in the realizable case with transductive sample function when for all .\nInformally, agnostic transductive learning is the analogue in which the adversary is permitted to label the data in arbitrarily, and in which the learner need only compete with the best hypothesis in . We defer the formal definition to Section 3.3 ###reference_###."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "3",
|
| 43 |
+
"parent_section_id": null,
|
| 44 |
+
"section_name": "Compactness of Learning",
|
| 45 |
+
"text": "We present the central result of the paper in this section: the transductive sample complexity of learning is a compact property of a hypothesis class. In Section 3.1 ###reference_### we study compactness of realizable supervised learning over proper loss functions, and demonstrate a strong compactness result: a class is learnable with transductive sample complexity if and only if all its finite projections are learnable with the same complexity. In Section 3.2 ###reference_### we examine the case of realizable supervised learning over improper loss functions and prove a negative result: the previous compactness result no longer holds in this more general setting. Nevertheless, we demonstrate an approximate form of compactness, up to a factor of 2, for (improper) metric losses. Moreover, we show exact compactness for the special case of the (improper) 0-1 loss function, i.e., multiclass classification over arbitrary, possibly infinite label sets. Notably, this recovers M. Hall\u2019s classic matching theorem for infinite graphs (Hall Jr, 1948 ###reference_b15###) as a corollary to our central result. In Section 3.3 ###reference_### we examine analogues of our results for agnostic learning, and in Section 3.4 ###reference_### we transfer our results to the PAC model via standard equivalences, obtaining approximate compactness of sample complexities. Due to space constraints, we defer an extension of our results to distribution-family PAC learning to Appendix B ###reference_###."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "3.1",
|
| 49 |
+
"parent_section_id": "3",
|
| 50 |
+
"section_name": "Realizable Learning With Proper Loss Functions",
|
| 51 |
+
"text": "We first consider the case of loss functions defined on a proper metric space .\nA metric space is proper if its closed and bounded subsets are all compact.\nA related notion is that of a proper map between metric spaces.\nA function between metric spaces is proper if it reflects compact sets, i.e., is compact when is compact.\nWe remark that proper spaces are sometimes referred to as Heine-Borel spaces, and that their examples include endowed with any norm, all closed subsets of (under the same norms), and all finite sets endowed with arbitrary metrics. Further discussion of proper metric spaces is provided in Appendix A ###reference_###. The central technical result of this subsection is a compactness property concerning assignments of variables to metric spaces that maintain a family of functions below a target value .\nLet be a collection of variables, with each variable taking values in a metric space . Let be a collection of proper functions, each of which depends upon finitely many variables in and has codomain . Then the following conditions are equivalent for any .\nThere exists an assignment of all variables in which keeps the output of each function no greater than .\nFor each finite subset of , there exists an assignment of all variables in which keeps the output of each function no greater than .\nis immediate. Before arguing the reverse direction, some terminology: a partial assignment of variables is an assignment of variables for a subset of . A partial assignment is said to be completable with respect to if its unassigned variables can all be assigned so that all functions are kept below . A partial assignment is finitely completable if it is completable with respect to all finite subsets of . 
This is a pointwise condition: the completions are permitted to vary across \u2019s subsets.\nGiven a finitely completable partial assignment with an unassigned variable, one such variable can be assigned while preserving finite completability.\nFix any unassigned variable ; we will assign it while preserving finite completability. For each set , let consist of those assignments of that preserve completability with respect to . By the assumption of finite completability, we have that is non-empty for all finite . We claim furthermore that is compact for finite .\nTo see why, let and let be the variables in upon which the functions in depend. Suppose without loss of generality that and that nodes have already been assigned.\nConsider the function mapping assignments of the to the outputs they induce on the functions in . (Notably, this includes the assignments already made for .) As the functions in are proper, including when fixing some of their inputs, is as well.\nThus is compact, as is its projection onto its first coordinate. That set is precisely , demonstrating our intermediate claim. We thus have a family of compact, non-empty sets .\nNote that finite intersections of elements of are non-empty, as .\nIn metric spaces, an infinite family of compact sets has non-empty intersection if and only if the same holds for its finite intersections. Thus, by compactness of each element of , the intersection across all of is non-empty. That is, there exists an assignment for which is completable with respect to all finite subsets of . The claim follows.\n\u220e\nWe now complete the argument using Zorn\u2019s lemma. Let be the poset whose elements are finitely completable assignments, where if agrees with all assignments made by and perhaps assigns additional variables. Note first that chains in have upper bounds. 
In particular, let be a chain and define to be the \u201cunion\u201d of assignments in , i.e., leaves unassigned if all leave unassigned, otherwise assigns to the unique element used by assignments in .\nClearly serves as an upper bound of , provided that . To see that , fix a finite set . is incident to a finite collection of nodes in , say . Suppose are those which are assigned by , and let be assignments which assign (i.e., do not leave free) the respective nodes . Then, as is totally ordered, it must be that one of assigns all of the variables . That is, there exists which agrees with in its action on . As , it must be that is completable with respect to . Then is also completable with respect to , as depends only upon .\nThus, invoking Zorn\u2019s lemma, has a maximal element . By Lemma 3.4 ###reference_theorem4###, it must be that does not leave a single variable unassigned, otherwise it could be augmented with an additional assignment. There thus exists a total assignment that is finitely completable. As it has no free variables, it must indeed be maintaining all functions in below . The claim follows.\n\u220e\n\n\nA corollary to Theorem 3.3 ###reference_theorem3### is that the same claim holds when the target values vary over the functions , as translations and scalings of proper functions are proper.\nLet be an arbitrary domain, a label set, and a loss function such that is a proper metric space.\nThen the following are equivalent for any and :\nis learnable in the realizable case with transductive sample function .\nFor any finite and finite , is learnable in the realizable case with transductive sample function .\nis immediate. For the reverse direction, fix an and set . Then fix a sequence of unlabeled datapoints . 
It suffices to demonstrate that a transductive learner for on instances of the form can be designed which attains error .\nWe will capture the task of transductively learning on such instances by way of a certain collection of functions and of variables. Each variable will be permitted to take values in , while each function depends upon exactly variables in and outputs values in . More precisely, let and . These serve merely as representations for the functions in and variables in , not their true definitions (which will be established shortly). Note now that by suppressing the unlabeled datapoints of , we can equivalently represent elements of as sequences in and elements of as sequences in . In this view, each element of is precisely an element of which had exactly one entry replaced with a . See Figure 1 ###reference_###.\nNow, to model transductive learning, fix an element represented by . Then we will define to be a function depending upon the variables , where .\nGiven assignments for each of the variables as values in \u2014 semantically, completions of their \u201c?\u201d entries \u2014 the node then outputs the value .\nThe two crucial observations are as follows: an assignment of each corresponds precisely to the action of a learner responding to a query at test time, and the output of node equals the error of a learner when is the ground truth.\nThus, it remains to show that the variables in can all be assigned so as to keep the outputs of the functions in less than . The condition (2.) grants us that this is true for each finite collection of functions . Now note that the functions are proper, as each such is continuous and reflects bounded sets, and as itself is proper. Invoke Theorem 3.3 ###reference_theorem3### to complete the proof.\n\u220e\nTheorem 3.6 ###reference_theorem6### establishes an exact compactness in learning with respect to a flexible class of metric loss functions. 
One may note, however, that some non-metric losses are of central importance to machine learning, including the squared error on compact subsets of (which violates the triangle inequality) and the cross-entropy loss for finite-dimensional distributions (which is not symmetric). We now provide a modified form of Theorem 3.6 ###reference_theorem6### which captures these loss functions, in which the loss function is permitted to differ from the underlying metric on . (E.g., such that is the usual Euclidean norm on a compact subset of , and is any continuous loss function.)\nLet be an arbitrary domain, a compact metric space, and a hypothesis class. Let be a loss function employed for learning that is continuous with respect to the metric . Then the following are equivalent for any :\nis learnable in the realizable case with transductive sample function .\nFor any finite and finite , is learnable in the realizable case with transductive sample function .\nWe adopt precisely the perspective of Theorem 3.6 ###reference_theorem6###, seeing transductive learning modeled as a variable assignment problem with the same variables and functions . To invoke Theorem 3.3 ###reference_theorem3###, it remains only to show that the functions are proper. First note a continuous function from a compact space to is automatically proper, as closed subsets of compact sets are compact. Now recall that each is a sum of scaled copies of with one input fixed. As each such function is continuous, itself is continuous and thus proper.\n\u220e"
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "3.2",
|
| 55 |
+
"parent_section_id": "3",
|
| 56 |
+
"section_name": "Realizable Learning With Improper Loss Functions",
|
| 57 |
+
"text": "It is natural to ask whether the requirement that be a proper metric space is essential to Theorem 3.6 ###reference_theorem6### or merely an artifact of the proof. We now demonstrate the former: for arbitrary metric losses, the error rate of learning can exceed that of all its finite projections by a factor of 2. Recall that denotes the transductive error rate of learning a class , i.e., denotes the error incurred by an optimal learner for on (worst-case) samples of size .\nThere exists a hypothesis class , metric loss function on , and such that for any finite and finite , .\nLet us describe the main idea of Theorem 3.8 ###reference_theorem8###, whose proof is deferred to Appendix C.1 ###reference_###. The crucial step lies in the creation of the label space , where is an infinite set whose points are all distance 2 apart, and is an infinite set whose elements are indexed by the finite subsets of , e.g., as in for finite . For all such , define to be distance 1 from the elements of , distance 2 from the other elements of , and distance 1 from all other points in . Then indeed forms a metric space, and it is straightforward to see that, for instance, the class of all functions from a one-element set to is more difficult to learn than its finite projections (equivalently, finite subsets).\nWe now prove a matching upper bound to Theorem 3.8 ###reference_theorem8###, demonstrating that a factor of 2 is the greatest possible gap between the error rate of and its projections when the loss function is a metric.\nLet be a label set with a metric loss function and a hypothesis class. Fix , and suppose that for any finite and finite , has transductive error rate . Then has transductive error rate .\nThe proof of Theorem 3.9 ###reference_theorem9### is deferred to Appendix C.2 ###reference_###, but let us briefly sketch the main idea. Fix , , and set . 
Consider again the collection of functions and variables , as described in the proof of Theorem 3.6 ###reference_theorem6###. By the premise of the theorem, for any finite subset , there exists an assignment of variables which maintains all functions in below . Each such assignment induces an apportionment of error to each function in , i.e., a vector of length with positive entries summing to . For depending upon variables , this apportionment tracks the contribution of each to the output of . The central technical step of the proof is to demonstrate that one can assign apportionments to each node such that any finite subset of the apportionments can be satisfied by an assignment of variables . Then let be a variable. We assign to the value such that the function has minimal budget apportioned to , among all such . From an invocation of the triangle inequality, this learner at most doubles the output of any .\nRecall from Section 3.1 ###reference_### that proper metric spaces are sufficiently expressive to describe many of the most frequently studied label spaces, including (equipped with any norm) and its closed subsets. What, then, is a typical example of a label space which fails to be proper? Perhaps the most natural example is multiclass classification over infinite label sets, i.e., equipped with the discrete metric . We will now demonstrate, however, that the particular structure of multiclass classification can be exploited to recover an exact compactness result in the style of Theorem 3.6 ###reference_theorem6###. Notably, we do so by invoking M. Hall\u2019s classic matching theorem for infinite graphs, which for good measure we show to be a special case of our Theorem 3.3 ###reference_theorem3###.\nLet be a bipartite graph. An -matching is a set of disjoint edges which covers . 
A graph with an -matching is said to be -matchable.\nA bipartite graph is finitely -matchable if for each finite subset of , there exists a set of disjoint edges which covers .\nM. Hall\u2019s theorem states that an infinite bipartite graph is -matchable if and only if it is finitely -matchable, provided that all nodes in have finite degree. Before proving M. Hall\u2019s theorem by way of Theorem 3.3 ###reference_theorem3###, we establish an intermediate lemma.\nLet be a bipartite graph such that all nodes have finite degree and is finitely -matchable. Then there exists a collection of edges such that is finitely -matchable and all nodes in have finite degree.\nThe proof of Lemma 3.12 ###reference_theorem12### is deferred to Appendix C.3 ###reference_###, but its intuition is fairly simple: by P. Hall\u2019s theorem, is finitely -matchable precisely when Hall\u2019s condition holds, i.e., for all finite (Hall, 1935 ###reference_b14###). Thus any which is not incident to a Hall blocking set can be removed from while preserving Hall\u2019s condition and finite -matchability. Proceeding in this way, nodes can be removed until each remaining is contained in a Hall blocking set . At this point, \u2019s incident edges can be safely restricted to those which are incident with , a finite set.\nWe now prove M. Hall\u2019s theorem as a consequence of our Theorem 3.3 ###reference_theorem3###.\nLet be a bipartite graph in which all nodes have finite degree. Then has an -matching if and only if it is finitely -matchable.\nThe forward direction is clear. For the reverse, suppose is finitely -matchable. Then we may assume as a consequence of Lemma 3.12 ###reference_theorem12### that the nodes in have finite degree as well. Let us think of each node as a variable residing in the discrete metric space on its neighbors. We will also think of each node as a function of its neighbors, which outputs the number of neighbors that have not been assigned to itself. 
Note that the discrete metric space on finitely many elements is proper, and furthermore that any function out of such a space is automatically proper. Then invoke Theorem 3.3 ###reference_theorem3### with to complete the proof. (See Remark 3.5 ###reference_theorem5###.)\n\u220e\nLet be a classification problem, i.e., employing the 0-1 loss function. Then the following are equivalent for any :\nis learnable in the realizable case with transductive sample function .\nFor any finite and finite , is learnable in the realizable case with transductive sample function .\nCertainly . Then suppose (2.) and fix . Now consider the bipartite graph with , , and where edges in connect functions agreeing on common inputs. Then a learner for instances of the form amounts precisely to a choice of incident node (equivalently, edge) for each . Furthermore, such a learner attains error precisely when its selected edges contribute indegree at least to each node in . Using a splitting argument (i.e., creating copies of each node in ), this is equivalent to asking for an -perfect matching in a graph which, by (2.), is finitely -matchable. Note that each node in has degree and appeal to Theorem 3.13 ###reference_theorem13### to complete the proof.\n\u220e"
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "3.3",
|
| 61 |
+
"parent_section_id": "3",
|
| 62 |
+
"section_name": "Agnostic Learning",
|
| 63 |
+
"text": "Our discussion thus far has restricted attention to realizable learning: what can be said of the agnostic case? In short, all results from Sections 3.1 ###reference_### and 3.2 ###reference_### can be claimed for agnostic learning (with nearly identical proofs), with the exception of Theorem 3.9 ###reference_theorem9###. To begin, let us briefly review transductive learning in the agnostic case. See Asilis et al. (2024 ###reference_b4###) or Dughmi et al. (2024 ###reference_b13###) for further detail.\nThe setting of transductive learning in the agnostic case is defined as follows:\nAn adversary selects a collection of labeled datapoints .\nThe unlabeled datapoints in are all revealed to the learner.\nOne labeled datapoint is selected uniformly at random from . The remaining labeled datapoints are displayed to the learner.\nThe learner is prompted to predict the label of .\nNotably, transductive learning in the agnostic case differs from the realizable case in that the adversary is no longer restricted to label the datapoints in using a hypothesis . To compensate for the increased difficulty, and in accordance with the PAC definition of agnostic learning, a learner is only judged relative to best-in-class performance across . Formally,\nFurthermore, one can use nearly identical reasoning as in the proofs of Theorems 3.6 ###reference_theorem6### and 3.7 ###reference_theorem7### to see that agnostic transductive learning is described by a system of variables and functions . In particular, set and let contain all sequences with exactly one . Then a function depends upon the variables , where and\n.\nNow, as in the realizable case, a learner corresponds precisely to an assignment of each variable to a value in , and incurs agnostic transductive error at most if and only if the outputs of all nodes in are maintained below . 
Under the conditions of Theorems 3.6 ###reference_theorem6### or 3.7 ###reference_theorem7###, exact compactness of sample complexity thus comes as an immediate consequence of Theorem 3.3 ###reference_theorem3### and our preceding discussion. Furthermore, when bears a discrete metric, learning reduces to an assignment problem in graphs, and exact compactness follows from a straightforward splitting argument applied to M. Hall\u2019s matching theorem (as in Corollary 3.14 ###reference_theorem14###). We thus have the following theorem.\nLet and the loss function satisfy the conditions of Theorem 3.6 ###reference_theorem6###, Theorem 3.7 ###reference_theorem7###, or Corollary 3.14 ###reference_theorem14###. Then the following conditions are equivalent for any and :\nis learnable in the agnostic case with transductive sample function .\nFor any finite and finite , is learnable in the agnostic case with transductive sample function .\nRegarding improper metric losses, note that our lower bound from Theorem 3.8 ###reference_theorem8### transfers directly to the agnostic case, as it established for a hypothesis class for which agnostic learning is precisely as difficult as realizable learning. We conjecture that larger differences in such error rates \u2014 perhaps of arbitrarily large ratio \u2014 are possible for the agnostic case."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "3.4",
|
| 67 |
+
"parent_section_id": "3",
|
| 68 |
+
"section_name": "PAC Learning",
|
| 69 |
+
"text": "Though our results have thus far been phrased in the language of transductive learning, we now demonstrate that they may be easily extended (in an approximate manner) to Valiant\u2019s celebrated PAC model (Valiant, 1984 ###reference_b22###).\nThe PAC model makes use of probability measures over , for which the true error incurred by a predictor is defined as .\nLet be a collection of probability measures over and a hypothesis class. A learner is a PAC learner for with respect to if there exists a sample function such that the following holds: for any and , a -i.i.d. sample with is such that, with probability at least over the choice of ,\nAgnostic PAC learning refers to the case in which consists of all measures over , and realizable PAC learning to the case in which .\nThe sample complexity of a learner with respect to a hypothesis class , , is the minimal sample function it attains as a learner for . The sample complexity of a class is the pointwise minimal sample complexity attained by any of its learners, i.e., .\nAs previously mentioned, transductive learning bears a close connection to PAC learning: see Asilis et al. (2024 ###reference_b4###) and Dughmi et al. (2024 ###reference_b13###) for further detail on their approximate equivalence.\nLet be a domain, a label set, and a hypothesis class. Fix a loss function taking values in . Then the following inequality holds for all and the constant :\nWe now follow through on porting our results from the transductive model to the PAC model. The following is an immediate consequence of applying Lemma 3.19 ###reference_theorem19### to Theorems 3.9 ###reference_theorem9### and 3.7 ###reference_theorem7###.\nLet be a domain, a label set, and a hypothesis class. 
Suppose that the loss function is bounded and satisfies either of the following conditions:\nis a metric on , or\nis a compact metric space and is continuous with respect to this topology.\nThen if all finite projections of are learnable with realizable PAC sample function , is learnable with sample complexity .\nLet us mention briefly that the connection between transductive learning and PAC learning may not be as tight in the agnostic case as in the realizable case. Through a straightforward use of Markov\u2019s inequality and a repetition argument, one can show that agnostic PAC sample complexities exceed transductive by at most a factor of , but this is an unimpressive bound."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "4",
|
| 73 |
+
"parent_section_id": null,
|
| 74 |
+
"section_name": "Conclusion",
|
| 75 |
+
"text": "In this work, we studied the conditions under which the sample complexity of learning a class can be detected by examining its finite projections. Notably, we established exact compactness results for transductive learning with a broad class of proper or continuous loss functions, across both realizable and agnostic learning. Using bounds relating the transductive and PAC models, we were able to transfer many of our results (in an approximate form) to realizable PAC learning. We leave as an open problem whether compactness of agnostic transductive sample complexities can fail by more than a factor of 2 for arbitrary (improper) metric losses. Additional future work includes better understanding the relationship between the transductive and PAC models in the agnostic case, and examining compactness for loss functions which do not satisfy any of our properness, metric, or continuity conditions (though they may be of somewhat limited interest in learning theory). It would also be of interest to study the compactness of error rates in settings other than supervised learning, such as online or unsupervised learning."
|
| 76 |
+
}
|
| 77 |
+
],
|
| 78 |
+
"appendix": [
|
| 79 |
+
{
|
| 80 |
+
"section_id": "Appendix 1",
|
| 81 |
+
"parent_section_id": null,
|
| 82 |
+
"section_name": "Appendix A Proper metric spaces",
|
| 83 |
+
"text": "Several of our results concern proper metric spaces. Let us expand briefly upon this condition, and present an equivalent definition.\nA metric space is proper if either of the following equivalent conditions hold:\nFor all , if is closed and bounded then it is compact.\nFor any and , the closed ball is compact.\nNote that the conditions are indeed equivalent. That (1.) implies (2.) is immediate. Supposing (2.), note that any closed and bounded subset is a closed subset of some closed ball, and thus compact.\nWe now discuss various sufficient conditions in order for a metric space to be proper.\nLet be a metric space. Any of the following conditions suffice to ensure that be a proper metric space.\nis compact.\nis finite.\nis a closed subset of a proper metric space.\nIf is compact, then its closed subsets are all compact. If is finite, then it is compact. If is a closed subset of a proper metric space , then its closed and bounded subsets are compact in and thus compact in .\n\u220e\nRecall now that endowed with the usual Euclidean norm is a proper metric space, owing to the Heine-Borel theorem. Invoking the equivalence of all norms on , it follows that endowed with any norm enjoys the structure of a proper metric space.\nThe following classes of metric spaces are proper:\nAll finite metric spaces.\nAll compact metric spaces.\n, with any norm.\nAny closed subset of , with any norm.\nRegarding necessary conditions for properness, note that all proper metric spaces are complete. Thus subsets of which are not closed will not be proper, e.g., . See, e.g., Williamson and Janos [1987 ###reference_b25###] for additional discussion and properties of proper metric spaces, which are sometimes referred to as Heine-Borel metric spaces."
|
| 84 |
+
},
|
| 85 |
+
{
|
| 86 |
+
"section_id": "Appendix 2",
|
| 87 |
+
"parent_section_id": null,
|
| 88 |
+
"section_name": "Appendix B Distribution-family Learning",
|
| 89 |
+
"text": "The analysis of PAC learning with respect to more flexible distribution classes than the realizable and agnostic cases falls largely under the purview of distribution-family learning [Benedek and Itai, 1991 ###reference_b8###]. Formally, a problem in distribution-family learning of a class is defined by a family of distributions over , such that unlabeled datapoints are drawn from a distribution and labeled by a hypothesis (in the realizable case) or arbitrarily (in the agnostic case).\nNotably, distribution-family learning has infamously resisted any characterization of learnability, combinatorial or otherwise, for the 40 years since its inception. In fact, there is some evidence to suggest that no such characterization may exist [Lechner and Ben-David, 2023 ###reference_b19###]. Furthermore, it is a setting in which uniform convergence fails to characterize learning, rendering ineffective many of the standard and most celebrated techniques of learning theory.\nNevertheless, we now demonstrate that compactness sheds light on the problem of distribution-family learning, at least for the case of well-behaved distribution classes.\nA family of distributions over a set is well-behaved if whenever lies in the support of some , then , the uniform distribution over , lies in as well.\nDefinition B.1 ###reference_theorem1### is sufficiently flexible that we may apply it to distribution-family learning with or to PAC learning over arbitrary distribution classes with . Though it may appear overly restrictive at first glance, note that well-behavedness is satisfied not only for ordinary PAC learning in the agnostic and realizable cases, but also for learning of partial concept classes in the realizable case [Alon et al., 2022 ###reference_b3###] and for the EMX learning of Ben-David et al. [2019 ###reference_b7###]. In particular, though EMX learning is not presented as a supervised learning problem in Ben-David et al. 
[2019 ###reference_b7###], it can be seen as a binary classification problem over a domain for which consists of those functions outputting finitely many 1\u2019s and contains all realizable, discrete distributions placing all -mass on the label 1.\nCrucially, well-behavedness permits us to study PAC learning by way of transductive error.\nLet be a hypothesis class and a well-behaved family of distributions which are realizable (i.e., ). Fix a loss function taking values in . Then the following inequality holds for all and the constant :\nThe proof is nearly identical to that of [Asilis et al., 2024 ###reference_b4###, Proposition 3.6]. In particular, let denote the sample complexity of learning in the expected error regime (i.e., equals the number of datapoints needed to incur expected error at most ). Then we have\nThe first inequality follows immediately from the fact that the loss function is bounded above by 1. The second inequality follows from a repetition argument, i.e., a learner attaining expected error on samples of size can be boosted to attain expected error with probability by using an additional factor of many samples, as described in [Daniely and Shalev-Shwartz, 2014 ###reference_b11###].\nWe now show that and are essentially equivalent, i.e., that\nThe first inequality follows from a standard leave-one-out argument of Haussler et al. [1994 ###reference_b18###]. The second inequality follows from the fact that for any , there exists an such that many independent draws from the uniform distribution over the entries of have probability at least of containing exactly elements of , as detailed in [Asilis et al., 2024 ###reference_b4###, Lemma A.1]. Crucially, the uniform distribution over lies in owing to well-behavedness of . 
The claim follows from both of the established chains of inequalities.\n\u220e\nNote that Proposition B.2 ###reference_theorem2### holds for the natural definition of transductive learning with respect to , i.e., in which the adversary must select a sequence of unlabeled datapoints which lie in the support of some .\nIt is now immediate from the proof of Theorem 3.6 ###reference_theorem6### that distribution-family learning is a setting in which the transductive sample complexity of learning equals the sample complexity of learning its most challenging finite projections. In the following, we let denote the distributions of which place full measure on .\nLet be an arbitrary domain, a proper metric space, and a hypothesis class. Let be a family of well-behaved, realizable distributions. Then the following are equivalent for any :\nis learnable with respect to with transductive sample function .\nFor any finite and finite , is learnable with respect to with transductive sample function .\nAs in Section 3.4 ###reference_###, Proposition B.2 ###reference_theorem2### applied to Theorem B.3 ###reference_theorem3### immediately yields an almost-exact form of compactness for distribution-family PAC learning with realizable and well-behaved distribution classes. This demonstrates that the learnability of even problems as exotic as EMX learning can be detected by examining all their finite projections, provided that no restrictions are placed upon learners. In Ben-David et al. [2019 ###reference_b7###], however, learners were required to only emit hypotheses in . Our work demonstrates that the nature of their undecidability result \u2014 in which the learnability of a class is determined entirely by its cardinality, despite all its projections being easily learned \u2014 could otherwise not appear in supervised learning with metric losses."
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"section_id": "Appendix 3",
|
| 93 |
+
"parent_section_id": null,
|
| 94 |
+
"section_name": "Appendix C Omitted proofs",
|
| 95 |
+
"text": "Set and let be the metric space defined as follows. , where is an infinite set whose points are all distance 2 apart. is an infinite set whose points are indexed by finite subsets of , e.g., as in for finite . For all such , define to be distance 1 from the elements of , distance 2 from the other elements of , and distance 1 from all other points in . Note that indeed forms a metric space as its distance function is positive-definite, symmetric, and only uses the non-zero values of 1 and 2. (In particular, the triangle inequality is satisfied as an automatic consequence of the latter fact.) Now fix an and . Define to consist of all those functions which output on all inputs . Notably, any may take arbitrary values on .\nLet us first analyze the sample complexity of learning a finite projection of . Fix finite and finite . Then, as and are each finite, the images of all the are contained in a finite set . Let decompose as with . Then the following learner attains error on instances of size :\nIn particular, for any and , , while for any , emits the correct prediction by definition of . Furthermore, we may assume without loss of generality that transductive learning instances do not contain repeated datapoints, as these only lessen the difficulty of learning. Thus any sample has at most unlabeled datapoints in , and the previous analysis demonstrates that can be learned with error when .\nOn the other hand, for the case of learning itself, there exist such that the worst-case error incurred by any learner on is at least . In particular, take . As has radius 2 \u2014 and furthermore for any there exists with \u2014 transductively learning with is guaranteed to incur an error of at least in the worst case. (That is, by taking with .) Thus, for and any finite projection we have and , and the claim follows.\n\u220e\nBefore commencing with the proof, let us establish some terminology and a supporting lemma. Fix a hypothesis class along with and . 
Recall the collection of functions and variables which capture the structure of transductive learning on instances of the form , as described in the proof of Theorem 3.6 ###reference_theorem6###. In this setting, for any number , we will let -apportionment refer to a vector with and . We may suppress and refer simply to apportionments.\nGiven an -apportionment for a node , we will say that an assignment of variables in satisfies this apportionment if the output of as a function decomposes according to . More explicitly, recall that depends upon the variables , where and\nThen an assignment of variables satisfies if for all . Intuitively, tracks the manner in which produces its output. Similarly, given apportionments for various nodes in , we say that a given assignment of variables satisfies the apportionments if it satisfies each of them at once.\nWith a slight abuse of terminology, we say that a node without an apportionment is satisfied by an assignment of variables if its output is at most under the assignment. There should be no risk of confusion, as it will always be clear whether a given node is endowed with an apportionment or not. In a similar fashion to Theorem 3.6 ###reference_theorem6###, we say a partial assignment of apportionments is an assignment of apportionments to a subset of . An assignment which happens to be total is referred to as a total assignment of apportionments. A partial assignment is satisfiable with respect to if there exists an assignment of variables such that all nodes in are satisfied. (I.e., have their apportionments satisfied if equipped with one, and are otherwise simply maintained below .) We say is finitely satisfiable if it is satisfiable with respect to all finite subsets of .333Notably, this is a pointwise condition, not a uniform one. The satisfying assignments are permitted to vary across the finite subsets of .\nLet be a domain, a metric space, and a hypothesis class. 
Fix and the corresponding collections of functions and variables . Let . If is finitely satisfiable, when none of its elements are endowed with apportionments, then there exists a total assignment of -apportionments to which is finitely satisfiable.\nWe appeal to Zorn\u2019s lemma. The argument relies crucially upon the fact that any partial assignment of -apportionments which is finitely satisfiable can be augmented by assigning an additional apportionment to an unassigned function in .\nLet be a partial assignment of -apportionments to which is finitely satisfiable and leaves a function in unassigned. Then one such unassigned variable can receive an -apportionment while preserving finite satisfiability.\nFix an unassigned variable , along with a finite collection of nodes . Let , where the nodes in are unassigned by (i.e., need only be maintained below ), and those in are assigned by (i.e., need be maintained below and furthermore have their apportionments respected). Let denote the collection of variable assignments which satisfy all nodes in along with . By the supposition that is finitely satisfiable, is non-empty.\nNow let denote all -apportionments for that are satisfied by an assignment in . Informally, these are the -apportionments with which we could endow , if we only needed to consider the nodes . Then let denote the closure of in , and consider the family of sets\nEach set is compact, as it is closed and bounded. Furthermore, finite intersections of such sets are non-empty, as\nThen there exists an -apportionment , where the intersection ranges over all finite subsets of . As we took the closures of the sets , this does not suffice to guarantee us that is non-empty. Let us now increase each of the entries of by an arbitrarily small amount, say , and call the resulting -apportionment .\nFor any finite , recall that , meaning there exist assignments satisfying and which induce -apportionments on of arbitrarily small proximity to . 
As strictly exceeds in each coordinate, then there exists an assignment satisfying which also satisfies . And was chosen arbitrarily, so the claim follows.\n\u220e\nConsider now the poset whose elements are finitely satisfiable partial assignments of -apportionments to . The partial ordering is such that if agrees with all assignments of apportionments made by and perhaps assigns additional apportionments. It is straightforward to see that chains in have upper bounds: let be a chain and define to be the \u201cunion\" of assignments in , i.e., leaves unassigned if all leave unassigned, otherwise it assigns to the unique apportionment used by the assignments in .\nCertainly serves as an upper bound for , provided that . To see that , fix a finite set . Suppose is the collection of nodes in which receive -apportionments from . Each receives an apportionment from a . Then, as is a chain and is a finite set, there exists an with . By the definition of the partial order with which we endowed , then agrees exactly with when restricted to the set . As , is satisfiable with respect to and thus satisfiable with respect to . As was selected arbitrarily, we have that is finitely satisfiable, meaning and chains indeed have upper bounds.\nThen, invoking Zorn\u2019s lemma, contains a maximal element . By Lemma C.2 ###reference_theorem2###, it must be that does not leave any function in unassigned, otherwise it could be augmented with an additional apportionment, contradicting maximality. Thus there exists a total assignment of -apportionments which is finitely satisfiable, completing the argument.\n\u220e\nWe are now equipped to prove Theorem 3.9 ###reference_theorem9### itself.\nLet be as in the theorem statement, fix an , and let . We will exhibit a learner for attaining error at most on samples of size , for arbitrarily small . To this end, fix one such and an , and recall the system of functions and variables which capture learning on transductive instances of the form . 
That is, we represent the functions in as and the variables in as . As described in the proof of Theorem 3.6 ###reference_theorem6###, we may suppress the unlabeled datapoints in the definitions of and , and represent each as an element of and each as an element of with exactly one . Recall too that each function , represented by , depends upon the variables , where\nUpon assigning each such variable to an element of (semantically, a completion of its \u201c?\" entry corresponding to a query at test time) the function outputs the value\nThe central observation is that defining a learner for which attains error amounts precisely to assigning each variable in to a value in such that the functions in are all maintained below . By the premise of the theorem, this collection of functions and variables is finitely satisfiable. That is, for each finite , there exists an assignment of all variables such that all functions in are maintained below .444Strictly speaking, the definition of error rate involves an infimum, meaning we are only guaranteed that the functions in can be maintained arbitrarily close to . It is straightforward to see that this suffices for our purposes, however, as Lemma C.1 ###reference_theorem1### results in the addition of an arbitrarily small term to anyway. Then, by Lemma C.1 ###reference_theorem1###, there exists an assignment of -apportionments to each function in which is finitely satisfiable.\nNow fix a node : we will demonstrate how to assign it to a value of . Note that influences a potentially infinite collection of functions . Each such function has an apportionment of error for . Call these values . Recall that each node for computes its error incurred on relative to a label . Now choose an index such that . We then set .\nIn order to analyze this assignment, fix an arbitrary and consider the set . By finite satisfiability of our system of -apportionments, there exists an assignment of variables satisfying the functions in . 
Let be the value of variable in this assignment. We have:\nNow assign all variables in in this manner, and consider an arbitrary node with error apportionment for each of the variables upon which it depends. Then, using the above analysis, evaluates to at most\ncompleting the argument.\n\u220e\nFix a bipartite graph such that all nodes in have finite degree and is finitely -matchable. We will demonstrate the existence of a subgraph of such that is finitely -matchable.\nFirst note that is finitely -matchable if and only if P. Hall\u2019s condition holds for all finite subsets of . That is, if and only if , where denotes the set of neighbors has in , by Hall [1935 ###reference_b14###]. We refer to a finite subset of as a blocking set if . We say a node is contained in a blocking set if there exists a blocking set such that .\nNow consider all nodes in ; if there exists a node such that is not in any blocking set, then it can be removed from while preserving Hall\u2019s condition in the graph (i.e., while preserving finite -matchability). Repeatedly removing nodes from in this way and applying Zorn\u2019s lemma, we arrive at a collection of nodes such that Hall\u2019s condition is preserved and each node in is contained in a blocking set. Call the resulting graph .\nNow fix a node and pick a blocking set containing . We remove all edges incident to which are not incident to . Let us demonstrate that the remaining graph remains finitely -matchable, i.e., satisfies Hall\u2019s condition. Suppose not, so that there exists a finite set violating Hall\u2019s condition. Then, as satisfies Hall\u2019s condition, it must be that is incident to in but not in . Furthermore, it must be that is a blocking set in , as was . Then consider the set in . We have:\nproducing contradiction with Hall\u2019s condition for . 
Note that the third line makes use of the fact that , as clearly and furthermore owing to the fact that is incident to both and but not due to a node in (otherwise would have remained incident to in ).\nWe are thus permitted to perform the operation on any single node of to make its degree finite while preserving Hall\u2019s condition. As any failure of Hall\u2019s condition can be detected by way of finitely many nodes in , it follows that we can do so for all nodes of in concert. The resulting graph is a subgraph of which is finitely -matchable and for which all nodes have finite degree, as desired.\n\u220e"
|
| 96 |
+
}
|
| 97 |
+
],
|
| 98 |
+
"tables": {},
|
| 99 |
+
"image_paths": {},
|
| 100 |
+
"validation": true,
|
| 101 |
+
"references": [
|
| 102 |
+
{
|
| 103 |
+
"1": {
|
| 104 |
+
"title": "The one-inclusion graph algorithm is not always optimal.",
|
| 105 |
+
"author": "Ishaq Aden-Ali, Yeshwanth Cherapanamjeri, Abhishek Shetty, and Nikita\nZhivotovskiy.",
|
| 106 |
+
"venue": "In The Thirty Sixth Annual Conference on Learning Theory,\npages 72\u201388. PMLR, 2023a.",
|
| 107 |
+
"url": null
|
| 108 |
+
}
|
| 109 |
+
},
|
| 110 |
+
{
|
| 111 |
+
"2": {
|
| 112 |
+
"title": "Optimal pac bounds without uniform convergence.",
|
| 113 |
+
"author": "Ishaq Aden-Ali, Yeshwanth Cherapanamjeri, Abhishek Shetty, and Nikita\nZhivotovskiy.",
|
| 114 |
+
"venue": "In 2023 IEEE 64th Annual Symposium on Foundations of Computer\nScience (FOCS), pages 1203\u20131223. IEEE Computer Society, 2023b.",
|
| 115 |
+
"url": null
|
| 116 |
+
}
|
| 117 |
+
},
|
| 118 |
+
{
|
| 119 |
+
"3": {
|
| 120 |
+
"title": "A theory of pac learnability of partial concept classes.",
|
| 121 |
+
"author": "Noga Alon, Steve Hanneke, Ron Holzman, and Shay Moran.",
|
| 122 |
+
"venue": "In 2021 IEEE 62nd Annual Symposium on Foundations of Computer\nScience (FOCS), pages 658\u2013671. IEEE, 2022.",
|
| 123 |
+
"url": null
|
| 124 |
+
}
|
| 125 |
+
},
|
| 126 |
+
{
|
| 127 |
+
"4": {
|
| 128 |
+
"title": "Regularization and optimal multiclass learning.",
|
| 129 |
+
"author": "Julian Asilis, Siddartha Devic, Shaddin Dughmi, Vatsal Sharan, and Shang-Hua\nTeng.",
|
| 130 |
+
"venue": "In The Thirty Seventh Annual Conference on Learning Theory.\nPMLR, 2024.",
|
| 131 |
+
"url": null
|
| 132 |
+
}
|
| 133 |
+
},
|
| 134 |
+
{
|
| 135 |
+
"5": {
|
| 136 |
+
"title": "Optimal learners for realizable regression: PAC learning and online\nlearning.",
|
| 137 |
+
"author": "Idan Attias, Steve Hanneke, Alkis Kalavasis, Amin Karbasi, and Grigoris\nVelegkas.",
|
| 138 |
+
"venue": "In Thirty-seventh Conference on Neural Information Processing\nSystems, 2023.",
|
| 139 |
+
"url": null
|
| 140 |
+
}
|
| 141 |
+
},
|
| 142 |
+
{
|
| 143 |
+
"6": {
|
| 144 |
+
"title": "Online learning versus offline learning.",
|
| 145 |
+
"author": "Shai Ben-David, Eyal Kushilevitz, and Yishay Mansour.",
|
| 146 |
+
"venue": "Machine Learning, 29:45\u201363, 1997.",
|
| 147 |
+
"url": null
|
| 148 |
+
}
|
| 149 |
+
},
|
| 150 |
+
{
|
| 151 |
+
"7": {
|
| 152 |
+
"title": "Learnability can be undecidable.",
|
| 153 |
+
"author": "Shai Ben-David, Pavel Hrube\u0161, Shay Moran, Amir Shpilka, and Amir\nYehudayoff.",
|
| 154 |
+
"venue": "Nature Machine Intelligence, 1(1):44\u201348,\n2019.",
|
| 155 |
+
"url": null
|
| 156 |
+
}
|
| 157 |
+
},
|
| 158 |
+
{
|
| 159 |
+
"8": {
|
| 160 |
+
"title": "Learnability with respect to fixed distributions.",
|
| 161 |
+
"author": "Gyora M Benedek and Alon Itai.",
|
| 162 |
+
"venue": "Theoretical Computer Science, 86(2):377\u2013389, 1991.",
|
| 163 |
+
"url": null
|
| 164 |
+
}
|
| 165 |
+
},
|
| 166 |
+
{
|
| 167 |
+
"9": {
|
| 168 |
+
"title": "When does optimizing a proper loss yield calibration?",
|
| 169 |
+
"author": "Jaroslaw Blasiok, Parikshit Gopalan, Lunjia Hu, and Preetum Nakkiran.",
|
| 170 |
+
"venue": "Advances in Neural Information Processing Systems, 36, 2023.",
|
| 171 |
+
"url": null
|
| 172 |
+
}
|
| 173 |
+
},
|
| 174 |
+
{
|
| 175 |
+
"10": {
|
| 176 |
+
"title": "A characterization of multiclass learnability.",
|
| 177 |
+
"author": "Nataly Brukhim, Daniel Carmon, Irit Dinur, Shay Moran, and Amir Yehudayoff.",
|
| 178 |
+
"venue": "In 2022 IEEE 63rd Annual Symposium on Foundations of Computer\nScience (FOCS), pages 943\u2013955. IEEE, 2022.",
|
| 179 |
+
"url": null
|
| 180 |
+
}
|
| 181 |
+
},
|
| 182 |
+
{
|
| 183 |
+
"11": {
|
| 184 |
+
"title": "Optimal learners for multiclass problems.",
|
| 185 |
+
"author": "Amit Daniely and Shai Shalev-Shwartz.",
|
| 186 |
+
"venue": "In Conference on Learning Theory, pages 287\u2013316. PMLR, 2014.",
|
| 187 |
+
"url": null
|
| 188 |
+
}
|
| 189 |
+
},
|
| 190 |
+
{
|
| 191 |
+
"12": {
|
| 192 |
+
"title": "A colour problem for infinite graphs and a problem in the theory of\nrelations.",
|
| 193 |
+
"author": "NG De Bruijn and P Erd\u00f6s.",
|
| 194 |
+
"venue": "Indagationes Mathematicae, 13:371\u2013373, 1951.",
|
| 195 |
+
"url": null
|
| 196 |
+
}
|
| 197 |
+
},
|
| 198 |
+
{
|
| 199 |
+
"13": {
|
| 200 |
+
"title": "Is transductive learning equivalent to pac learning?",
|
| 201 |
+
"author": "Shaddin Dughmi, Yusuf Kalayci, and Grayson York.",
|
| 202 |
+
"venue": "arXiv preprint arXiv:2405.05190, 2024.",
|
| 203 |
+
"url": null
|
| 204 |
+
}
|
| 205 |
+
},
|
| 206 |
+
{
|
| 207 |
+
"14": {
|
| 208 |
+
"title": "On representatives of subsets.",
|
| 209 |
+
"author": "P. Hall.",
|
| 210 |
+
"venue": "Journal of the London Mathematical Society, s1-10(1):26\u201330, 1935.",
|
| 211 |
+
"url": null
|
| 212 |
+
}
|
| 213 |
+
},
|
| 214 |
+
{
|
| 215 |
+
"15": {
|
| 216 |
+
"title": "Distinct representatives of subsets.",
|
| 217 |
+
"author": "Marshall Hall Jr.",
|
| 218 |
+
"venue": "Bulletin of the American Mathematical Society, 54(10):922\u2013926, 1948.",
|
| 219 |
+
"url": null
|
| 220 |
+
}
|
| 221 |
+
},
|
| 222 |
+
{
|
| 223 |
+
"16": {
|
| 224 |
+
"title": "The marriage problem.",
|
| 225 |
+
"author": "Paul Halmos and Herbert Vaughan.",
|
| 226 |
+
"venue": "American Journal of Mathematics, 72(1):214\u2013215, 1950.",
|
| 227 |
+
"url": null
|
| 228 |
+
}
|
| 229 |
+
},
|
| 230 |
+
{
|
| 231 |
+
"17": {
|
| 232 |
+
"title": "A trichotomy for transductive online learning.",
|
| 233 |
+
"author": "Steve Hanneke, Shay Moran, and Jonathan Shafer.",
|
| 234 |
+
"venue": "Advances in Neural Information Processing Systems, 36, 2023.",
|
| 235 |
+
"url": null
|
| 236 |
+
}
|
| 237 |
+
},
|
| 238 |
+
{
|
| 239 |
+
"18": {
|
| 240 |
+
"title": "Predicting 0, 1-functions on randomly drawn points.",
|
| 241 |
+
"author": "David Haussler, Nick Littlestone, and Manfred K Warmuth.",
|
| 242 |
+
"venue": "Information and Computation, 115(2):248\u2013292, 1994.",
|
| 243 |
+
"url": null
|
| 244 |
+
}
|
| 245 |
+
},
|
| 246 |
+
{
|
| 247 |
+
"19": {
|
| 248 |
+
"title": "Impossibility of characterizing distribution learning\u2013a simple\nsolution to a long-standing problem.",
|
| 249 |
+
"author": "Tosca Lechner and Shai Ben-David.",
|
| 250 |
+
"venue": "arXiv preprint arXiv:2304.08712, 2023.",
|
| 251 |
+
"url": null
|
| 252 |
+
}
|
| 253 |
+
},
|
| 254 |
+
{
|
| 255 |
+
"20": {
|
| 256 |
+
"title": "Adversarially robust learning: A generic minimax optimal learner and\ncharacterization.",
|
| 257 |
+
"author": "Omar Montasser, Steve Hanneke, and Nati Srebro.",
|
| 258 |
+
"venue": "Advances in Neural Information Processing Systems,\n35:37458\u201337470, 2022.",
|
| 259 |
+
"url": null
|
| 260 |
+
}
|
| 261 |
+
},
|
| 262 |
+
{
|
| 263 |
+
"21": {
|
| 264 |
+
"title": "Note on the transfinite case of hall\u2019s theorem on representatives.",
|
| 265 |
+
"author": "Richard Rado.",
|
| 266 |
+
"venue": "J. London Math. Soc, 42:321\u2013324, 1967.",
|
| 267 |
+
"url": null
|
| 268 |
+
}
|
| 269 |
+
},
|
| 270 |
+
{
|
| 271 |
+
"22": {
|
| 272 |
+
"title": "A theory of the learnable.",
|
| 273 |
+
"author": "Leslie G Valiant.",
|
| 274 |
+
"venue": "Communications of the ACM, 27(11):1134\u20131142, 1984.",
|
| 275 |
+
"url": null
|
| 276 |
+
}
|
| 277 |
+
},
|
| 278 |
+
{
|
| 279 |
+
"23": {
|
| 280 |
+
"title": "Estimation of dependences based on empirical data: Springer series in statistics, 1982.",
|
| 281 |
+
"author": "Vladimir Vapnik.",
|
| 282 |
+
"venue": null,
|
| 283 |
+
"url": null
|
| 284 |
+
}
|
| 285 |
+
},
|
| 286 |
+
{
|
| 287 |
+
"24": {
|
| 288 |
+
"title": "Theory of pattern recognition, 1974.",
|
| 289 |
+
"author": "Vladimir Vapnik and Alexey Chervonenkis.",
|
| 290 |
+
"venue": null,
|
| 291 |
+
"url": null
|
| 292 |
+
}
|
| 293 |
+
},
|
| 294 |
+
{
|
| 295 |
+
"25": {
|
| 296 |
+
"title": "Constructing metrics with the heine-borel property.",
|
| 297 |
+
"author": "Robert Williamson and Ludvik Janos.",
|
| 298 |
+
"venue": "Proceedings of the American Mathematical Society, 100(3):567\u2013573, 1987.",
|
| 299 |
+
"url": null
|
| 300 |
+
}
|
| 301 |
+
}
|
| 302 |
+
],
|
| 303 |
+
"url": "http://arxiv.org/html/2402.10360v3"
|
| 304 |
+
}
|
20241030/2402.14180v2.json
ADDED
|
@@ -0,0 +1,521 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"title": "Linear Transformers are Versatile In-Context Learners",
|
| 3 |
+
"abstract": "Recent research has demonstrated that transformers, particularly linear attention models, implicitly execute gradient-descent-like algorithms on data provided in-context during their forward inference step. However, their capability in handling more complex problems remains unexplored. In this paper, we prove that each layer of a linear transformer maintains a weight vector for an implicit linear regression problem and can be interpreted as performing a variant of preconditioned gradient descent. We also investigate the use of linear transformers in a challenging scenario where the training data is corrupted with different levels of noise. Remarkably, we demonstrate that for this problem linear transformers discover an intricate and highly effective optimization algorithm, surpassing or matching in performance many reasonable baselines. We analyze this algorithm and show that it is a novel approach incorporating momentum and adaptive rescaling based on noise levels. Our findings show that even linear transformers possess the surprising ability to discover sophisticated optimization strategies.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "The transformer architecture (Vaswani et al., 2017 ###reference_b33###) has revolutionized the field of machine learning, driving breakthroughs across various domains and serving as a foundation for powerful models (Anil et al., 2023 ###reference_b5###; Achiam et al., 2023 ###reference_b1###; Team et al., 2023 ###reference_b30###; Jiang et al., 2023 ###reference_b19###). However, despite their widespread success, the mechanisms that drive their performance remain an active area of research. A key component of their success is attributed to in-context learning (ICL, Brown et al., 2020 ###reference_b7###) \u2013 an emergent ability of transformers to make predictions based on information provided within the input sequence itself, without explicit parameter updates.\nRecently, several papers (Garg et al., 2022 ###reference_b13###; Aky\u00fcrek et al., 2022 ###reference_b3###; von Oswald et al., 2023a ###reference_b34###) have suggested that ICL might be partially explained by an implicit meta-optimization of the transformers that happens on input context (aka mesa-optimization Hubinger et al., 2019 ###reference_b18###). They have shown that transformers with linear self-attention layers (aka linear transformers) trained on linear regression tasks can internally implement gradient-based optimization.\nSpecifically, von Oswald et al. (2023a ###reference_b34###) demonstrated that linear transformers can execute iterations of an algorithm similar to the gradient descent algorithm (which they call GD++), with each attention layer representing one step of the algorithm. Later, Ahn et al. (2023 ###reference_b2###); Zhang et al. (2023 ###reference_b40###) further characterized this behavior, showing that the learned solution is a form of preconditioned GD, and this solution is optimal for one-layer linear transformers.\nIn this paper, we continue to study linear transformers trained on linear regression problems. 
We prove that each layer of every linear transformer maintains a weight vector for an underlying linear regression problem. Under some restrictions, the algorithm it runs can be interpreted as a complex variant of preconditioned gradient descent with momentum-like behaviors.\nWhile maintaining a linear regression model (regardless of the data) might seem restrictive, we show that linear transformers can discover powerful optimization algorithms. As a first example, we prove that in case of GD++, the preconditioner results in a second order optimization algorithm.\nFurthermore, we demonstrate that linear transformers can be trained to uncover even more powerful and intricate algorithms. We modified the problem formulation to consider mixed linear regression with varying noise levels111We consider a model where each sequence contains data with the same noise level, while different sequences have different noise levels. (inspired by Bai et al., 2023 ###reference_b6###). This is a harder and non-trivial problem with no obvious closed-form solution, since it needs to account for various levels of noise in the input.\nOur experiments with two different noise variance distributions (uniform and categorical) demonstrate the remarkable flexibility of linear transformers. Training a linear transformer in these settings leads to an algorithm that outperforms GD++ as well as various baselines derived from the exact closed-form solution of the ridge regression. We discover that this result holds even when training a linear transformer with diagonal weight matrices.\nThrough a detailed analysis, we reveal key distinctions from GD++, including momentum-like term and adaptive rescaling based on the noise levels.\nOur findings contribute to the growing body of research where novel, high-performing algorithms have been directly discovered through the reverse-engineering of transformer weights. 
This work expands our understanding of the implicit learning capabilities of attention-based models and highlights the remarkable versatility of even simple linear transformers as in-context learners. We demonstrate that transformers have the potential to discover effective algorithms that may advance the state-of-the-art in optimization and machine learning in general."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Preliminaries",
|
| 15 |
+
"text": "In this section we introduce notations for linear transformers, data, and type of problems we consider."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "2.1",
|
| 19 |
+
"parent_section_id": "2",
|
| 20 |
+
"section_name": "Linear transformers and in-context learning",
|
| 21 |
+
"text": "Given input sequence , a single head in a linear self-attention layer is usually parameterized by four matrices, key , query , value and projection . The output of the non-causal layer at position is where is computed as\nEquivalently, one can use parameters and , and the equation becomes\nMultiple heads simply sum their effects\nWe define a linear transformer as a multi-layer neural network composed of linear self-attention layers parameterized by for . To isolate the core mechanisms, we consider a simplified decoder-only architecture, excluding MLPs and LayerNorm components. This architecture was also used in previous work (von Oswald et al., 2023a ###reference_b34###; Ahn et al., 2023 ###reference_b2###).\nWe consider two versions of linear transformers: Full with the transformer parameters represented by full matrices and Diag, where the parameters are restricted to diagonal matrices only.\nInspired by von Oswald et al. (2023a ###reference_b34###), in this paper we consider regression data as the token sequence. Each token consists of a feature vector and its corresponding output . Additionally, we append a query token to the sequence, where represents test data. The goal of in-context learning is to predict for the test data . We constrain the attention to only focus on the first tokens of the sequence so that it ignores the query token.\nWe use to denote the -th token in the transformer\u2019s output at layer . The initial layer is simply the input: . For a model with parameters , we read out the prediction by taking the negative222We set the actual prediction to , similar to von Oswald et al. (2023a ###reference_b34###), because it\u2019s easier for linear transformers to predict . of the last coordinate of the final token in the last layer as .\nLet\u2019s also define the following notation to be used throughout the paper"
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2.2",
|
| 25 |
+
"parent_section_id": "2",
|
| 26 |
+
"section_name": "Noisy regression model",
|
| 27 |
+
"text": "As a model problem, we consider data generated from a noisy linear regression model. For each input sequence , we sample a ground-truth weight vector , and generate data points as and , with noise .\nNote that each sequence can have different ground-truth weight vectors , but every data point in the sequence shares the same and . The query is generated as and (since the noise is independent, whether we include noise in will only be an additive constant to the final objective).\nWe further define an ordinary least square (OLS) loss as\nThe OLS solution is with residuals .\nIn the presence of noise , in general is not equal to the ground truth . For a known noise level , the best estimator for is provided by ridge regression:\nwith solution . Of course, in reality the variance of the noise is not known and has to be estimated from the data."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "2.3",
|
| 31 |
+
"parent_section_id": "2",
|
| 32 |
+
"section_name": "Fixed vs. mixed noise variance problems",
|
| 33 |
+
"text": "We consider two different problems within the noisy linear regression framework.\nIn this scenario, the variance remains constant for all the training data. Here, the in-context loss is:\nwhere and . This problem was initially explored by Garg et al. (2022 ###reference_b13###). Later, von Oswald et al. (2023a ###reference_b34###) have demonstrated that a linear transformer (6 ###reference_###) converges to a form of a gradient descent solution, which they called GD++. We define this in details later.\nIn this case, the noise variance is drawn from some fixed distribution for each sequence. The in-context learning loss becomes:\nIn other words, each training sequence has a fixed noise level , but different training sequences have different noise levels sampled from a specified distribution . This scenario adds complexity because the model must predict for changing noise distribution, and the optimal solution likely would involve some sort of noise estimation. We have found that empirically, GD++ fails to model this noise variance and instead converges to a solution which can be interpreted as a single noise variance estimate across all input data."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "3",
|
| 37 |
+
"parent_section_id": null,
|
| 38 |
+
"section_name": "Related work",
|
| 39 |
+
"text": ""
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "4",
|
| 43 |
+
"parent_section_id": null,
|
| 44 |
+
"section_name": "Linear transformers maintain linear regression model at every layer",
|
| 45 |
+
"text": "While large, nonlinear transformers can model complex relationship, we show that linear transformers are restricted to maintaining a linear regression model based on the input, in the sense that the -th layer output is always a linear function of the input with latent (and possibly nonlinear) coefficients.\nSuppose the output of a linear transformer at -th layer is , then there exists matrices , vectors and scalars such that\nNote that , and are not linear in the input, but this still poses restrictions on what the linear transformers can do. For example we show that it cannot represent a quadratic function:\nSuppose the input to a linear transformer is where and , let the -th layer output be and let and (here is just the first coordinate of ), then when with high probability the cosine similarity of and is at most 0.1.\nTheorem 4.1 ###reference_theorem1### implies that the output of linear transformer can always be explained as linear combinations of input with latent weights and . The matrices , vectors and numbers are not linear and can in fact be quite complex, which we characterize below:\nIn the setup of Theorem 4.1 ###reference_theorem1###, if we let\nthen one can recursively compute matrices , vectors and numbers for every layer using\nwith the init. condition .\nThe updates to the parameters are complicated and nonlinear, allowing linear transformers to implement powerful algorithms, as we will later see in Section 5 ###reference_###. In fact, even with diagonal and , they remain flexible. The updates in this case can be further simplified to a more familiar form:\nIn the setup of Theorem 4.1 ###reference_theorem1### with diagonal parameters,\n are updated as\nHere are matrices and numbers that depend on in Lemma 4.3 ###reference_theorem3###.\nNote that is (proportional to) the gradient of a linear model . 
This makes the updates similar to a gradient descent with momentum:\nOf course, the formula in Lemma 4.4 ###reference_theorem4### is still much more complicated with matrices in places of and , and also including a gradient term for the update of ."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "5",
|
| 49 |
+
"parent_section_id": null,
|
| 50 |
+
"section_name": "Power of diagonal attention matrices",
|
| 51 |
+
"text": "Although linear transformers are constrained, they can solve complex in-context learning problems. Empirically, we have found that they are able to very accurately solve linear regression with mixed noise variance (7 ###reference_###), with final learned weights that are very diagonal heavy with some low-rank component (see Fig. 4 ###reference_###). Surprisingly, the final loss remains remarkably consistent even when their and matrices (3 ###reference_###) are diagonal. Here we will analyze this special case and explain its effectiveness.\nSince the elements of are permutation invariant, a diagonal parameterization reduces each attention heads to just four parameters:\nIt would be useful to further reparametrize the linear transformer (3 ###reference_###) using:\nThis leads to the following diagonal layer updates:\nFour variables , , , represent information flow between the data and the labels across layers. For instance, the term controlled by measures information flow from to , measures the flow from to and so forth. Since the model can always be captured by these 4 variables, having many heads does not significantly increase its representation power. When there is only one head the equation is always true, while models with more than one head do not have this limitation. However empirically even models with one head is quite powerful."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "5.1",
|
| 55 |
+
"parent_section_id": "5",
|
| 56 |
+
"section_name": "GD++ and least squares solver",
|
| 57 |
+
"text": "GD++, introduced in von Oswald et al. (2023a ###reference_b34###), represents a linear transformer that is trained on a fixed noise variance problem (6 ###reference_###). It is a variant of a diagonal linear transformer, with all the heads satisfying . Dynamics are influenced only by and , leading to simpler updates:\nThe update on acts as preconditioning and the update on performs gradient descent on the data.\nWhile existing analysis by Ahn et al. (2023 ###reference_b2###) has not yielded fast convergence rates for GD++, we show here that it is actually a second-order optimization algorithm for the least squares problem (4 ###reference_###):\nGiven where has eigenvalues in the range with a condition number . Let be the optimal solution to least squares problem (4 ###reference_###), then there exists hyperparameters for GD++ algorithm that outputs with accuracy in steps. In particular that implies there exists an -layer linear transformer that can solve this task.\nThe convergence rate of is typically achieved only by second-order algorithms such as Newton\u2019s method."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "5.2",
|
| 61 |
+
"parent_section_id": "5",
|
| 62 |
+
"section_name": "Understanding : adaptive rescaling",
|
| 63 |
+
"text": "If a layer only has , it has a rescaling effect. The amount of scaling is related to the amount of noise added in a model selection setting. The update rule for this layer is:\nThis rescales every by a factor that depends on . When , this shrinks of the output based on the norm of in the previous layer. This is useful for the mixed noise variance problem, as ridge regression solution scales the least squares solution by a factor that depends on the noise level.\nSpecifically, assuming , the ridge regression solution becomes , which is exactly a scaled version of the OLS solution. Further, when noise is larger, the scaled factor is smaller, which agrees with the behavior of a negative .\nWe can show that using adaptive scaling even a 2-layer linear transformer can be enough to solve a simple example of categorical mixed noise variance problem and :\nSuppose the input to the transformer is , where , . Here is the noise whose noise level can take one of two values: or . Then as goes to , there exists a set of parameters for two-layer linear transformers such that the implicit of the linear transformer converges to the optimal ridge regression results (and the output of the linear transformer is ). Further, the first layer only has being nonzero and the second layer only has being nonzero."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "5.3",
|
| 67 |
+
"parent_section_id": "5",
|
| 68 |
+
"section_name": "Understanding : adapting step-sizes",
|
| 69 |
+
"text": "The final term in the diagonal model, , has a more complicated effect. Since it changes only the -coordinates, it does not have an immediate effect on . To understand how it influences the we consider a simplified two-step process, where the first step only has and the second step only has (so the second step is just doing one step of gradient descent). In this case, the first layer will update the \u2019s as:\nThere are two effects of the term, one is a multiplicative effect on , and the other is an additive term that makes -output related to the residual . The multiplicative step in has an unknown preconditioning effect. For simplicity we assume the multiplicative term is small, that is:\nThe first layer does not change , so and . For this set of , we can write down the output on in the second layer as\n###figure_1### ###figure_2### Here we used the properties of residual (in particular , and ). Note that is basically what a gradient descent step on the original input should do. Therefore effectively, the two-layer network is doing gradient descent, but the step size is the product of and . The factor depends on the level of noise, and when , the effective step size is smaller when there is more noise. This is especially helpful in the model selection problem, because intuitively one would like to perform early-stopping (small step sizes) when the noise is high."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "6",
|
| 73 |
+
"parent_section_id": null,
|
| 74 |
+
"section_name": "Experiments",
|
| 75 |
+
"text": "In this section, we investigate the training dynamics of linear transformers when trained with a mixed noise variance problem (7 ###reference_###). We evaluate three types of single-head linear transformer models:\nFull. Trains full parameter matrices.\nDiag. Trains diagonal parameter matrices (10 ###reference_###).\nGD++. An even more restricted diagonal variant defined in (11 ###reference_###).\nFor each experiment, we train each linear transformer modifications with a varying number of layers ( to ) using using Adam optimizer for iterations with a learning rate of and a batch size of . In some cases, especially for a large number of layers, we had to adjust the learning rate to prevent stability issues. We report the best result out of runs with different training seeds. We used in-context examples in dimensions. We evaluated the algorithm using novel sequences. All the experiments were done on a single H100 GPU with 80GB of VRAM. It took on average 4\u201312 hours to train a single algorithm, however experimenting with different weight decay parameters, better optimizer and learning rate schedule will likely reduce this number dramatically.\nWe use adjusted evaluation loss as our main performance metric. It is calculated by subtracting the oracle loss from the predictor\u2019s loss. The oracle loss is the closed-form solution of the ridge regression loss (5 ###reference_###), assuming the noise variance is known. The adjusted evaluation loss allows for direct model performance comparison across different noise variances. This is important because higher noise significantly degrades the model prediction. Our adjustment does not affect the model\u2019s optimization process, since it only modifies the loss by an additive constant.\n###figure_3### ###figure_4### ###figure_5###"
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "7",
|
| 79 |
+
"parent_section_id": null,
|
| 80 |
+
"section_name": "Conclusions",
|
| 81 |
+
"text": "Our research reveals the surprising ability of linear transformers to tackle challenging in-context learning problems. We show that each layer maintains an implicit linear regression model, akin to a complex variant of preconditioned gradient descent with momentum-like behavior.\nRemarkably, when trained on noisy linear regression problems with unknown noise variance, linear transformers not only outperform standard baselines but also uncover a sophisticated optimization algorithm that incorporates noise-aware step-size adjustments and rescaling. This discovery highlights the potential of linear transformers to automatically discover novel optimization algorithms when presented with the right problems, opening exciting avenues for future research, including automated algorithm discovery using transformers and generalization to other problem domains.\nWhile our findings demonstrate the impressive capabilities of linear transformers in learning optimization algorithms, we acknowledge limitations in our work. These include the focus on simplified linear models, analysis of primarily diagonal attention matrices, and the need for further exploration into the optimality of discovered algorithms, generalization to complex function classes, scalability with larger datasets, and applicability to more complex transformer architectures. We believe these limitations present valuable directions for future research and emphasize the need for a deeper understanding of the implicit learning mechanisms within transformer architectures."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "8",
|
| 85 |
+
"parent_section_id": null,
|
| 86 |
+
"section_name": "Acknowledgements",
|
| 87 |
+
"text": "The authors would like to thank Nolan Miller and Andrey Zhmoginov for their valuable suggestions and feedback throughout the development of this project. Part of this work was done while Rong Ge was visiting Google Research. Rong Ge\u2019s research is supported in part by NSF Award DMS-2031849 and CCF-1845171 (CAREER)."
|
| 88 |
+
}
|
| 89 |
+
],
|
| 90 |
+
"appendix": [
|
| 91 |
+
{
|
| 92 |
+
"section_id": "Appendix 1",
|
| 93 |
+
"parent_section_id": null,
|
| 94 |
+
"section_name": "Appendix A Proofs from Sections\u00a04 and \u00a05",
|
| 95 |
+
"text": "We first give the proof for Theorem 4.1 ###reference_theorem1###. In the process we will also prove Lemma 4.3 ###reference_theorem3###, as Theorem 4.1 ###reference_theorem1### follows immediately from an induction based on the lemma.\nWe do this by induction. At , it\u2019s easy to check that we can set .\nSuppose this is true for some layer , if the weights of layer are for heads, at output of layer we have:\nNote that the same equation is true for just by letting . Let the middle matrix has the following structure:\nThen one can choose the parameters of the next layer as in Lemma 4.3 ###reference_theorem3###\nOne can check that this choice satisfies (12 ###reference_###).\n\u220e\nThis lemma is in fact a corollary of Lemma 4.3 ###reference_theorem3###. We first give a more detailed version which explicitly state the unknown matrices :\nIn the setup of Theorem 4.1 ###reference_theorem1### with diagonal parameters (9 ###reference_###),\none can recursively compute matrices using the following formula\nwhere and initial conditions\nFirst, we compute the following matrix that appeared in Lemma 4.3 ###reference_theorem3### for the specific diagonal case:\nThis implies that , , and . Next we rewrite :\nHere the first step is by Theorem 4.1 ###reference_theorem1###, the second step replaces with , the third step uses the fact that to get rid of the cross terms.\nThe remaining proof just substitutes the formula for into Lemma 4.3 ###reference_theorem3###.\n\u220e\nNow Lemma A.1 ###reference_theorem1### implies Lemma 4.4 ###reference_theorem4### immediately by setting , , , and .\nBy Theorem 4.1 ###reference_theorem1###, we know for some . When , with high probability the norm of is on the order of , and the norm of is . Therefore we only need to bound the correlation. The correlation is equal to\nWe know with high probability because . The second term can be written as where is a vector whose coordinates are and for , therefore with high probability . 
Therefore, with high probability the cosine similarity is at most\nWhen this can be made smaller than any fixed constant.\n\u220e\nIn this section we prove Theorem 5.1 ###reference_theorem1### by finding hyperparameters for GD++ algorithm that solves least squares problems with very high accuracy. The first steps in the construction iteratively makes the data \u2019s better conditioned, and the last step is a single step of gradient descent. The proof is based on several lemma, first we observe that if the data is very well-conditioned, then one-step gradient descent solves the problem accurately:\nGiven where has eigenvalues between and . Let be the optimal least squares solution, then satisfies .\nWe can write . By the fact that is the optimal solution we know \u2019s satisfy . Therefore . This implies\n\u220e\nNext we show that by applying just the preconditioning step of GD++, one can get a well-conditioned matrix very quickly. Note that the matrix is updated as , so an eigenvalue of in the original matrix would become . The following lemma shows that this transformation is effective in shrinking the condition number\nSuppose , then there exists an universal constant such that choosing implies\nOn the other hand, if where , then choosing implies\nThe first claim shows that one can reduce the condition number by a constant factor in every step until it\u2019s a small constant. The second claim shows that once the condition number is small (), each iteration can bring it much closer to 1 (to the order of ).\nNow we prove the lemma.\nFirst, notice that the function is monotonically nondecreasing for if (indeed, it\u2019s derivative is always nonnegative). Therefore, the max is always achieved at and the min is always achieved at . 
The new ratio is therefore\nWhen the ratio is always below which is a constant bounded away from 1.\nWhen , we can write down the RHS in terms of\nNote that by the careful choice of , the RHS has the following Taylor expansion:\nOne can then check the RHS is always upperbounded by when .\n\u220e\nWith the two lemmas we are now ready to prove the main theorem:\nBy Lemma A.3 ###reference_theorem3### we know in iterations, by assigning in the way of Lemma A.3 ###reference_theorem3### one can reduce the condition number of to (we chose here to give some slack for later analysis).\nLet be the covariance matrix after these iterations, and be the upper and lowerbound for its eigenvalues. The data \u2019s are transformed to a new data for some matrix . Let , then since we know is a matrix with singular values between and . The optimal solution has norm at most . Therefore by Lemma A.2 ###reference_theorem2### we know the one-step gradient step with satisfy . The test data is also transformed to , and the algorithm outputs , so the error is at most . By the choice of we can check that RHS is at most .\n\u220e\nThe key observation here is that when , under the assumptions we have . Therefore the ridge regression solutions converge to and the desired output is .\nBy the calculations before, we know after the first-layer, the implicit is . As long as is a constant, when we know (as the part of that depend on is negligible compared to noise), therefore the output of the second layer satisfies\nTherefore, as long as we choose and to satisfy when or (notice that these are two linear equations on and , so they always have a solution), then we have for the two noise levels.\n\u220e"
|
| 96 |
+
},
|
| 97 |
+
{
|
| 98 |
+
"section_id": "Appendix 2",
|
| 99 |
+
"parent_section_id": null,
|
| 100 |
+
"section_name": "Appendix B More experiments",
|
| 101 |
+
"text": "Here we provide results of additional experiments that did not make it to the main text.\nFig. 6 ###reference_### shows an example of unadjusted loss. Clearly, it is virtually impossible to compare the methods across various noise levels this way.\nFig. 7 ###reference_### shows per-variance profile of intermediate predictions of the network of varying depth. It appears that GD++ demonstrates behavior typical of GD-based algorithms: early iterations model higher noise (similar to early stopping), gradually converging towards lower noise predictions. Diag exhibits this patter initially, but then dramatically improves, particularly for lower noise ranges. Intriguingly, Full displays the opposite trend, first improving low-noise predictions, followed by a decline in higher noise prediction accuracy, especially in the last layer.\nFinally, Table 1 ###reference_### presents comprehensive numerical results for our experiments across various mixed noise variance models. For each model variant (represented by a column), the best-performing result is highlighted in bold.\n###figure_6### ###figure_7### ###figure_8### ###figure_9### ###table_1###"
|
| 102 |
+
}
|
| 103 |
+
],
|
| 104 |
+
"tables": {
|
| 105 |
+
"1": {
|
| 106 |
+
"table_html": "<figure class=\"ltx_table\" id=\"A2.T1\">\n<table class=\"ltx_tabular ltx_centering ltx_align_middle\" id=\"A2.T1.9\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"A2.T1.2.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_t\" id=\"A2.T1.2.2.3\">Method</td>\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_t\" colspan=\"8\" id=\"A2.T1.1.1.1\">Uniform )</td>\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" colspan=\"2\" id=\"A2.T1.2.2.2\">Categorical \n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.10.1\">\n<td class=\"ltx_td ltx_border_l ltx_border_rr\" id=\"A2.T1.9.10.1.1\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.10.1.2\">0</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.10.1.3\">1</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.10.1.4\">2</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.10.1.5\">3</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.10.1.6\">4</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.10.1.7\">5</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.10.1.8\">6</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"A2.T1.9.10.1.9\">7</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.10.1.10\">{1,3}</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.10.1.11\">{1,3,5}</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.11.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_tt\" colspan=\"11\" id=\"A2.T1.9.11.2.1\">1 layer</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.3.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_t\" id=\"A2.T1.3.3.1\">GD<sup class=\"ltx_sup\" 
id=\"A2.T1.3.3.1.1\"><span class=\"ltx_text ltx_font_italic\" id=\"A2.T1.3.3.1.1.1\">++</span></sup>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.3.3.2\">1.768</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.3.3.3\">1.639</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.3.3.4\">1.396</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.3.3.5\">1.175</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.3.3.6\">1.015</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.3.3.7\">0.907</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.3.3.8\">0.841</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"A2.T1.3.3.9\">0.806</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.3.3.10\">1.007</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.3.3.11\">0.819</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.12.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_t\" id=\"A2.T1.9.12.3.1\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"A2.T1.9.12.3.1.1\">Diag</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.12.3.2\">1.767</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.12.3.3\">1.639</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.12.3.4\">1.396</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.12.3.5\">1.175</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.12.3.6\">1.015</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.12.3.7\">0.906</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.12.3.8\">0.841</td>\n<td 
class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"A2.T1.9.12.3.9\">0.806</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.12.3.10\">1.007</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.12.3.11\">0.819</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.13.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_t\" id=\"A2.T1.9.13.4.1\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"A2.T1.9.13.4.1.1\">Full</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.13.4.2\">1.768</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.13.4.3\">1.640</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.13.4.4\">1.397</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.13.4.5\">1.176</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.13.4.6\">1.016</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.13.4.7\">0.907</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.13.4.8\">0.842</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"A2.T1.9.13.4.9\">0.806</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.13.4.10\">1.008</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.13.4.11\">0.820</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.14.5\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_tt\" colspan=\"11\" id=\"A2.T1.9.14.5.1\">2 layers</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.4.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_t\" id=\"A2.T1.4.4.1\">GD<sup class=\"ltx_sup\" id=\"A2.T1.4.4.1.1\"><span class=\"ltx_text ltx_font_italic\" id=\"A2.T1.4.4.1.1.1\">++</span></sup>\n</td>\n<td class=\"ltx_td 
ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.4.4.2\">0.341</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.4.4.3\">0.295</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.4.4.4\">0.243</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.4.4.5\">0.265</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.4.4.6\">0.347</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.4.4.7\">0.366</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.4.4.8\">0.440</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"A2.T1.4.4.9\">0.530</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.4.4.10\">0.305</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.4.4.11\">0.427</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.15.6\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_t\" id=\"A2.T1.9.15.6.1\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"A2.T1.9.15.6.1.1\">Diag</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.15.6.2\">0.265</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.15.6.3\">0.214</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.15.6.4\">0.173</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.15.6.5\">0.188</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.15.6.6\">0.219</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.15.6.7\">0.242</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.15.6.8\">0.254</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"A2.T1.9.15.6.9\">0.259</td>\n<td class=\"ltx_td ltx_align_center 
ltx_border_r ltx_border_t\" id=\"A2.T1.9.15.6.10\">0.201</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.15.6.11\">0.246</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.16.7\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_t\" id=\"A2.T1.9.16.7.1\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"A2.T1.9.16.7.1.1\">Full</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.16.7.2\">0.264</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.16.7.3\">0.215</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.16.7.4\">0.173</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.16.7.5\">0.188</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.16.7.6\">0.220</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.16.7.7\">0.245</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.16.7.8\">0.259</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"A2.T1.9.16.7.9\">0.263</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.16.7.10\">0.202</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.16.7.11\">0.276</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.17.8\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_tt\" colspan=\"11\" id=\"A2.T1.9.17.8.1\">3 layers</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.5.5\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_t\" id=\"A2.T1.5.5.1\">GD<sup class=\"ltx_sup\" id=\"A2.T1.5.5.1.1\"><span class=\"ltx_text ltx_font_italic\" id=\"A2.T1.5.5.1.1.1\">++</span></sup>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.5.5.2\">0.019</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" 
id=\"A2.T1.5.5.3\">0.021</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.5.5.4\">0.071</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.5.5.5\">0.161</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.5.5.6\">0.259</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.5.5.7\">0.344</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.5.5.8\">0.454</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"A2.T1.5.5.9\">0.530</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.5.5.10\">0.222</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.5.5.11\">0.422</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.18.9\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_t\" id=\"A2.T1.9.18.9.1\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"A2.T1.9.18.9.1.1\">Diag</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.18.9.2\">0.013</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.18.9.3\">0.015</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.18.9.4\">0.048</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.18.9.5\">0.087</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.18.9.6\">0.109</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.18.9.7\">0.118</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.18.9.8\">0.121</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"A2.T1.9.18.9.9\">0.123</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.18.9.10\">0.098</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" 
id=\"A2.T1.9.18.9.11\">0.119</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.19.10\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_t\" id=\"A2.T1.9.19.10.1\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"A2.T1.9.19.10.1.1\">Full</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.19.10.2\">0.012</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.19.10.3\">0.015</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.19.10.4\">0.049</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.19.10.5\">0.075</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.19.10.6\">0.101</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.19.10.7\">0.117</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.19.10.8\">0.124</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"A2.T1.9.19.10.9\">0.127</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.19.10.10\">0.076</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.19.10.11\">0.113</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.20.11\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_tt\" colspan=\"11\" id=\"A2.T1.9.20.11.1\">4 layers</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.6.6\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_t\" id=\"A2.T1.6.6.1\">GD<sup class=\"ltx_sup\" id=\"A2.T1.6.6.1.1\"><span class=\"ltx_text ltx_font_italic\" id=\"A2.T1.6.6.1.1.1\">++</span></sup>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.6.6.2\">9.91e-05</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.6.6.3\">0.014</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" 
id=\"A2.T1.6.6.4\">0.066</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.6.6.5\">0.160</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.6.6.6\">0.258</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.6.6.7\">0.344</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.6.6.8\">0.454</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"A2.T1.6.6.9\">0.530</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.6.6.10\">0.222</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.6.6.11\">0.422</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.21.12\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_t\" id=\"A2.T1.9.21.12.1\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"A2.T1.9.21.12.1.1\">Diag</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.21.12.2\">1.19e-04</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.21.12.3\">0.006</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.21.12.4\">0.024</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.21.12.5\">0.041</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.21.12.6\">0.050</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.21.12.7\">0.059</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.21.12.8\">0.065</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"A2.T1.9.21.12.9\">0.073</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.21.12.10\">0.043</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.21.12.11\">0.062</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.22.13\">\n<td 
class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_t\" id=\"A2.T1.9.22.13.1\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"A2.T1.9.22.13.1.1\">Full</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.22.13.2\">1.63e-04</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.22.13.3\">0.005</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.22.13.4\">0.021</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.22.13.5\">0.038</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.22.13.6\">0.052</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.22.13.7\">0.065</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.22.13.8\">0.068</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"A2.T1.9.22.13.9\">0.076</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.22.13.10\">0.032</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.22.13.11\">0.061</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.23.14\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_tt\" colspan=\"11\" id=\"A2.T1.9.23.14.1\">5 layers</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.7.7\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_t\" id=\"A2.T1.7.7.1\">GD<sup class=\"ltx_sup\" id=\"A2.T1.7.7.1.1\"><span class=\"ltx_text ltx_font_italic\" id=\"A2.T1.7.7.1.1.1\">++</span></sup>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.7.7.2\">1.14e-07</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.7.7.3\">0.014</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.7.7.4\">0.066</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" 
id=\"A2.T1.7.7.5\">0.161</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.7.7.6\">0.265</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.7.7.7\">0.344</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.7.7.8\">0.454</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"A2.T1.7.7.9\">0.530</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.7.7.10\">0.222</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.7.7.11\">0.422</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.24.15\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_t\" id=\"A2.T1.9.24.15.1\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"A2.T1.9.24.15.1.1\">Diag</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.24.15.2\">1.81e-07</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.24.15.3\">0.004</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.24.15.4\">0.016</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.24.15.5\">0.029</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.24.15.6\">0.041</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.24.15.7\">0.051</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.24.15.8\">0.058</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"A2.T1.9.24.15.9\">0.062</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.24.15.10\">0.026</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.24.15.11\">0.051</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.25.16\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_t\" id=\"A2.T1.9.25.16.1\"><span 
class=\"ltx_text ltx_font_smallcaps\" id=\"A2.T1.9.25.16.1.1\">Full</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.25.16.2\">1.79e-07</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.25.16.3\"><span class=\"ltx_text ltx_font_bold\" id=\"A2.T1.9.25.16.3.1\">0.002</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.25.16.4\">0.015</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.25.16.5\">0.026</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.25.16.6\">0.038</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.25.16.7\">0.048</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.25.16.8\">0.059</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"A2.T1.9.25.16.9\">0.065</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.25.16.10\">0.016</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.25.16.11\">0.048</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.26.17\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_tt\" colspan=\"11\" id=\"A2.T1.9.26.17.1\">6 layers</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.8.8\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_t\" id=\"A2.T1.8.8.1\">GD<sup class=\"ltx_sup\" id=\"A2.T1.8.8.1.1\"><span class=\"ltx_text ltx_font_italic\" id=\"A2.T1.8.8.1.1.1\">++</span></sup>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.8.8.2\">2.37e-10</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.8.8.3\">0.009</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.8.8.4\">0.066</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.8.8.5\">0.161</td>\n<td 
class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.8.8.6\">0.265</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.8.8.7\">0.344</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.8.8.8\">0.454</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"A2.T1.8.8.9\">0.530</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.8.8.10\">0.222</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.8.8.11\">0.422</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.27.18\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_t\" id=\"A2.T1.9.27.18.1\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"A2.T1.9.27.18.1.1\">Diag</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.27.18.2\">2.57e-10</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.27.18.3\">0.003</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.27.18.4\">0.014</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.27.18.5\">0.028</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.27.18.6\">0.040</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.27.18.7\">0.048</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.27.18.8\">0.054</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"A2.T1.9.27.18.9\">0.059</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.27.18.10\">0.020</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.27.18.11\">0.047</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.28.19\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_t\" id=\"A2.T1.9.28.19.1\"><span class=\"ltx_text ltx_font_smallcaps\" 
id=\"A2.T1.9.28.19.1.1\">Full</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.28.19.2\">2.71e-10</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.28.19.3\"><span class=\"ltx_text ltx_font_bold\" id=\"A2.T1.9.28.19.3.1\">0.002</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.28.19.4\">0.014</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.28.19.5\">0.025</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.28.19.6\">0.036</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.28.19.7\">0.044</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.28.19.8\">0.052</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"A2.T1.9.28.19.9\">0.059</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.28.19.10\">0.011</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.28.19.11\">0.043</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.29.20\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_tt\" colspan=\"11\" id=\"A2.T1.9.29.20.1\">7 layers</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.9\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_t\" id=\"A2.T1.9.9.1\">GD<sup class=\"ltx_sup\" id=\"A2.T1.9.9.1.1\"><span class=\"ltx_text ltx_font_italic\" id=\"A2.T1.9.9.1.1.1\">++</span></sup>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.9.2\">2.65e-12</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.9.3\">0.009</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.9.4\">0.066</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.9.5\">0.161</td>\n<td class=\"ltx_td ltx_align_center 
ltx_border_r ltx_border_t\" id=\"A2.T1.9.9.6\">0.265</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.9.7\">0.344</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.9.8\">0.454</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"A2.T1.9.9.9\">0.530</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.9.10\">0.222</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.9.11\">0.422</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.30.21\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_t\" id=\"A2.T1.9.30.21.1\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"A2.T1.9.30.21.1.1\">Diag</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.30.21.2\">2.50e-12</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.30.21.3\"><span class=\"ltx_text ltx_font_bold\" id=\"A2.T1.9.30.21.3.1\">0.002</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.30.21.4\">0.014</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.30.21.5\">0.027</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.30.21.6\">0.040</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.30.21.7\">0.047</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.30.21.8\">0.052</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"A2.T1.9.30.21.9\">0.059</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.30.21.10\">0.018</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.30.21.11\">0.046</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.31.22\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_t\" 
id=\"A2.T1.9.31.22.1\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"A2.T1.9.31.22.1.1\">Full</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.31.22.2\">2.50e-12</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.31.22.3\"><span class=\"ltx_text ltx_font_bold\" id=\"A2.T1.9.31.22.3.1\">0.002</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.31.22.4\"><span class=\"ltx_text ltx_font_bold\" id=\"A2.T1.9.31.22.4.1\">0.010</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.31.22.5\">0.025</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.31.22.6\"><span class=\"ltx_text ltx_font_bold\" id=\"A2.T1.9.31.22.6.1\">0.035</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.31.22.7\"><span class=\"ltx_text ltx_font_bold\" id=\"A2.T1.9.31.22.7.1\">0.047</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.31.22.8\"><span class=\"ltx_text ltx_font_bold\" id=\"A2.T1.9.31.22.8.1\">0.050</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"A2.T1.9.31.22.9\"><span class=\"ltx_text ltx_font_bold\" id=\"A2.T1.9.31.22.9.1\">0.057</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.31.22.10\"><span class=\"ltx_text ltx_font_bold\" id=\"A2.T1.9.31.22.10.1\">0.010</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.31.22.11\"><span class=\"ltx_text ltx_font_bold\" id=\"A2.T1.9.31.22.11.1\">0.035</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.32.23\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_tt\" colspan=\"11\" id=\"A2.T1.9.32.23.1\">Baselines</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.33.24\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_t\" 
id=\"A2.T1.9.33.24.1\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"A2.T1.9.33.24.1.1\">ConstRR</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.33.24.2\"><span class=\"ltx_text ltx_font_bold\" id=\"A2.T1.9.33.24.2.1\">0</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.33.24.3\">0.009</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.33.24.4\">0.066</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.33.24.5\">0.161</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.33.24.6\">0.265</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.33.24.7\">0.365</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.33.24.8\">0.454</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"A2.T1.9.33.24.9\">0.530</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.33.24.10\">0.222</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.33.24.11\">0.422</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.34.25\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_t\" id=\"A2.T1.9.34.25.1\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"A2.T1.9.34.25.1.1\">AdaRR</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.34.25.2\"><span class=\"ltx_text ltx_font_bold\" id=\"A2.T1.9.34.25.2.1\">0</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.34.25.3\">0.003</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.34.25.4\">0.016</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.34.25.5\">0.034</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.34.25.6\">0.053</td>\n<td class=\"ltx_td 
ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.34.25.7\">0.068</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.34.25.8\">0.081</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"A2.T1.9.34.25.9\">0.092</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.34.25.10\">0.051</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A2.T1.9.34.25.11\">0.084</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A2.T1.9.35.26\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_l ltx_border_rr ltx_border_t\" id=\"A2.T1.9.35.26.1\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"A2.T1.9.35.26.1.1\">TunedRR</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"A2.T1.9.35.26.2\"><span class=\"ltx_text ltx_font_bold\" id=\"A2.T1.9.35.26.2.1\">0</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"A2.T1.9.35.26.3\"><span class=\"ltx_text ltx_font_bold\" id=\"A2.T1.9.35.26.3.1\">0.002</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"A2.T1.9.35.26.4\"><span class=\"ltx_text ltx_font_bold\" id=\"A2.T1.9.35.26.4.1\">0.010</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"A2.T1.9.35.26.5\"><span class=\"ltx_text ltx_font_bold\" id=\"A2.T1.9.35.26.5.1\">0.023</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"A2.T1.9.35.26.6\">0.037</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"A2.T1.9.35.26.7\">0.049</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"A2.T1.9.35.26.8\">0.060</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_rr ltx_border_t\" id=\"A2.T1.9.35.26.9\">0.068</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" 
id=\"A2.T1.9.35.26.10\">0.021</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"A2.T1.9.35.26.11\">0.054</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>Adjusted evaluation loss for models with various number of layers with uniform noise variance . We highlight in bold the best results for each problem setup (i.e.\u00a0each column).</figcaption>\n</figure>",
|
| 107 |
+
"capture": "Table 1: Adjusted evaluation loss for models with various number of layers with uniform noise variance . We highlight in bold the best results for each problem setup (i.e.\u00a0each column)."
|
| 108 |
+
}
|
| 109 |
+
},
|
| 110 |
+
"image_paths": {
|
| 111 |
+
"1(a)": {
|
| 112 |
+
"figure_path": "2402.14180v2_figure_1(a).png",
|
| 113 |
+
"caption": "Figure 1: In-context learning performance for noisy linear regression problem across models with different number of layers and \u03c3m\u2062a\u2062xsubscript\ud835\udf0e\ud835\udc5a\ud835\udc4e\ud835\udc65\\sigma_{max}italic_\u03c3 start_POSTSUBSCRIPT italic_m italic_a italic_x end_POSTSUBSCRIPT for \u03c3\u03c4\u223cU\u2062(0,\u03c3m\u2062a\u2062x)similar-tosubscript\ud835\udf0e\ud835\udf0f\ud835\udc480subscript\ud835\udf0e\ud835\udc5a\ud835\udc4e\ud835\udc65\\sigma_{\\tau}\\sim U(0,\\sigma_{max})italic_\u03c3 start_POSTSUBSCRIPT italic_\u03c4 end_POSTSUBSCRIPT \u223c italic_U ( 0 , italic_\u03c3 start_POSTSUBSCRIPT italic_m italic_a italic_x end_POSTSUBSCRIPT ). Each marker corresponds to a separately trained model with a given number of layers. Models with diagonal attention weights (Diag) match those with full attention weights (Full). Models specialized on a fixed noise (GD++) perform poorly, similar to a Ridge Regression solution with a constant noise (ConstRR). Among the baselines, only tuned exact Ridge Regression solution (TunedRR) is comparable with linear transformers.",
|
| 114 |
+
"url": "http://arxiv.org/html/2402.14180v2/x1.png"
|
| 115 |
+
},
|
| 116 |
+
"1(b)": {
|
| 117 |
+
"figure_path": "2402.14180v2_figure_1(b).png",
|
| 118 |
+
"caption": "Figure 1: In-context learning performance for noisy linear regression problem across models with different number of layers and \u03c3m\u2062a\u2062xsubscript\ud835\udf0e\ud835\udc5a\ud835\udc4e\ud835\udc65\\sigma_{max}italic_\u03c3 start_POSTSUBSCRIPT italic_m italic_a italic_x end_POSTSUBSCRIPT for \u03c3\u03c4\u223cU\u2062(0,\u03c3m\u2062a\u2062x)similar-tosubscript\ud835\udf0e\ud835\udf0f\ud835\udc480subscript\ud835\udf0e\ud835\udc5a\ud835\udc4e\ud835\udc65\\sigma_{\\tau}\\sim U(0,\\sigma_{max})italic_\u03c3 start_POSTSUBSCRIPT italic_\u03c4 end_POSTSUBSCRIPT \u223c italic_U ( 0 , italic_\u03c3 start_POSTSUBSCRIPT italic_m italic_a italic_x end_POSTSUBSCRIPT ). Each marker corresponds to a separately trained model with a given number of layers. Models with diagonal attention weights (Diag) match those with full attention weights (Full). Models specialized on a fixed noise (GD++) perform poorly, similar to a Ridge Regression solution with a constant noise (ConstRR). Among the baselines, only tuned exact Ridge Regression solution (TunedRR) is comparable with linear transformers.",
|
| 119 |
+
"url": "http://arxiv.org/html/2402.14180v2/x2.png"
|
| 120 |
+
},
|
| 121 |
+
"2(a)": {
|
| 122 |
+
"figure_path": "2402.14180v2_figure_2(a).png",
|
| 123 |
+
"caption": "Figure 2: Per-variance profile of models behavior for uniform noise variance \u03c3\u03c4\u223cU\u2062(0,\u03c3m\u2062a\u2062x)similar-tosubscript\ud835\udf0e\ud835\udf0f\ud835\udc480subscript\ud835\udf0e\ud835\udc5a\ud835\udc4e\ud835\udc65\\sigma_{\\tau}\\sim U(0,\\sigma_{max})italic_\u03c3 start_POSTSUBSCRIPT italic_\u03c4 end_POSTSUBSCRIPT \u223c italic_U ( 0 , italic_\u03c3 start_POSTSUBSCRIPT italic_m italic_a italic_x end_POSTSUBSCRIPT ). Top two rows: 7-layer models with varying \u03c3m\u2062a\u2062xsubscript\ud835\udf0e\ud835\udc5a\ud835\udc4e\ud835\udc65\\sigma_{max}italic_\u03c3 start_POSTSUBSCRIPT italic_m italic_a italic_x end_POSTSUBSCRIPT. Bottom row: models with varying numbers of layers, fixed \u03c3m\u2062a\u2062x=5subscript\ud835\udf0e\ud835\udc5a\ud835\udc4e\ud835\udc655\\sigma_{max}=5italic_\u03c3 start_POSTSUBSCRIPT italic_m italic_a italic_x end_POSTSUBSCRIPT = 5. In-distribution noise is shaded gray.",
|
| 124 |
+
"url": "http://arxiv.org/html/2402.14180v2/x3.png"
|
| 125 |
+
},
|
| 126 |
+
"2(b)": {
|
| 127 |
+
"figure_path": "2402.14180v2_figure_2(b).png",
|
| 128 |
+
"caption": "Figure 2: Per-variance profile of models behavior for uniform noise variance \u03c3\u03c4\u223cU\u2062(0,\u03c3m\u2062a\u2062x)similar-tosubscript\ud835\udf0e\ud835\udf0f\ud835\udc480subscript\ud835\udf0e\ud835\udc5a\ud835\udc4e\ud835\udc65\\sigma_{\\tau}\\sim U(0,\\sigma_{max})italic_\u03c3 start_POSTSUBSCRIPT italic_\u03c4 end_POSTSUBSCRIPT \u223c italic_U ( 0 , italic_\u03c3 start_POSTSUBSCRIPT italic_m italic_a italic_x end_POSTSUBSCRIPT ). Top two rows: 7-layer models with varying \u03c3m\u2062a\u2062xsubscript\ud835\udf0e\ud835\udc5a\ud835\udc4e\ud835\udc65\\sigma_{max}italic_\u03c3 start_POSTSUBSCRIPT italic_m italic_a italic_x end_POSTSUBSCRIPT. Bottom row: models with varying numbers of layers, fixed \u03c3m\u2062a\u2062x=5subscript\ud835\udf0e\ud835\udc5a\ud835\udc4e\ud835\udc655\\sigma_{max}=5italic_\u03c3 start_POSTSUBSCRIPT italic_m italic_a italic_x end_POSTSUBSCRIPT = 5. In-distribution noise is shaded gray.",
|
| 129 |
+
"url": "http://arxiv.org/html/2402.14180v2/x4.png"
|
| 130 |
+
},
|
| 131 |
+
"2(c)": {
|
| 132 |
+
"figure_path": "2402.14180v2_figure_2(c).png",
|
| 133 |
+
"caption": "Figure 2: Per-variance profile of models behavior for uniform noise variance \u03c3\u03c4\u223cU\u2062(0,\u03c3m\u2062a\u2062x)similar-tosubscript\ud835\udf0e\ud835\udf0f\ud835\udc480subscript\ud835\udf0e\ud835\udc5a\ud835\udc4e\ud835\udc65\\sigma_{\\tau}\\sim U(0,\\sigma_{max})italic_\u03c3 start_POSTSUBSCRIPT italic_\u03c4 end_POSTSUBSCRIPT \u223c italic_U ( 0 , italic_\u03c3 start_POSTSUBSCRIPT italic_m italic_a italic_x end_POSTSUBSCRIPT ). Top two rows: 7-layer models with varying \u03c3m\u2062a\u2062xsubscript\ud835\udf0e\ud835\udc5a\ud835\udc4e\ud835\udc65\\sigma_{max}italic_\u03c3 start_POSTSUBSCRIPT italic_m italic_a italic_x end_POSTSUBSCRIPT. Bottom row: models with varying numbers of layers, fixed \u03c3m\u2062a\u2062x=5subscript\ud835\udf0e\ud835\udc5a\ud835\udc4e\ud835\udc655\\sigma_{max}=5italic_\u03c3 start_POSTSUBSCRIPT italic_m italic_a italic_x end_POSTSUBSCRIPT = 5. In-distribution noise is shaded gray.",
|
| 134 |
+
"url": "http://arxiv.org/html/2402.14180v2/x5.png"
|
| 135 |
+
},
|
| 136 |
+
"3(a)": {
|
| 137 |
+
"figure_path": "2402.14180v2_figure_3(a).png",
|
| 138 |
+
"caption": "Figure 3: In-context learning performance for noisy linear regression across models with varying number of layers for conditional noise variance \u03c3\u03c4\u2208{1,3}subscript\ud835\udf0e\ud835\udf0f13\\sigma_{\\tau}\\in\\{1,3\\}italic_\u03c3 start_POSTSUBSCRIPT italic_\u03c4 end_POSTSUBSCRIPT \u2208 { 1 , 3 } and \u03c3\u03c4\u2208{1,3,5}subscript\ud835\udf0e\ud835\udf0f135\\sigma_{\\tau}\\in\\{1,3,5\\}italic_\u03c3 start_POSTSUBSCRIPT italic_\u03c4 end_POSTSUBSCRIPT \u2208 { 1 , 3 , 5 }. Top: loss for models with various number of layers and per-variance profile for models with 7 layers. Bottom: Per-variance profile of the model across different numbers of layers. In-distribution noise is shaded gray.",
|
| 139 |
+
"url": "http://arxiv.org/html/2402.14180v2/x6.png"
|
| 140 |
+
},
|
| 141 |
+
"3(b)": {
|
| 142 |
+
"figure_path": "2402.14180v2_figure_3(b).png",
|
| 143 |
+
"caption": "Figure 3: In-context learning performance for noisy linear regression across models with varying number of layers for conditional noise variance \u03c3\u03c4\u2208{1,3}subscript\ud835\udf0e\ud835\udf0f13\\sigma_{\\tau}\\in\\{1,3\\}italic_\u03c3 start_POSTSUBSCRIPT italic_\u03c4 end_POSTSUBSCRIPT \u2208 { 1 , 3 } and \u03c3\u03c4\u2208{1,3,5}subscript\ud835\udf0e\ud835\udf0f135\\sigma_{\\tau}\\in\\{1,3,5\\}italic_\u03c3 start_POSTSUBSCRIPT italic_\u03c4 end_POSTSUBSCRIPT \u2208 { 1 , 3 , 5 }. Top: loss for models with various number of layers and per-variance profile for models with 7 layers. Bottom: Per-variance profile of the model across different numbers of layers. In-distribution noise is shaded gray.",
|
| 144 |
+
"url": "http://arxiv.org/html/2402.14180v2/x7.png"
|
| 145 |
+
},
|
| 146 |
+
"3(c)": {
|
| 147 |
+
"figure_path": "2402.14180v2_figure_3(c).png",
|
| 148 |
+
"caption": "Figure 3: In-context learning performance for noisy linear regression across models with varying number of layers for conditional noise variance \u03c3\u03c4\u2208{1,3}subscript\ud835\udf0e\ud835\udf0f13\\sigma_{\\tau}\\in\\{1,3\\}italic_\u03c3 start_POSTSUBSCRIPT italic_\u03c4 end_POSTSUBSCRIPT \u2208 { 1 , 3 } and \u03c3\u03c4\u2208{1,3,5}subscript\ud835\udf0e\ud835\udf0f135\\sigma_{\\tau}\\in\\{1,3,5\\}italic_\u03c3 start_POSTSUBSCRIPT italic_\u03c4 end_POSTSUBSCRIPT \u2208 { 1 , 3 , 5 }. Top: loss for models with various number of layers and per-variance profile for models with 7 layers. Bottom: Per-variance profile of the model across different numbers of layers. In-distribution noise is shaded gray.",
|
| 149 |
+
"url": "http://arxiv.org/html/2402.14180v2/x8.png"
|
| 150 |
+
},
|
| 151 |
+
"3(d)": {
|
| 152 |
+
"figure_path": "2402.14180v2_figure_3(d).png",
|
| 153 |
+
"caption": "Figure 3: In-context learning performance for noisy linear regression across models with varying number of layers for conditional noise variance \u03c3\u03c4\u2208{1,3}subscript\ud835\udf0e\ud835\udf0f13\\sigma_{\\tau}\\in\\{1,3\\}italic_\u03c3 start_POSTSUBSCRIPT italic_\u03c4 end_POSTSUBSCRIPT \u2208 { 1 , 3 } and \u03c3\u03c4\u2208{1,3,5}subscript\ud835\udf0e\ud835\udf0f135\\sigma_{\\tau}\\in\\{1,3,5\\}italic_\u03c3 start_POSTSUBSCRIPT italic_\u03c4 end_POSTSUBSCRIPT \u2208 { 1 , 3 , 5 }. Top: loss for models with various number of layers and per-variance profile for models with 7 layers. Bottom: Per-variance profile of the model across different numbers of layers. In-distribution noise is shaded gray.",
|
| 154 |
+
"url": "http://arxiv.org/html/2402.14180v2/x9.png"
|
| 155 |
+
},
|
| 156 |
+
"3(e)": {
|
| 157 |
+
"figure_path": "2402.14180v2_figure_3(e).png",
|
| 158 |
+
"caption": "Figure 3: In-context learning performance for noisy linear regression across models with varying number of layers for conditional noise variance \u03c3\u03c4\u2208{1,3}subscript\ud835\udf0e\ud835\udf0f13\\sigma_{\\tau}\\in\\{1,3\\}italic_\u03c3 start_POSTSUBSCRIPT italic_\u03c4 end_POSTSUBSCRIPT \u2208 { 1 , 3 } and \u03c3\u03c4\u2208{1,3,5}subscript\ud835\udf0e\ud835\udf0f135\\sigma_{\\tau}\\in\\{1,3,5\\}italic_\u03c3 start_POSTSUBSCRIPT italic_\u03c4 end_POSTSUBSCRIPT \u2208 { 1 , 3 , 5 }. Top: loss for models with various number of layers and per-variance profile for models with 7 layers. Bottom: Per-variance profile of the model across different numbers of layers. In-distribution noise is shaded gray.",
|
| 159 |
+
"url": "http://arxiv.org/html/2402.14180v2/x10.png"
|
| 160 |
+
},
|
| 161 |
+
"3(f)": {
|
| 162 |
+
"figure_path": "2402.14180v2_figure_3(f).png",
|
| 163 |
+
"caption": "Figure 3: In-context learning performance for noisy linear regression across models with varying number of layers for conditional noise variance \u03c3\u03c4\u2208{1,3}subscript\ud835\udf0e\ud835\udf0f13\\sigma_{\\tau}\\in\\{1,3\\}italic_\u03c3 start_POSTSUBSCRIPT italic_\u03c4 end_POSTSUBSCRIPT \u2208 { 1 , 3 } and \u03c3\u03c4\u2208{1,3,5}subscript\ud835\udf0e\ud835\udf0f135\\sigma_{\\tau}\\in\\{1,3,5\\}italic_\u03c3 start_POSTSUBSCRIPT italic_\u03c4 end_POSTSUBSCRIPT \u2208 { 1 , 3 , 5 }. Top: loss for models with various number of layers and per-variance profile for models with 7 layers. Bottom: Per-variance profile of the model across different numbers of layers. In-distribution noise is shaded gray.",
|
| 164 |
+
"url": "http://arxiv.org/html/2402.14180v2/x11.png"
|
| 165 |
+
},
|
| 166 |
+
"4(a)": {
|
| 167 |
+
"figure_path": "2402.14180v2_figure_4(a).png",
|
| 168 |
+
"caption": "Figure 4: Weights for 4 layer linear transformer with Full parametrization trained with categorical noise \u03c3\u03c4\u2208{1,3}subscript\ud835\udf0e\ud835\udf0f13\\sigma_{\\tau}\\in\\{1,3\\}italic_\u03c3 start_POSTSUBSCRIPT italic_\u03c4 end_POSTSUBSCRIPT \u2208 { 1 , 3 }. Top: weights for Qlsuperscript\ud835\udc44\ud835\udc59Q^{l}italic_Q start_POSTSUPERSCRIPT italic_l end_POSTSUPERSCRIPT matrix, bottom: weights for Plsuperscript\ud835\udc43\ud835\udc59P^{l}italic_P start_POSTSUPERSCRIPT italic_l end_POSTSUPERSCRIPT matrix.",
|
| 169 |
+
"url": "http://arxiv.org/html/2402.14180v2/x12.png"
|
| 170 |
+
},
|
| 171 |
+
"4(b)": {
|
| 172 |
+
"figure_path": "2402.14180v2_figure_4(b).png",
|
| 173 |
+
"caption": "Figure 4: Weights for 4 layer linear transformer with Full parametrization trained with categorical noise \u03c3\u03c4\u2208{1,3}subscript\ud835\udf0e\ud835\udf0f13\\sigma_{\\tau}\\in\\{1,3\\}italic_\u03c3 start_POSTSUBSCRIPT italic_\u03c4 end_POSTSUBSCRIPT \u2208 { 1 , 3 }. Top: weights for Qlsuperscript\ud835\udc44\ud835\udc59Q^{l}italic_Q start_POSTSUPERSCRIPT italic_l end_POSTSUPERSCRIPT matrix, bottom: weights for Plsuperscript\ud835\udc43\ud835\udc59P^{l}italic_P start_POSTSUPERSCRIPT italic_l end_POSTSUPERSCRIPT matrix.",
|
| 174 |
+
"url": "http://arxiv.org/html/2402.14180v2/x13.png"
|
| 175 |
+
},
|
| 176 |
+
"5": {
|
| 177 |
+
"figure_path": "2402.14180v2_figure_5.png",
|
| 178 |
+
"caption": "Figure 5: Linear transformer models show a consistent decrease in error per layer when trained on data with mixed noise variance \u03c3\u03c4\u223cU\u2062(0,5)similar-tosubscript\ud835\udf0e\ud835\udf0f\ud835\udc4805\\sigma_{\\tau}\\sim U(0,5)italic_\u03c3 start_POSTSUBSCRIPT italic_\u03c4 end_POSTSUBSCRIPT \u223c italic_U ( 0 , 5 ). The error bars measure variance over 5555 training seeds.",
|
| 179 |
+
"url": "http://arxiv.org/html/2402.14180v2/x14.png"
|
| 180 |
+
},
|
| 181 |
+
"6": {
|
| 182 |
+
"figure_path": "2402.14180v2_figure_6.png",
|
| 183 |
+
"caption": "Figure 6: Example of unadjusted loss given by directly minimizing (7). It is pretty hard to see variation between comparable methods using this loss directly.",
|
| 184 |
+
"url": "http://arxiv.org/html/2402.14180v2/x15.png"
|
| 185 |
+
},
|
| 186 |
+
"7(a)": {
|
| 187 |
+
"figure_path": "2402.14180v2_figure_7(a).png",
|
| 188 |
+
"caption": "Figure 7: Layer by layer prediction quality for different models with \u03c3\u03c4\u223cU\u2062(0,5)similar-tosubscript\ud835\udf0e\ud835\udf0f\ud835\udc4805\\sigma_{\\tau}\\sim U(0,5)italic_\u03c3 start_POSTSUBSCRIPT italic_\u03c4 end_POSTSUBSCRIPT \u223c italic_U ( 0 , 5 ). The error bars measure std over 5555 training seeds.",
|
| 189 |
+
"url": "http://arxiv.org/html/2402.14180v2/x16.png"
|
| 190 |
+
},
|
| 191 |
+
"7(b)": {
|
| 192 |
+
"figure_path": "2402.14180v2_figure_7(b).png",
|
| 193 |
+
"caption": "Figure 7: Layer by layer prediction quality for different models with \u03c3\u03c4\u223cU\u2062(0,5)similar-tosubscript\ud835\udf0e\ud835\udf0f\ud835\udc4805\\sigma_{\\tau}\\sim U(0,5)italic_\u03c3 start_POSTSUBSCRIPT italic_\u03c4 end_POSTSUBSCRIPT \u223c italic_U ( 0 , 5 ). The error bars measure std over 5555 training seeds.",
|
| 194 |
+
"url": "http://arxiv.org/html/2402.14180v2/x17.png"
|
| 195 |
+
}
|
| 196 |
+
},
|
| 197 |
+
"validation": true,
|
| 198 |
+
"references": [
|
| 199 |
+
{
|
| 200 |
+
"1": {
|
| 201 |
+
"title": "Gpt-4 technical report.",
|
| 202 |
+
"author": "Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya,\nFlorencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman,\nShyamal Anadkat, et al.",
|
| 203 |
+
"venue": "arXiv preprint arXiv:2303.08774, 2023.",
|
| 204 |
+
"url": null
|
| 205 |
+
}
|
| 206 |
+
},
|
| 207 |
+
{
|
| 208 |
+
"2": {
|
| 209 |
+
"title": "Transformers learn to implement preconditioned gradient descent for\nin-context learning.",
|
| 210 |
+
"author": "Kwangjun Ahn, Xiang Cheng, Hadi Daneshmand, and Suvrit Sra.",
|
| 211 |
+
"venue": "arXiv preprint arXiv:2306.00297, 2023.",
|
| 212 |
+
"url": null
|
| 213 |
+
}
|
| 214 |
+
},
|
| 215 |
+
{
|
| 216 |
+
"3": {
|
| 217 |
+
"title": "What learning algorithm is in-context learning? investigations with\nlinear models.",
|
| 218 |
+
"author": "Ekin Aky\u00fcrek, Dale Schuurmans, Jacob Andreas, Tengyu Ma, and Denny Zhou.",
|
| 219 |
+
"venue": "arXiv preprint arXiv:2211.15661, 2022.",
|
| 220 |
+
"url": null
|
| 221 |
+
}
|
| 222 |
+
},
|
| 223 |
+
{
|
| 224 |
+
"4": {
|
| 225 |
+
"title": "In-Context language learning: Architectures and algorithms.",
|
| 226 |
+
"author": "Ekin Aky\u00fcrek, Bailin Wang, Yoon Kim, and Jacob Andreas.",
|
| 227 |
+
"venue": "arXiv preprint arXiv:2401.12973, 2024.",
|
| 228 |
+
"url": null
|
| 229 |
+
}
|
| 230 |
+
},
|
| 231 |
+
{
|
| 232 |
+
"5": {
|
| 233 |
+
"title": "Palm 2 technical report.",
|
| 234 |
+
"author": "Rohan Anil, Andrew M Dai, Orhan Firat, Melvin Johnson, Dmitry Lepikhin,\nAlexandre Passos, Siamak Shakeri, Emanuel Taropa, Paige Bailey, Zhifeng Chen,\net al.",
|
| 235 |
+
"venue": "arXiv preprint arXiv:2305.10403, 2023.",
|
| 236 |
+
"url": null
|
| 237 |
+
}
|
| 238 |
+
},
|
| 239 |
+
{
|
| 240 |
+
"6": {
|
| 241 |
+
"title": "Transformers as statisticians: Provable in-context learning with\nin-context algorithm selection.",
|
| 242 |
+
"author": "Yu Bai, Fan Chen, Huan Wang, Caiming Xiong, and Song Mei.",
|
| 243 |
+
"venue": "arXiv preprint arXiv:2306.04637, 2023.",
|
| 244 |
+
"url": null
|
| 245 |
+
}
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"7": {
|
| 249 |
+
"title": "Language models are few-shot learners.",
|
| 250 |
+
"author": "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla\nDhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell,\net al.",
|
| 251 |
+
"venue": "Advances in neural information processing systems,\n33:1877\u20131901, 2020.",
|
| 252 |
+
"url": null
|
| 253 |
+
}
|
| 254 |
+
},
|
| 255 |
+
{
|
| 256 |
+
"8": {
|
| 257 |
+
"title": "Data distributional properties drive emergent in-context learning in\ntransformers.",
|
| 258 |
+
"author": "Stephanie Chan, Adam Santoro, Andrew Lampinen, Jane Wang, Aaditya Singh, Pierre\nRichemond, James McClelland, and Felix Hill.",
|
| 259 |
+
"venue": "Advances in Neural Information Processing Systems,\n35:18878\u201318891, 2022.",
|
| 260 |
+
"url": null
|
| 261 |
+
}
|
| 262 |
+
},
|
| 263 |
+
{
|
| 264 |
+
"9": {
|
| 265 |
+
"title": "Transformers implement functional gradient descent to learn\nnon-linear functions in context.",
|
| 266 |
+
"author": "Xiang Cheng, Yuxin Chen, and Suvrit Sra.",
|
| 267 |
+
"venue": "arXiv preprint arXiv:2312.06528, 2023.",
|
| 268 |
+
"url": null
|
| 269 |
+
}
|
| 270 |
+
},
|
| 271 |
+
{
|
| 272 |
+
"10": {
|
| 273 |
+
"title": "Comparison of model selection for regression.",
|
| 274 |
+
"author": "Vladimir Cherkassky and Yunqian Ma.",
|
| 275 |
+
"venue": "Neural computation, 15(7):1691\u20131714,\n2003.",
|
| 276 |
+
"url": null
|
| 277 |
+
}
|
| 278 |
+
},
|
| 279 |
+
{
|
| 280 |
+
"11": {
|
| 281 |
+
"title": "Rethinking attention with performers.",
|
| 282 |
+
"author": "Krzysztof Choromanski, Valerii Likhosherstov, David Dohan, Xingyou Song,\nAndreea Gane, Tamas Sarlos, Peter Hawkins, Jared Davis, Afroz Mohiuddin,\nLukasz Kaiser, et al.",
|
| 283 |
+
"venue": "arXiv preprint arXiv:2009.14794, 2020.",
|
| 284 |
+
"url": null
|
| 285 |
+
}
|
| 286 |
+
},
|
| 287 |
+
{
|
| 288 |
+
"12": {
|
| 289 |
+
"title": "Transformers learn higher-order optimization methods for in-context\nlearning: A study with linear models.",
|
| 290 |
+
"author": "Deqing Fu, Tian-Qi Chen, Robin Jia, and Vatsal Sharan.",
|
| 291 |
+
"venue": "arXiv preprint arXiv:2310.17086, 2023.",
|
| 292 |
+
"url": null
|
| 293 |
+
}
|
| 294 |
+
},
|
| 295 |
+
{
|
| 296 |
+
"13": {
|
| 297 |
+
"title": "What can transformers learn in-context? a case study of simple\nfunction classes.",
|
| 298 |
+
"author": "Shivam Garg, Dimitris Tsipras, Percy S Liang, and Gregory Valiant.",
|
| 299 |
+
"venue": "Advances in Neural Information Processing Systems,\n35:30583\u201330598, 2022.",
|
| 300 |
+
"url": null
|
| 301 |
+
}
|
| 302 |
+
},
|
| 303 |
+
{
|
| 304 |
+
"14": {
|
| 305 |
+
"title": "Looped transformers as programmable computers.",
|
| 306 |
+
"author": "Angeliki Giannou, Shashank Rajput, Jy-yong Sohn, Kangwook Lee, Jason D Lee, and\nDimitris Papailiopoulos.",
|
| 307 |
+
"venue": "arXiv preprint arXiv:2301.13196, 2023.",
|
| 308 |
+
"url": null
|
| 309 |
+
}
|
| 310 |
+
},
|
| 311 |
+
{
|
| 312 |
+
"15": {
|
| 313 |
+
"title": "How do transformers learn in-context beyond simple functions? a case\nstudy on learning with representations.",
|
| 314 |
+
"author": "Tianyu Guo, Wei Hu, Song Mei, Huan Wang, Caiming Xiong, Silvio Savarese, and\nYu Bai.",
|
| 315 |
+
"venue": "arXiv preprint arXiv:2310.10616, 2023.",
|
| 316 |
+
"url": null
|
| 317 |
+
}
|
| 318 |
+
},
|
| 319 |
+
{
|
| 320 |
+
"16": {
|
| 321 |
+
"title": "In-context learning creates task vectors.",
|
| 322 |
+
"author": "Roee Hendel, Mor Geva, and Amir Globerson.",
|
| 323 |
+
"venue": "arXiv preprint arXiv:2310.15916, 2023.",
|
| 324 |
+
"url": null
|
| 325 |
+
}
|
| 326 |
+
},
|
| 327 |
+
{
|
| 328 |
+
"17": {
|
| 329 |
+
"title": "In-context convergence of transformers.",
|
| 330 |
+
"author": "Yu Huang, Yuan Cheng, and Yingbin Liang.",
|
| 331 |
+
"venue": "arXiv preprint arXiv:2310.05249, 2023.",
|
| 332 |
+
"url": null
|
| 333 |
+
}
|
| 334 |
+
},
|
| 335 |
+
{
|
| 336 |
+
"18": {
|
| 337 |
+
"title": "Risks from learned optimization in advanced machine learning systems.",
|
| 338 |
+
"author": "Evan Hubinger, Chris van Merwijk, Vladimir Mikulik, Joar Skalse, and Scott\nGarrabrant.",
|
| 339 |
+
"venue": "arXiv preprint arXiv:1906.01820, 2019.",
|
| 340 |
+
"url": null
|
| 341 |
+
}
|
| 342 |
+
},
|
| 343 |
+
{
|
| 344 |
+
"19": {
|
| 345 |
+
"title": "Mistral 7b.",
|
| 346 |
+
"author": "Albert Q Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford,\nDevendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel,\nGuillaume Lample, Lucile Saulnier, et al.",
|
| 347 |
+
"venue": "arXiv preprint arXiv:2310.06825, 2023.",
|
| 348 |
+
"url": null
|
| 349 |
+
}
|
| 350 |
+
},
|
| 351 |
+
{
|
| 352 |
+
"20": {
|
| 353 |
+
"title": "Transformers are rnns: Fast autoregressive transformers with linear\nattention.",
|
| 354 |
+
"author": "Angelos Katharopoulos, Apoorv Vyas, Nikolaos Pappas, and Fran\u00e7ois\nFleuret.",
|
| 355 |
+
"venue": "In International conference on machine learning, pp. 5156\u20135165. PMLR, 2020.",
|
| 356 |
+
"url": null
|
| 357 |
+
}
|
| 358 |
+
},
|
| 359 |
+
{
|
| 360 |
+
"21": {
|
| 361 |
+
"title": "In-context learning in large language models learns label\nrelationships but is not conventional learning.",
|
| 362 |
+
"author": "Jannik Kossen, Tom Rainforth, and Yarin Gal.",
|
| 363 |
+
"venue": "arXiv preprint arXiv:2307.12375, 2023.",
|
| 364 |
+
"url": null
|
| 365 |
+
}
|
| 366 |
+
},
|
| 367 |
+
{
|
| 368 |
+
"22": {
|
| 369 |
+
"title": "Transformers as algorithms: Generalization and stability in\nin-context learning.",
|
| 370 |
+
"author": "Yingcong Li, Muhammed Emrullah Ildiz, Dimitris Papailiopoulos, and Samet Oymak.",
|
| 371 |
+
"venue": "In International Conference on Machine Learning, pp. 19565\u201319594. PMLR, 2023.",
|
| 372 |
+
"url": null
|
| 373 |
+
}
|
| 374 |
+
},
|
| 375 |
+
{
|
| 376 |
+
"23": {
|
| 377 |
+
"title": "One step of gradient descent is provably the optimal in-context\nlearner with one layer of linear self-attention.",
|
| 378 |
+
"author": "Arvind Mahankali, Tatsunori B Hashimoto, and Tengyu Ma.",
|
| 379 |
+
"venue": "arXiv preprint arXiv:2307.03576, 2023.",
|
| 380 |
+
"url": null
|
| 381 |
+
}
|
| 382 |
+
},
|
| 383 |
+
{
|
| 384 |
+
"24": {
|
| 385 |
+
"title": "Large language models as general pattern machines.",
|
| 386 |
+
"author": "Suvir Mirchandani, Fei Xia, Pete Florence, Brian Ichter, Danny Driess,\nMontserrat Gonzalez Arenas, Kanishka Rao, Dorsa Sadigh, and Andy Zeng.",
|
| 387 |
+
"venue": "arXiv preprint arXiv:2307.04721, 2023.",
|
| 388 |
+
"url": null
|
| 389 |
+
}
|
| 390 |
+
},
|
| 391 |
+
{
|
| 392 |
+
"25": {
|
| 393 |
+
"title": "In-context learning and induction heads.",
|
| 394 |
+
"author": "Catherine Olsson, Nelson Elhage, Neel Nanda, Nicholas Joseph, Nova DasSarma,\nTom Henighan, Ben Mann, Amanda Askell, Yuntao Bai, Anna Chen, et al.",
|
| 395 |
+
"venue": "arXiv preprint arXiv:2209.11895, 2022.",
|
| 396 |
+
"url": null
|
| 397 |
+
}
|
| 398 |
+
},
|
| 399 |
+
{
|
| 400 |
+
"26": {
|
| 401 |
+
"title": "Transformers can optimally learn regression mixture models.",
|
| 402 |
+
"author": "Reese Pathak, Rajat Sen, Weihao Kong, and Abhimanyu Das.",
|
| 403 |
+
"venue": "arXiv preprint arXiv:2311.08362, 2023.",
|
| 404 |
+
"url": null
|
| 405 |
+
}
|
| 406 |
+
},
|
| 407 |
+
{
|
| 408 |
+
"27": {
|
| 409 |
+
"title": "Linear transformers are secretly fast weight programmers.",
|
| 410 |
+
"author": "Imanol Schlag, Kazuki Irie, and J\u00fcrgen Schmidhuber.",
|
| 411 |
+
"venue": "In International Conference on Machine Learning, pp. 9355\u20139366. PMLR, 2021.",
|
| 412 |
+
"url": null
|
| 413 |
+
}
|
| 414 |
+
},
|
| 415 |
+
{
|
| 416 |
+
"28": {
|
| 417 |
+
"title": "Do pretrained transformers really learn in-context by gradient\ndescent?",
|
| 418 |
+
"author": "Lingfeng Shen, Aayush Mishra, and Daniel Khashabi.",
|
| 419 |
+
"venue": "arXiv preprint arXiv:2310.08540, 2023.",
|
| 420 |
+
"url": null
|
| 421 |
+
}
|
| 422 |
+
},
|
| 423 |
+
{
|
| 424 |
+
"29": {
|
| 425 |
+
"title": "Max-margin token selection in attention mechanism.",
|
| 426 |
+
"author": "Davoud Ataee Tarzanagh, Yingcong Li, Xuechen Zhang, and Samet Oymak.",
|
| 427 |
+
"venue": "In Thirty-seventh Conference on Neural Information Processing\nSystems, 2023.",
|
| 428 |
+
"url": null
|
| 429 |
+
}
|
| 430 |
+
},
|
| 431 |
+
{
|
| 432 |
+
"30": {
|
| 433 |
+
"title": "Gemini: a family of highly capable multimodal models.",
|
| 434 |
+
"author": "Gemini Team, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac,\nJiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, et al.",
|
| 435 |
+
"venue": "arXiv preprint arXiv:2312.11805, 2023.",
|
| 436 |
+
"url": null
|
| 437 |
+
}
|
| 438 |
+
},
|
| 439 |
+
{
|
| 440 |
+
"31": {
|
| 441 |
+
"title": "Scan and snap: Understanding training dynamics and token composition\nin 1-layer transformer.",
|
| 442 |
+
"author": "Yuandong Tian, Yiping Wang, Beidi Chen, and Simon Du.",
|
| 443 |
+
"venue": "arXiv preprint arXiv:2305.16380, 2023a.",
|
| 444 |
+
"url": null
|
| 445 |
+
}
|
| 446 |
+
},
|
| 447 |
+
{
|
| 448 |
+
"32": {
|
| 449 |
+
"title": "Joma: Demystifying multilayer transformers via joint dynamics of mlp\nand attention.",
|
| 450 |
+
"author": "Yuandong Tian, Yiping Wang, Zhenyu Zhang, Beidi Chen, and Simon Du.",
|
| 451 |
+
"venue": "In NeurIPS 2023 Workshop on Mathematics of Modern Machine\nLearning, 2023b.",
|
| 452 |
+
"url": null
|
| 453 |
+
}
|
| 454 |
+
},
|
| 455 |
+
{
|
| 456 |
+
"33": {
|
| 457 |
+
"title": "Attention is all you need.",
|
| 458 |
+
"author": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones,\nAidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin.",
|
| 459 |
+
"venue": "Advances in neural information processing systems, 30, 2017.",
|
| 460 |
+
"url": null
|
| 461 |
+
}
|
| 462 |
+
},
|
| 463 |
+
{
|
| 464 |
+
"34": {
|
| 465 |
+
"title": "Transformers learn in-context by gradient descent.",
|
| 466 |
+
"author": "Johannes von Oswald, Eyvind Niklasson, Ettore Randazzo, Jo\u00e3o Sacramento,\nAlexander Mordvintsev, Andrey Zhmoginov, and Max Vladymyrov.",
|
| 467 |
+
"venue": "In International Conference on Machine Learning, pp. 35151\u201335174. PMLR, 2023a.",
|
| 468 |
+
"url": null
|
| 469 |
+
}
|
| 470 |
+
},
|
| 471 |
+
{
|
| 472 |
+
"35": {
|
| 473 |
+
"title": "Uncovering mesa-optimization algorithms in transformers.",
|
| 474 |
+
"author": "Johannes von Oswald, Eyvind Niklasson, Maximilian Schlegel, Seijin Kobayashi,\nNicolas Zucchet, Nino Scherrer, Nolan Miller, Mark Sandler, Max Vladymyrov,\nRazvan Pascanu, et al.",
|
| 475 |
+
"venue": "arXiv preprint arXiv:2309.05858, 2023b.",
|
| 476 |
+
"url": null
|
| 477 |
+
}
|
| 478 |
+
},
|
| 479 |
+
{
|
| 480 |
+
"36": {
|
| 481 |
+
"title": "Linformer: Self-attention with linear complexity.",
|
| 482 |
+
"author": "Sinong Wang, Belinda Z Li, Madian Khabsa, Han Fang, and Hao Ma.",
|
| 483 |
+
"venue": "arXiv preprint arXiv:2006.04768, 2020.",
|
| 484 |
+
"url": null
|
| 485 |
+
}
|
| 486 |
+
},
|
| 487 |
+
{
|
| 488 |
+
"37": {
|
| 489 |
+
"title": "Larger language models do in-context learning differently.",
|
| 490 |
+
"author": "Jerry Wei, Jason Wei, Yi Tay, Dustin Tran, Albert Webson, Yifeng Lu, Xinyun\nChen, Hanxiao Liu, Da Huang, Denny Zhou, et al.",
|
| 491 |
+
"venue": "arXiv preprint arXiv:2303.03846, 2023.",
|
| 492 |
+
"url": null
|
| 493 |
+
}
|
| 494 |
+
},
|
| 495 |
+
{
|
| 496 |
+
"38": {
|
| 497 |
+
"title": "Transformers are uninterpretable with myopic methods: a case study\nwith bounded dyck grammars.",
|
| 498 |
+
"author": "Kaiyue Wen, Yuchen Li, Bingbin Liu, and Andrej Risteski.",
|
| 499 |
+
"venue": "In Thirty-seventh Conference on Neural Information Processing\nSystems, 2023.",
|
| 500 |
+
"url": null
|
| 501 |
+
}
|
| 502 |
+
},
|
| 503 |
+
{
|
| 504 |
+
"39": {
|
| 505 |
+
"title": "Pretraining data mixtures enable narrow model selection capabilities\nin transformer models.",
|
| 506 |
+
"author": "Steve Yadlowsky, Lyric Doshi, and Nilesh Tripuraneni.",
|
| 507 |
+
"venue": "arXiv preprint arXiv:2311.00871, 2023.",
|
| 508 |
+
"url": null
|
| 509 |
+
}
|
| 510 |
+
},
|
| 511 |
+
{
|
| 512 |
+
"40": {
|
| 513 |
+
"title": "Trained transformers learn linear models in-context.",
|
| 514 |
+
"author": "Ruiqi Zhang, Spencer Frei, and Peter L Bartlett.",
|
| 515 |
+
"venue": "arXiv preprint arXiv:2306.09927, 2023.",
|
| 516 |
+
"url": null
|
| 517 |
+
}
|
| 518 |
+
}
|
| 519 |
+
],
|
| 520 |
+
"url": "http://arxiv.org/html/2402.14180v2"
|
| 521 |
+
}
|
20241030/2402.14576v3.json
ADDED
|
@@ -0,0 +1,230 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"title": "Attention-Enhanced Prioritized Proximal Policy Optimization for Adaptive Edge Caching",
|
| 3 |
+
"abstract": "This paper tackles the growing issue of excessive data transmission in networks. With increasing traffic, backhaul links and core networks are under significant traffic, leading to the investigation of caching solutions at edge routers. Many existing studies utilize Markov Decision Processes (MDP) to tackle caching problems, often assuming decision points at fixed intervals; however, real-world environments are characterized by random request arrivals. Additionally, critical file attributes such as lifetime, size, and priority\nsignificantly impact the effectiveness of caching policies, yet existing research fails to integrate all these attributes in policy design. In this work, we model the caching problem using a Semi-Markov Decision Process (SMDP) to better capture the continuous-time nature of real-world applications, enabling caching decisions to be triggered by random file requests. We then introduce a Proximal Policy Optimization (PPO)-based caching strategy that fully considers file attributes like lifetime, size, and priority. Simulations show that our method outperforms a recent Deep Reinforcement Learning-based technique. To further advance our research, we improved the convergence rate of PPO by prioritizing transitions within the replay buffer through an attention mechanism. This mechanism evaluates the similarity between the current state and all stored transitions, assigning higher priorities to transitions that exhibit greater similarity.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "With the rapid expansion of mobile applications, there has been a notable rise in redundant data transmissions due to numerous users requesting content from centralized data centers. This surge in traffic has significantly strained backhaul links and core networks [1 ###reference_b1###]. Consequently, edge caching at routers has emerged as a promising strategy to alleviate traffic redundancy and reduce transmission delays, as demonstrated by recent research [2 ###reference_b2###, 3 ###reference_b3###].\nCurrent caching strategies can be classified into two main types: reactive and proactive [4 ###reference_b4###, 5 ###reference_b5###]. Reactive caching entails deciding whether to store a file only after it has been requested [1 ###reference_b1###, 6 ###reference_b6###, 7 ###reference_b7###]. Proactive methods, in contrast, rely on historical data to forecast future file popularity and pre-cache the content even before any requests are made [8 ###reference_b8###, 9 ###reference_b9###, 10 ###reference_b10###]. However, a key limitation of proactive caching is that it may result in low cache-hit ratios if the predicted content fails to become popular. Additionally, pre-caching content that may never be accessed wastes both communication bandwidth and valuable storage space, making reactive methods a more efficient alternative.\nAnother essential consideration is that caching inherently involves sequential decision-making, which is effectively modeled by the Markov Decision Process (MDP) framework, as outlined in [11 ###reference_b11###]. An MDP is defined by a set of states and actions available to an agent, who receives rewards based on their actions and the resulting states. The goal of the agent is to derive a policy that maximizes long-term expected rewards. If the model of the environment, such as the reward function and transition probabilities, is known, MDPs can be solved using Bellman optimality equations [12 ###reference_b12###]. 
However, in many practical scenarios, such information is unavailable. In these cases, model-free Reinforcement Learning (RL) techniques are employed, allowing an agent to learn from the environment through exploration and trial-and-error rather than relying on pre-existing knowledge of the environment [12 ###reference_b12###].\nThe majority of existing studies apply the MDP framework to model caching problems [13 ###reference_b13###]. However, MDP-based approaches assume decisions are made at fixed intervals [14 ###reference_b14###], which is not suitable for scenarios where caching decisions need to be made upon the arrival of requests. Since real-world environments feature requests arriving randomly at the edge router [15 ###reference_b15###], MDP is often inadequate. A more fitting alternative is the Semi-Markov Decision Process (SMDP) [11 ###reference_b11###, 12 ###reference_b12###], which accommodates state transitions occurring at uneven intervals. Like MDP, SMDP provides a framework for modeling decision-making processes, but it offers more flexibility since state transitions can occur at equal, exponential, or random intervals [11 ###reference_b11###]. In fact, MDP can be seen as a special case of SMDP where the intervals between state transitions are one unit of time. The work proposed in [16 ###reference_b16###] exemplifies the application of SMDP for caching.\nAnother critical aspect of the caching problem is that not all files have the same level of importance. Importance here reflects how much a user values having a file stored nearby to avoid delays in future access. For instance, a user who frequently needs to access real-time financial data, such as live stock market updates, might be willing to pay a premium to ensure that this data is cached close by, allowing for immediate retrieval without latency. 
This approach provides a significant advantage to the caching system: by prioritizing files based on their importance to users, the system can enhance its overall benefit and efficiency. In addition to the importance of files, other attributes like size and lifetime also play a key role in optimizing caching policies. For instance, consider a scenario where a popular file is large but has a short lifetime. Caching this file might require evicting several other files from the cache. If the file expires before it is accessed again, this would not only make caching it ineffective but could also degrade overall system performance. Yet, most existing caching policies fail to account for all these attributes when making caching decisions.\nJust as optimizing caching decisions requires careful consideration of these file characteristics, optimizing reinforcement learning algorithms also depends on making smart choices about which experiences to prioritize during training. Prioritizing transitions that closely resemble the current system state accelerates convergence by ensuring that the agent focuses on the most relevant experiences, directly informing the best policy updates. By concentrating on transitions closely related to its current state, the agent learns from experiences more applicable to its present situation, increasing the likelihood of policy effectiveness. This targeted approach reduces time spent on less relevant experiences, leading to more efficient learning and fewer updates needed for policy improvement. Moreover, learning from similar states minimizes variance in policy updates, stabilizing the learning process and resulting in smoother and more consistent progress toward convergence.\nMotivated by these challenges, this paper proposes a reactive caching method built upon the SMDP framework [11 ###reference_b11###], allowing for decision-making at random intervals, particularly when a file is requested at the edge router. 
Our main contributions are as follows:\nWe model the caching problem using SMDP, which better reflects the real-time nature of request arrivals. We also present a PPO-based caching strategy that leverages historical popularity data to develop a caching policy while accounting for the system\u2019s inherent uncertainties.\nWe incorporate multiple file attributes such as lifetime, size, and importance, along with popularity, in our caching decisions. To the best of our knowledge, our method is the first to integrate all these attributes into a comprehensive caching strategy, making it more applicable to practical environments.\nThrough simulations, we assess our method\u2019s performance under various scenarios and compare it against two recent Deep Reinforcement Learning (DRL)-based approaches that consider both file popularity and lifetime [17 ###reference_b17###] and [6 ###reference_b6###]. Results show that our approach consistently achieves a higher cache hit rate and total utility across different configurations, including varying cache sizes, request rates, and popularity distributions.\nWe improve the convergence speed of the PPO algorithm by incorporating an attention mechanism to prioritize transitions in the replay buffer. This mechanism evaluates the similarity between the current state and all transitions in the replay buffer, assigning higher priority to those with greater similarity. This approach accelerates convergence by focusing learning on more relevant transitions.\nThe remainder of the paper is structured as follows: Section II ###reference_### provides an overview of related work. Section III ###reference_### introduces our system model, while Section IV ###reference_### formulates the caching problem. Section V-A ###reference_### outlines our proposed caching algorithm. In Section VI ###reference_###, we discuss our experimental setup and results, and Section VII ###reference_### concludes the paper."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "II Related Work",
|
| 15 |
+
"text": "This section provides an overview of current caching techniques, covering both reactive and proactive strategies."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "2.1",
|
| 19 |
+
"parent_section_id": "2",
|
| 20 |
+
"section_name": "II-A Reactive Caching",
|
| 21 |
+
"text": "The study in [7 ###reference_b7###] introduces a new metric called \u2018virality\u2019, which measures the variation in file popularity over time. The authors use this metric along with popularity and size, to prioritize which files should be cached. Another approach detailed in [1 ###reference_b1###] involves a recommendation system-based model to predict the popularity of newly requested content. This prediction guides a DRL-based caching strategy aimed at optimizing caching decisions by balancing latency and request frequency.\nIn [17 ###reference_b17###], the authors address the trade-off between communication costs and data freshness using an actor-critic DRL approach. They propose a utility function that combines these factors to improve caching efficiency. Similarly, in [6 ###reference_b6###] the authors employ the PPO algorithm, aiming to enhance cache hit rates while minimizing energy use. A variant of this approach is presented in [18 ###reference_b18###], where the DRL agent is penalized based on the age of cached files and available cache memory.\nIn [19 ###reference_b19###], the issue of average data transmission delay within cache storage constraints is tackled using deep reinforcement learning, initially formulated as an Integer Linear Programming (ILP) problem before applying DRL. The authors of [20 ###reference_b20###] propose deep actor-critic methods for reactive caching, focusing on maximizing cache hit rates and managing transmission delays in both centralized and decentralized settings.\nStudy [21 ###reference_b21###] integrates recommender systems with edge caching in mobile edge-cloud networks, aiming to reduce the long-term system cost by modeling user experience factors. In [22 ###reference_b22###], user preferences influence cache management, with higher preference content replacing lower preference items when space is limited. 
The study in [23 ###reference_b23###] focuses on vehicular networks, using region-based models to optimize content fetching locations and employing the Least Recently Used (LRU) strategy for cache management."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2.2",
|
| 25 |
+
"parent_section_id": "2",
|
| 26 |
+
"section_name": "II-B Proactive Caching",
|
| 27 |
+
"text": "Study [24 ###reference_b24###] addresses the \u2019slow start\u2019 problem in caching algorithms by calculating Euclidean distances to identify file similarities, assuming similar files are likely to be popular. In [8 ###reference_b8###], the caching of multi-view 3D videos is modeled using an MDP, combining Deep Deterministic Policy Gradient (DDPG) with a dynamic k-nearest neighbor (KNN) algorithm to adapt to varying action spaces.\nIn [9 ###reference_b9###], the device-to-device (D2D) caching problem is formulated as a multi-agent multi-armed bandit problem, with Q-learning used to coordinate caching decisions among users. To manage extensive action spaces, a modified combinatorial Upper Confidence Bound (UCB) algorithm is employed. Study [10 ###reference_b10###] focuses on optimizing caching placement in a two-tier system, using Difference of Convex (DC) programming to maximize offloading probability through local caching and sharing.\nStudy [25 ###reference_b25###] proposes a computation offloading method that combines demand prediction using a Spatial-Temporal Graph Neural Network (STGNN) with a caching decision algorithm based on predicted demand. The authors in [26 ###reference_b26###] address varying content popularity by predicting average popularity and adjusting caching probabilities accordingly, managing different content popularities across runtime sessions.\nProactive caching faces challenges, such as potential low cache hit rates for pre-cached unpopular content and limited adaptability to dynamic user behavior changes. This can result in suboptimal performance if user preferences shift and previous caching decisions no longer apply."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "2.3",
|
| 31 |
+
"parent_section_id": "2",
|
| 32 |
+
"section_name": "II-C Motivation",
|
| 33 |
+
"text": "Real-world systems often contain files with diverse attributes that impact caching effectiveness, including lifetime, importance, and size. Our review reveals that current methods do not fully consider these attributes in caching decisions. Our proposed policy is the first to integrate a comprehensive set of file characteristics, offering a novel approach to caching.\nAdditionally, many existing methods model caching as a discrete-time problem [8 ###reference_b8###] [9 ###reference_b9###] [27 ###reference_b27###] [1 ###reference_b1###] [17 ###reference_b17###] [6 ###reference_b6###] [13 ###reference_b13###] [19 ###reference_b19###] [20 ###reference_b20###], which conflicts with the continuous-time nature of request arrivals. We address this by formulating the problem as an SMDP and developing a DRL-based reactive caching scheme. This approach maintains a brief request history and employs PPO to optimize the DRL agent\u2019s policy, enabling it to make adaptive caching decisions based on current and past experiences."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "3",
|
| 37 |
+
"parent_section_id": null,
|
| 38 |
+
"section_name": "III System Model",
|
| 39 |
+
"text": ""
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "3.1",
|
| 43 |
+
"parent_section_id": "3",
|
| 44 |
+
"section_name": "III-A System Architecture",
|
| 45 |
+
"text": "We consider a network scenario where end users access the Internet through an edge router, which connects to a data center on one side and the end users on the other side, as illustrated in Fig. 1 ###reference_###.\nThe cloud data center is assumed to have sufficient capacity to store all contents [24 ###reference_b24###]. When an end user requests a file, a copy is created and sent to the user via the edge router over the Internet. The edge router has a limited cache capacity of files. If the requested file is already cached, the request is fulfilled from the edge router\u2019s cache rather than retrieving it from the data center. Hereafter, we use \u201ccache\u201d to refer to the edge router\u2019s cache. End users are devices that request files based on their needs and preferences [24 ###reference_b24###]. Let denote the set of users connected to the edge router. Requests are represented by , where denotes the request, irrespective of the user making it. Requests are processed in the order they are created.\n###figure_1### Files, generated by sources such as cameras, sensors, and computers, are stored in the data center. Let denote the set of file types, where represents the type of file. Each file type may have distinct characteristics including popularity, lifetime, size, and importance. These characteristics are further explained with real-life examples:\nPopularity: The number of times a file is requested by users [28 ###reference_b28###]. Popular files, such as trending videos, are requested more frequently than less popular ones.\nLifetime: The duration for which a file is valid from the time it leaves the data center [18 ###reference_b18###]. For example, location-based services require timely updates, so files have a predetermined lifetime upon generation [29 ###reference_b29###].\nSize: Files may vary in size depending on their type and content. 
For instance, a movie file is typically larger than a text file.\nImportance: Reflects the value a file holds for users based on its relevance and necessity. Files that are crucial for timely access, such as real-time financial data or emergency updates, are considered more important than less critical files like free ebooks or casual videos.\nA caching policy must consider all file characteristics simultaneously. Focusing on only one attribute may overlook other important features. For example, a highly popular file with a short lifetime, large size, and low importance might not be as beneficial as a less popular file with a longer lifetime, smaller size, and higher importance.\nIn this paper, we consider four characteristics for each file. Let denote the popularity values for each file type, where represents the popularity of file type . This indicates the number of times file type is requested within a predefined time interval . We denote the lifetime, size, and importance of each file type at time by , , and respectively.\nThe utility of a cached file, defined as a function of its freshness and importance, is used to determine its value. Freshness is the age of the file normalized by its lifetime at time :\nwhere is the current time, is the lifetime, and is the generation time of file type .\nThe utility function for file type at time is denoted as:\nwhere represents the utility of file type at time . The function depends on two key factors: the freshness of the file, denoted as , and the importance of the file type, represented by . The function captures the interaction between these factors in determining the overall utility. In general, the utility increases as the importance of the file grows, reflecting the higher benefit the file brings to the system. Notably, there is an inverse relationship between the file\u2019s freshness and its utility; as the freshness decreases over time, indicating the file is becoming older, the utility decreases correspondingly. 
While the definition of a utility function often varies according to individual user perspectives and specific applications, there is no universally applicable definition. However, our proposed method is flexible and can be utilized with any definition of a utility function."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "3.2",
|
| 49 |
+
"parent_section_id": "3",
|
| 50 |
+
"section_name": "III-B System Uncertainties",
|
| 51 |
+
"text": "Caching involves uncertainties such as random request arrivals and the unpredictable impact of future requests on the cache. These uncertainties impact the decision-making process of the edge router. Below, we outline these uncertainties and our assumptions."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "3.2.1",
|
| 55 |
+
"parent_section_id": "3.2",
|
| 56 |
+
"section_name": "III-B1 Random Arrivals of Requests",
|
| 57 |
+
"text": "Requests for content often arrive randomly due to factors like user behavior and network conditions. We model request arrivals using a Poisson process, a common model for characterizing user request patterns [30 ###reference_b30###]. The parameter represents the request rate, and is the expected time between consecutive requests, where . We assume that the edge router has no prior knowledge of the Poisson process or its parameters."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "3.2.2",
|
| 61 |
+
"parent_section_id": "3.2",
|
| 62 |
+
"section_name": "III-B2 Unknown Effect of Upcoming Requests on the Cache",
|
| 63 |
+
"text": "Given the types of files with unique characteristics (popularity, lifetime, size, importance), there is no prior knowledge of future content requests. Consequently, caching a file may have an unforeseen impact on subsequent decisions.\nThese uncertainties significantly affect caching performance. In section V-A ###reference_###, we discuss how our caching policy addresses these uncertainties and propose an approach to optimize the hit rate in the long run."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "4",
|
| 67 |
+
"parent_section_id": null,
|
| 68 |
+
"section_name": "IV Problem Formulation",
|
| 69 |
+
"text": "The edge router\u2019s task involves deciding which files to cache based on several factors, including the frequency of user requests, file characteristics, and the available cache space. This decision-making process is sequential and can be effectively modeled using MDPs, as detailed in [11 ###reference_b11###].\nAn MDP consists of five components: state, action, system dynamics, reward function, and policy. In this framework, a state represents the system\u2019s status at a given time. The agent selects an action based on the policy, transitions to a new state, and receives a reward reflecting the action\u2019s quality. The process continues indefinitely (infinite horizon) or until a final state is reached (finite horizon) [12 ###reference_b12###]. For the caching problem, the infinite-horizon case is appropriate since there is no final state.\nGiven that request arrivals are continuous, a continuous-time variant of MDP is necessary. We propose using the SMDP framework, which accommodates variable transition times [11 ###reference_b11###]. The following sections elaborate on the SMDP formalism and its components."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "4.1",
|
| 73 |
+
"parent_section_id": "4",
|
| 74 |
+
"section_name": "IV-A SMDP Formalism",
|
| 75 |
+
"text": "An SMDP is defined by the tuple , where is the state space, is the action space, represents transition times, is the reward function, and denotes the policy."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "4.1.1",
|
| 79 |
+
"parent_section_id": "4.1",
|
| 80 |
+
"section_name": "IV-A1 States of the System",
|
| 81 |
+
"text": "The system state at time is denoted by and includes:\nwhere indicates the unoccupied percentage of cache memory, which is expressed as follows.\nwhere is the cache capacity. The vector , , is a binary vector, where a value of 0 indicates that a file is not cached and a value of 1 indicates that the file is already cached. The vectors , indicates the utility of files and if is not currently cached. represents the number of times that each file type has been requested within recent requests. , and represent the importance, lifetime and size of each file type within recent requests."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "4.1.2",
|
| 85 |
+
"parent_section_id": "4.1",
|
| 86 |
+
"section_name": "IV-A2 Actions",
|
| 87 |
+
"text": "At any time , the agent can take one of two actions: (cache the file) or (do not cache the file). When the cache is full, the file with the lowest utility is removed to make space for the new file. If more space is needed, files with the next lowest utilities are also removed."
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "4.1.3",
|
| 91 |
+
"parent_section_id": "4.1",
|
| 92 |
+
"section_name": "IV-A3 Dynamics of the System",
|
| 93 |
+
"text": "In our system, formulated as an SMDP with random task arrivals, the dynamics are governed by both the state transition probabilities and the transition times between states . This makes the system\u2019s behavior more complex compared to standard MDPs, as both time and state transitions influence decision-making. If these dynamics, along with the reward function, were fully known, Bellman equations could be used to obtain the optimal policy. However, since such information is often unavailable in real-world problems, we employ reinforcement learning to iteratively learn these dynamics and optimize decision-making through experience. The specifics of the RL algorithm applied will be described in the next section."
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "4.1.4",
|
| 97 |
+
"parent_section_id": "4.1",
|
| 98 |
+
"section_name": "IV-A4 Instant Reward and Long-term Goal",
|
| 99 |
+
"text": "The instant reward is defined as:\nwhere the first term is the weighted utility of cached files, and the second represents unused cache space. and are weighing coefficients.\nThe long-term goal is to maximize the average accumulated worth of cached files while minimizing the average unoccupied portion of the cache."
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"section_id": "4.1.5",
|
| 103 |
+
"parent_section_id": "4.1",
|
| 104 |
+
"section_name": "IV-A5 The Policy",
|
| 105 |
+
"text": "The policy () determines the optimal action to take in each state to achieve the long-term goal."
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"section_id": "5",
|
| 109 |
+
"parent_section_id": null,
|
| 110 |
+
"section_name": "Enhanced PPO Algorithm with Prioritized Replay Buffer Using Attention Mechanisms",
|
| 111 |
+
"text": "In this section, we introduce an enhanced version of the PPO [32 ###reference_b32###] algorithm, which incorporates a prioritized replay buffer that leverages attention mechanisms. This enhancement aims to improve learning efficiency and policy performance by prioritizing transitions that are more pertinent to the current state of the agent. We first provide a detailed overview of the PPO algorithm, followed by an explanation of how the transitions are prioritized using attention mechanisms."
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"section_id": "5.1",
|
| 115 |
+
"parent_section_id": "5",
|
| 116 |
+
"section_name": "Proximal Policy Optimization",
|
| 117 |
+
"text": "PPO is a popular reinforcement learning algorithm known for its stability and ease of implementation. PPO addresses the challenges of policy optimization by introducing a clipped objective function, which prevents large policy updates and stabilizes training.\nThe core idea of PPO is to maximize the expected reward while ensuring that the new policy does not deviate excessively from the old policy. PPO achieves this through a surrogate objective function, which is given by:\nwhere is the probability of taking action in state under the policy parameterized by , is the probability under the previous policy, is the advantage function, and is a clipping parameter.\nPPO updates the policy by maximizing this objective function using stochastic gradient ascent. The clipped objective ensures that the new policy does not deviate significantly from the old policy, balancing exploration and exploitation.\nThe advantage function measures the relative value of an action compared to the baseline:\nwhere and represent the values of the current state and the next state, respectively. In the context of a standard MDP, is equal to 1 because state transitions occur at regular intervals, and the time between consecutive state transitions is fixed. As a result, is raised to the power of 1, simplifying the equation. However, in SMDP, the state transition times vary, meaning that can take different values depending on the duration between transitions. In this case, the equation is modified by raising to the power of , which accounts for the variable time intervals between state transitions. This adjustment allows SMDP to more accurately reflect the delayed rewards over non-uniform time steps, ensuring that the future reward is appropriately discounted based on the actual time elapsed between state transitions [33 ###reference_b33###].\nThe value function is updated alongside the policy using a separate loss function. 
The loss for the value function is typically the squared error between the predicted value and the target return :\nThis value loss ensures that the policy update is accompanied by an accurate estimation of state values.\nThe target return is the one-step return defined as:\nThis formulation allows the value network to minimize the error between the current estimated value and the calculated return based on the immediate reward and future discounted value."
|
| 118 |
+
},
|
| 119 |
+
{
|
| 120 |
+
"section_id": "5.2",
|
| 121 |
+
"parent_section_id": "5",
|
| 122 |
+
"section_name": "Prioritizing Transitions with Attention Mechanisms",
|
| 123 |
+
"text": "To further enhance the PPO algorithm, we incorporate a prioritized replay buffer using an attention mechanism."
|
| 124 |
+
},
|
| 125 |
+
{
|
| 126 |
+
"section_id": "5.2.1",
|
| 127 |
+
"parent_section_id": "5.2",
|
| 128 |
+
"section_name": "V-B1 Attention Mechanism for Prioritization",
|
| 129 |
+
"text": "In our approach, we utilize the attention mechanism with the key-query-value (KQV) framework to compute the similarity between the current state and the transitions stored in the replay buffer. Let the current state be denoted as , and the transitions in the replay buffer be represented as a set , where each transition in the replay buffer is characterized by its state , action , reward , transition time and next state .\nWe use the following steps to compute the attention-based priority for each transition:\nCompute Attention Scores: Define the query as the embedding of the current state , and let the keys and values be the embeddings of the states in the replay buffer. The attention score for the transition is calculated using:\nwhere is the size of the replay buffer. The score function measures the similarity between the query and the key . We use the dot product as the score function:\nCalculate Priorities: The priority of a transition is proportional to its attention score. Define the priority as:\nUpdate Probabilities: The probability of transition being sampled is:\nwhere the value of determines the degree of prioritization, with corresponding to the uniform sampling case, i.e., transition is sampled randomly.\nTo adapt to environment change, adjustments are made by applying importance sampling weights, as illustrated below:\nwhere controls the degree of importance sampling. When is 0, there is no importance sampling, whereas when is 1, full importance sampling is employed.\n is multiplied by the loss function to control the impact of each transition on updating the neural network.\nBy integrating this attention-based prioritization into PPO, the algorithm evaluates the similarity between the current state and all stored transitions, assigning higher probability of sampling to transitions that exhibit greater similarity. 
By focusing on transitions with similar states with the current one, we ensure that the agent learns from experiences that are more relevant to its current situation. Prioritizing similar transitions leads to more efficient updates since transitions that resemble the current state are more likely to provide useful information for the decision-making process."
|
| 130 |
+
},
|
| 131 |
+
{
|
| 132 |
+
"section_id": "5.3",
|
| 133 |
+
"parent_section_id": "5",
|
| 134 |
+
"section_name": "The loss function",
|
| 135 |
+
"text": "We modify the loss function of the value network in Eq. (5 ###reference_###) as follows:\nThis updated loss function incorporates different components, including 1-step and n-step Temporal Difference (TD) losses, and an L2 norm regularization term. The parameters and control the relative contributions of each term to the overall loss function. By integrating these elements, the value network can effectively learn from both immediate and long-term rewards, while avoiding overfitting.\nThe 1-step TD error, denoted as , is defined as:\nThe n-step TD loss, represented as , is expressed as:\nwhere refers to the state encountered after steps, and is the cumulative transition time from to . The cumulative discounted reward is calculated as:\nwhere represents the transition time from state to . is the immediate reward at the step. Incorporating n-step returns ensures that the values of subsequent states are propagated back to preceding states, enhancing the initial training process.\nis an L2 regularization term designed to reduce overfitting by penalizing large weights."
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"section_id": "6",
|
| 139 |
+
"parent_section_id": null,
|
| 140 |
+
"section_name": "VI Experimental Setup and Results",
|
| 141 |
+
"text": "This section begins with a description of the experimental setup and the tools utilized to implement our Proposed Caching Algorithm (PCA). Next, we introduce two relevant and recent DRL algorithms, which will serve as baselines for comparison.\nOur system model was simulated and the DRL agent was trained using Python 3. To streamline the development of neural networks, we employed the TensorFlow platform as described in [34 ###reference_b34###]."
|
| 142 |
+
},
|
| 143 |
+
{
|
| 144 |
+
"section_id": "6.1",
|
| 145 |
+
"parent_section_id": "6",
|
| 146 |
+
"section_name": "VI-A Configuration and Parameters",
|
| 147 |
+
"text": "The DRL algorithm was implemented with 2 neural networks for actor and critic, each featuring 3 layers. The weights of the network were initialized within the range of [-0.1, 0.1], while biases were set to 0.1. ReLU served as the activation function. Additional parameter details can be found in Table I ###reference_###.\nFile request probabilities follow a Zipf distribution, characterized by the parameter where . In this distribution, the likelihood of the file being requested is given by [30 ###reference_b30###], with defined as:\nThe value of influences the skewness of the Zipf distribution. When approaches 1, the likelihood of requesting the most popular file increases significantly compared to other files. Conversely, if is close to 0, the popularity of files becomes more evenly distributed. The utility function is set to increase linearly with the importance of a file and exponentially with its freshness."
|
| 148 |
+
},
|
| 149 |
+
{
|
| 150 |
+
"section_id": "6.2",
|
| 151 |
+
"parent_section_id": "6",
|
| 152 |
+
"section_name": "VI-B Baselines",
|
| 153 |
+
"text": "In this part, we introduce the baseline methods used for evaluating the performance of the proposed DRL approach.\nCTD: Our DRL caching algorithm is compared against the approach presented in [17 ###reference_b17###], where the authors modeled the caching problem as a discrete-time MDP and developed a DRL method to assist the edge router in deciding which files to cache based on various system states. In [17 ###reference_b17###], file characteristics such as differing popularities and lifetimes were considered. However, other attributes like importance and file size were not included in their model. In our result comparisons, we refer to the approach in [17 ###reference_b17###] as CTD.\nRLTD: Another baseline we use for comparison is from [6 ###reference_b6###]. This baseline proposes a DRL-based caching scheme, also considering the freshness and limited lifetime of data files. They proposed a distributed DRL approach for a hierarchical architecture where each cache in the hierarchy has a separate DRL agent operating independently. Since our work considers a single-level cache, we adapted this baseline to our system model by implementing it within a single-level cache. We present the results based on this configuration and call it RLTD."
|
| 154 |
+
},
|
| 155 |
+
{
|
| 156 |
+
"section_id": "6.3",
|
| 157 |
+
"parent_section_id": "6",
|
| 158 |
+
"section_name": "VI-C Evaluation Criteria",
|
| 159 |
+
"text": "Our proposed method is assessed using the following performance indicators:\nCache Hits: After training and policy convergence, different algorithms are tested with an additional 1000 user requests under identical conditions. Cache hits refer to the number of times a requested item is successfully retrieved from the cache. The primary objective of any caching strategy is to enhance this metric.\nTotal Utility: When a requested file is found in the cache, its utility score is added to the cumulative utility. A higher aggregate utility signifies that the caching strategy effectively retains files with greater utility values.\nRate of Convergence: An RL agent eventually stabilizes at its peak average reward. The efficiency of the algorithm is judged by how quickly it reaches this stable reward level. Rapid convergence is a key indicator of the learning algorithm\u2019s effectiveness."
|
| 160 |
+
},
|
| 161 |
+
{
|
| 162 |
+
"section_id": "6.4",
|
| 163 |
+
"parent_section_id": "6",
|
| 164 |
+
"section_name": "VI-D Experimental Findings",
|
| 165 |
+
"text": "This section begins with a comparative analysis of our simulation results against those of the CTD and RLTD methods, focusing on metrics such as total utility and cache hit count. We also compare the performance of our enhanced PPO algorithm with the original PPO algorithm to demonstrate how our enhancements lead to faster convergence.\nFigures 2 ###reference_### and 3 ###reference_### illustrate the hit counts and total utility across 1000 trials for varying values of the Zipf parameter (). The parameter governs the degree of skewness in the popularity distribution. A value of near 0 indicates that file popularities are relatively uniform, meaning there is minimal disparity between the most and least popular files. Conversely, when approaches 1, the distribution becomes highly skewed, with a few files dominating in popularity while others remain relatively obscure. This skewness means that caching the most popular files leads to a higher hit count, as increasing from 0 to 1 enhances the likelihood of such files being requested. Consequently, as rises, the total utility also increases due to the higher probability of retaining more frequently requested files, which are more likely to be up-to-date.\nPCA outperforms both CTD and RLTD in terms of hit rate and total utility across all values of , ranging from 0 to 1. This performance gap is due to PCA\u2019s ability to consider a wider range of file features, such as popularity, lifetime, importance, and size, which allows it to make more informed caching decisions. As the skewness increases, indicating a more concentrated demand for popular files, PCA optimizes cache usage by prioritizing high-utility files based on their combined features, not just popularity and lifetime like CTD and RLTD.\n###figure_2### ###figure_3### Figures 4 ###reference_### and 5 ###reference_### depict the hit counts and total utility for varying request rates, denoted by , across 1000 trials. 
A lower corresponds to longer intervals between consecutive requests, which is associated with reduced hit counts. This is because longer interarrival times increases the likelihood of cached files expiring before reuse due to their finite lifetimes. Conversely, a higher means requests are more frequent, allowing the cache to fulfill more requests before files expire. This results in better performance in terms of both hit count and total utility. Our proposed method demonstrates superior performance compared to existing approaches for different values of .\n###figure_4### ###figure_5### Increasing the cache size enhances the system\u2019s capacity to store more files, which generally leads to a higher number of requests being served directly from the cache rather than fetching them from the data center. This increased capacity results in improved hit counts with larger caches. As illustrated in Figure 6 ###reference_###, our proposed approach consistently performs better than the benchmark caching strategies across various cache sizes. Additionally, as shown in Figure 7 ###reference_###, our method also surpasses the benchmark methods in terms of total utility.\n###figure_6### ###figure_7### To showcase the benefits of using SMDP, we conducted an experiment with an alternative version of our caching algorithm, where the problem is framed using a discrete-time MDP instead of SMDP. In the discrete-time MDP setup, decisions are made at the beginning of each time interval, causing the agent to delay actions until the next time slot arrives, even if there are pending requests. Table II ###reference_### shows the hit counts for the MDP and SMDP models. Since the agent in the MDP framework waits for a new time slot to make a decision, the files already in the cache continue to age, and they may even expire before being accessed. 
In contrast, SMDP allows the agent to make decisions as soon as a request arrives, optimizing the time that files remain cached for serving requests. As a result, the hit rate improves because cached files are utilized more efficiently. This increase in hit rate is especially significant when the request rate is higher, as more frequent requests with smaller interarrival times benefit from the SMDP\u2019s ability to respond immediately, reducing the likelihood of expired or outdated cached files.\nThrough extensive experimentation, we demonstrate that PCA significantly outperforms both CTD and RLTD in terms of cache hit rate and total utility under various scenarios. Additionally, we find that RLTD performs better than CTD. In the following sections, we explain the key reasons for these performance differences.\nThe superior performance of PCA can be attributed to two major contributions. First, PCA incorporates a comprehensive set of file features in its decision-making process, including file popularity, lifetime, importance, and size. In contrast, both CTD and RLTD limit their scope to only two features: file lifetime and popularity. By considering a richer set of features, PCA is better equipped to optimize caching decisions based on the specific characteristics of each file. This allows PCA to prioritize files not only based on their popularity but also on their importance and size, leading to a more efficient use of the cache and improved hit rates.\nSecond, PCA is formulated using SMDP, which enables decision-making at any moment when a request arrives at the edge router. This flexibility allows PCA to immediately respond to incoming requests and take action as needed, maximizing the use of cached files. On the other hand, both CTD and RLTD are based on a standard MDP, where decisions are only made at the beginning of fixed time intervals. 
As a result, when requests arrive between time intervals, the system must wait until the next interval to take action, which can cause delays in responding to requests and lead to lower hit rates. This limitation in MDP-based approaches explains why PCA, with its continuous decision-making capability, performs better, particularly in environments with high request rates and variable interarrival times.\nThe advantage of RLTD over CTD lies in their respective reward functions. CTD\u2019s reward function provides instant rewards solely based on the most recent requested file, which limits the feedback to the immediate outcome of the agent\u2019s actions. In contrast, both RLTD and PCA use a more sophisticated reward function that takes into account the history of cached files. By considering a delayed reward system, these approaches better reflect the cumulative impact of previous caching decisions. This delayed feedback gives a more accurate evaluation of the agent\u2019s long-term performance, rather than focusing only on short-term gains from responding to the most recent request. Consequently, RLTD and PCA are able to optimize caching policies more effectively over time, leading to better overall results compared to CTD.\nIn our simulations, we observed a significant improvement in convergence speed with the enhanced PPO algorithm that incorporates experience prioritization using an attention mechanism in the replay buffer. The results are illustrated in Figure 8 ###reference_###, which compares the average reward over time during the training process for the standard PPO and PCA. The plot clearly shows that PCA converges much faster, achieving higher average rewards earlier in the training process compared to the baseline PPO. Particularly, the enhanced PPO algorithm reaches its stable average reward faster than the standard PPO.\nThe rapid convergence of the enhanced PPO can be attributed to the effective prioritization of experiences in the replay buffer. 
By leveraging the attention mechanism, the algorithm focuses on more relevant experiences, which accelerates the learning process and improves overall performance. This prioritization ensures that the agent learns from the most impactful transitions more frequently, facilitating a more efficient exploration of the state space. Consequently, this leads to quicker adaptation and refinement of the policy, thereby achieving superior performance in less time.\n###figure_8###"
|
| 166 |
+
},
|
| 167 |
+
{
|
| 168 |
+
"section_id": "7",
|
| 169 |
+
"parent_section_id": null,
|
| 170 |
+
"section_name": "VII Conclusion",
|
| 171 |
+
"text": "In conclusion, our research presents a novel approach to caching challenges by leveraging an SMDP model to handle the real-world scenarios, where file requests arrive randomly at the edge router. Our proposed PPO-based caching method integrates a wide range of file attributes, such as popularity, lifetime, size, and importance. Simulation results highlight its superior performance compared to existing DRL-based methods, demonstrating improved efficiency. Additionally, we have enhanced the PPO algorithm by incorporating an attention mechanism to prioritize transitions in the replay buffer, leading to accelerated convergence and further improvements in performance."
|
| 172 |
+
}
|
| 173 |
+
],
|
| 174 |
+
"appendix": [],
|
| 175 |
+
"tables": {
|
| 176 |
+
"1": {
|
| 177 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S6.T1\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">TABLE I: </span>parameter settings</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S6.T1.11\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S6.T1.11.12.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_th_row ltx_border_t\" id=\"S6.T1.11.12.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T1.11.12.1.1.1\">Notation</span></th>\n<th class=\"ltx_td ltx_align_justify ltx_align_top ltx_th ltx_th_column ltx_border_t\" id=\"S6.T1.11.12.1.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S6.T1.11.12.1.2.1\">\n<span class=\"ltx_p\" id=\"S6.T1.11.12.1.2.1.1\" style=\"width:216.8pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T1.11.12.1.2.1.1.1\">value</span></span>\n</span>\n</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S6.T1.1.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S6.T1.1.1.1\"></th>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S6.T1.1.1.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S6.T1.1.1.2.1\">\n<span class=\"ltx_p\" id=\"S6.T1.1.1.2.1.1\" style=\"width:216.8pt;\">50</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T1.2.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S6.T1.2.2.1\"></th>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S6.T1.2.2.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S6.T1.2.2.2.1\">\n<span class=\"ltx_p\" id=\"S6.T1.2.2.2.1.1\" style=\"width:216.8pt;\">0.99</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T1.3.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S6.T1.3.3.1\"></th>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S6.T1.3.3.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S6.T1.3.3.2.1\">\n<span 
class=\"ltx_p\" id=\"S6.T1.3.3.2.1.1\" style=\"width:216.8pt;\">10000</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T1.4.4\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S6.T1.4.4.1\"></th>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S6.T1.4.4.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S6.T1.4.4.2.1\">\n<span class=\"ltx_p\" id=\"S6.T1.4.4.2.1.1\" style=\"width:216.8pt;\">0.6 linearly increased to 1</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T1.5.5\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S6.T1.5.5.1\"></th>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S6.T1.5.5.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S6.T1.5.5.2.1\">\n<span class=\"ltx_p\" id=\"S6.T1.5.5.2.1.1\" style=\"width:216.8pt;\">0.4</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T1.11.13.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S6.T1.11.13.1.1\">Batch size</th>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S6.T1.11.13.1.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S6.T1.11.13.1.2.1\">\n<span class=\"ltx_p\" id=\"S6.T1.11.13.1.2.1.1\" style=\"width:216.8pt;\">64</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T1.6.6\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S6.T1.6.6.1\"></th>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S6.T1.6.6.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S6.T1.6.6.2.1\">\n<span class=\"ltx_p\" id=\"S6.T1.6.6.2.1.1\" style=\"width:216.8pt;\">10000</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T1.7.7\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S6.T1.7.7.1\">\n for each file type</th>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S6.T1.7.7.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S6.T1.7.7.2.1\">\n<span class=\"ltx_p\" id=\"S6.T1.7.7.2.1.1\" style=\"width:216.8pt;\">Randomly 
generated from [10, 30]</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T1.8.8\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S6.T1.8.8.1\">\n for each file type</th>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S6.T1.8.8.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S6.T1.8.8.2.1\">\n<span class=\"ltx_p\" id=\"S6.T1.8.8.2.1.1\" style=\"width:216.8pt;\">Randomly generated from [0.1, 0.9]</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T1.9.9\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S6.T1.9.9.1\">\n for each file type</th>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S6.T1.9.9.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S6.T1.9.9.2.1\">\n<span class=\"ltx_p\" id=\"S6.T1.9.9.2.1.1\" style=\"width:216.8pt;\">Randomly generated from [100, 1000]</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T1.10.10\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S6.T1.10.10.1\"></th>\n<td class=\"ltx_td ltx_align_justify ltx_align_top\" id=\"S6.T1.10.10.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S6.T1.10.10.2.1\">\n<span class=\"ltx_p\" id=\"S6.T1.10.10.2.1.1\" style=\"width:216.8pt;\">0.2</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T1.11.11\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_b\" id=\"S6.T1.11.11.1\"></th>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_b\" id=\"S6.T1.11.11.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S6.T1.11.11.2.1\">\n<span class=\"ltx_p\" id=\"S6.T1.11.11.2.1.1\" style=\"width:216.8pt;\">[0, 1]</span>\n</span>\n</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 178 |
+
"capture": "TABLE I: parameter settings"
|
| 179 |
+
},
|
| 180 |
+
"2": {
|
| 181 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S6.T2\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">TABLE II: </span>Hit count for MDP and SMDP under different request rates</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S6.T2.3\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S6.T2.3.3\">\n<th class=\"ltx_td ltx_th ltx_th_column ltx_th_row ltx_border_l ltx_border_r ltx_border_t\" id=\"S6.T2.3.3.4\"></th>\n<th class=\"ltx_td ltx_align_justify ltx_align_top ltx_th ltx_th_column ltx_border_t\" id=\"S6.T2.1.1.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S6.T2.1.1.1.1\">\n<span class=\"ltx_p\" id=\"S6.T2.1.1.1.1.1\" style=\"width:42.7pt;\"></span>\n</span>\n</th>\n<th class=\"ltx_td ltx_align_justify ltx_align_top ltx_th ltx_th_column ltx_border_t\" id=\"S6.T2.2.2.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S6.T2.2.2.2.1\">\n<span class=\"ltx_p\" id=\"S6.T2.2.2.2.1.1\" style=\"width:42.7pt;\"></span>\n</span>\n</th>\n<th class=\"ltx_td ltx_align_justify ltx_align_top ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S6.T2.3.3.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S6.T2.3.3.3.1\">\n<span class=\"ltx_p\" id=\"S6.T2.3.3.3.1.1\" style=\"width:42.7pt;\"></span>\n</span>\n</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S6.T2.3.4.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_l ltx_border_r ltx_border_t\" id=\"S6.T2.3.4.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T2.3.4.1.1.1\">MDP</span></th>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S6.T2.3.4.1.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S6.T2.3.4.1.2.1\">\n<span class=\"ltx_p\" id=\"S6.T2.3.4.1.2.1.1\" style=\"width:42.7pt;\">405</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_t\" id=\"S6.T2.3.4.1.3\">\n<span 
class=\"ltx_inline-block ltx_align_top\" id=\"S6.T2.3.4.1.3.1\">\n<span class=\"ltx_p\" id=\"S6.T2.3.4.1.3.1.1\" style=\"width:42.7pt;\">431</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S6.T2.3.4.1.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S6.T2.3.4.1.4.1\">\n<span class=\"ltx_p\" id=\"S6.T2.3.4.1.4.1.1\" style=\"width:42.7pt;\">399</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T2.3.5.2\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_b ltx_border_l ltx_border_r ltx_border_t\" id=\"S6.T2.3.5.2.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T2.3.5.2.1.1\">SMDP</span></th>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_b ltx_border_t\" id=\"S6.T2.3.5.2.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S6.T2.3.5.2.2.1\">\n<span class=\"ltx_p\" id=\"S6.T2.3.5.2.2.1.1\" style=\"width:42.7pt;\">610</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_b ltx_border_t\" id=\"S6.T2.3.5.2.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S6.T2.3.5.2.3.1\">\n<span class=\"ltx_p\" id=\"S6.T2.3.5.2.3.1.1\" style=\"width:42.7pt;\">526</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_b ltx_border_r ltx_border_t\" id=\"S6.T2.3.5.2.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S6.T2.3.5.2.4.1\">\n<span class=\"ltx_p\" id=\"S6.T2.3.5.2.4.1.1\" style=\"width:42.7pt;\">443</span>\n</span>\n</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 182 |
+
"capture": "TABLE II: Hit count for MDP and SMDP under different request rates"
|
| 183 |
+
}
|
| 184 |
+
},
|
| 185 |
+
"image_paths": {
|
| 186 |
+
"1": {
|
| 187 |
+
"figure_path": "2402.14576v3_figure_1.png",
|
| 188 |
+
"caption": "Figure 1: Caching system topology",
|
| 189 |
+
"url": "http://arxiv.org/html/2402.14576v3/extracted/5963660/figures/1L.png"
|
| 190 |
+
},
|
| 191 |
+
"2": {
|
| 192 |
+
"figure_path": "2402.14576v3_figure_2.png",
|
| 193 |
+
"caption": "Figure 2: Total hit counts for different values for \u03b7\ud835\udf02\\etaitalic_\u03b7",
|
| 194 |
+
"url": "http://arxiv.org/html/2402.14576v3/extracted/5963660/figures/Populartyhit.png"
|
| 195 |
+
},
|
| 196 |
+
"3": {
|
| 197 |
+
"figure_path": "2402.14576v3_figure_3.png",
|
| 198 |
+
"caption": "Figure 3: Total utility for a) different values for \u03b7\ud835\udf02\\etaitalic_\u03b7",
|
| 199 |
+
"url": "http://arxiv.org/html/2402.14576v3/extracted/5963660/figures/PopularityUt.png"
|
| 200 |
+
},
|
| 201 |
+
"4": {
|
| 202 |
+
"figure_path": "2402.14576v3_figure_4.png",
|
| 203 |
+
"caption": "Figure 4: Total hit count for different values for \u03bb\ud835\udf06\\lambdaitalic_\u03bb",
|
| 204 |
+
"url": "http://arxiv.org/html/2402.14576v3/extracted/5963660/figures/Interhit.png"
|
| 205 |
+
},
|
| 206 |
+
"5": {
|
| 207 |
+
"figure_path": "2402.14576v3_figure_5.png",
|
| 208 |
+
"caption": "Figure 5: Total utility for different values for \u03bb\ud835\udf06\\lambdaitalic_\u03bb",
|
| 209 |
+
"url": "http://arxiv.org/html/2402.14576v3/extracted/5963660/figures/InterUt.png"
|
| 210 |
+
},
|
| 211 |
+
"6": {
|
| 212 |
+
"figure_path": "2402.14576v3_figure_6.png",
|
| 213 |
+
"caption": "Figure 6: Total hit count for different cache sizes",
|
| 214 |
+
"url": "http://arxiv.org/html/2402.14576v3/extracted/5963660/figures/CShit.png"
|
| 215 |
+
},
|
| 216 |
+
"7": {
|
| 217 |
+
"figure_path": "2402.14576v3_figure_7.png",
|
| 218 |
+
"caption": "Figure 7: Total utility for different cache sizes",
|
| 219 |
+
"url": "http://arxiv.org/html/2402.14576v3/extracted/5963660/figures/CSUt.png"
|
| 220 |
+
},
|
| 221 |
+
"8": {
|
| 222 |
+
"figure_path": "2402.14576v3_figure_8.png",
|
| 223 |
+
"caption": "Figure 8: PPO enhancement",
|
| 224 |
+
"url": "http://arxiv.org/html/2402.14576v3/extracted/5963660/figures/PPO_Enhancement.png"
|
| 225 |
+
}
|
| 226 |
+
},
|
| 227 |
+
"validation": true,
|
| 228 |
+
"references": [],
|
| 229 |
+
"url": "http://arxiv.org/html/2402.14576v3"
|
| 230 |
+
}
|