|
{ |
|
"paper_id": "D19-1044", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T16:03:13.853597Z" |
|
}, |
|
"title": "Label-Specific Document Representation for Multi-Label Text Classification", |
|
"authors": [ |
|
{ |
|
"first": "Lin", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Beijing Key Lab of Traffic Data Analysis and Mining", |
|
"institution": "Beijing Jiaotong University", |
|
"location": { |
|
"postCode": "{17112079, 18120367, 18120345", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Beijing Key Lab of Traffic Data Analysis and Mining", |
|
"institution": "Beijing Jiaotong University", |
|
"location": { |
|
"postCode": "{17112079, 18120367, 18120345", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Boli", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Beijing Key Lab of Traffic Data Analysis and Mining", |
|
"institution": "Beijing Jiaotong University", |
|
"location": { |
|
"postCode": "{17112079, 18120367, 18120345", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Liping", |
|
"middle": [], |
|
"last": "Jing", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Beijing Key Lab of Traffic Data Analysis and Mining", |
|
"institution": "Beijing Jiaotong University", |
|
"location": { |
|
"postCode": "{17112079, 18120367, 18120345", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "lpjing@bjtu.edu.cn" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Multi-label text classification (MLTC) aims to tag most relevant labels for the given document. In this paper, we propose a Label-Specific Attention Network (LSAN) to learn the new document representation. LSAN takes advantage of label semantic information to determine the semantic connection between labels and document for constructing labelspecific document representation. Meanwhile, the self-attention mechanism is adopted to identify the label-specific document representation from document content information. In order to seamlessly integrate the above two parts, an adaptive fusion strategy is designed, which can effectively output the comprehensive document representation to build multilabel text classifier. Extensive experimental results on four benchmark datasets demonstrate that LSAN consistently outperforms the stateof-the-art methods, especially on the prediction of low-frequency labels. The code and hyper-parameter settings are released to facilitate other researchers 1 .", |
|
"pdf_parse": { |
|
"paper_id": "D19-1044", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Multi-label text classification (MLTC) aims to tag most relevant labels for the given document. In this paper, we propose a Label-Specific Attention Network (LSAN) to learn the new document representation. LSAN takes advantage of label semantic information to determine the semantic connection between labels and document for constructing labelspecific document representation. Meanwhile, the self-attention mechanism is adopted to identify the label-specific document representation from document content information. In order to seamlessly integrate the above two parts, an adaptive fusion strategy is designed, which can effectively output the comprehensive document representation to build multilabel text classifier. Extensive experimental results on four benchmark datasets demonstrate that LSAN consistently outperforms the stateof-the-art methods, especially on the prediction of low-frequency labels. The code and hyper-parameter settings are released to facilitate other researchers 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Text classification is a fundamental text mining task including multi-class classification and multilabel classification. The former only assigns one label to the given document, while the latter classifies one document into different topics. In this paper, we focus on multi-label text classification (MLTC) because it has become one of the core tasks in natural language processing and has been widely applied in topic recognition (Yang et al., 2016) , question answering (Kumar et al., 2016) , sentimental analysis (Cambria et al., 2014) and so on. With the boom of big data, MLTC becomes significantly challenging because it has to handle the massive documents, words and labels simultaneously. Therefore, it is an emergency to develop effective multi-label text classifier for various practical applications.", |
|
"cite_spans": [ |
|
{ |
|
"start": 433, |
|
"end": 452, |
|
"text": "(Yang et al., 2016)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 474, |
|
"end": 494, |
|
"text": "(Kumar et al., 2016)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 518, |
|
"end": 540, |
|
"text": "(Cambria et al., 2014)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Multi-label text classification allows for the coexistence of more than one label in a single document, thus, there are semantical correlations among labels because they may share the same subsets of document. Meanwhile, the document may be long and complicated semantic information may be hidden in the noisy or redundant content. Furthermore, most documents fall into few labels while a large number of \"tail labels\" only contain very few positive documents. To handle these issues, researchers pay much attention on three facets: 1) how to sufficiently capture the semantic patterns from the original documents, 2) how to extract the discriminative information related to the corresponding labels from each document, and 3) how to accurately mine the correlation among labels.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Till now, in the community of machine learning and natural language processing, researchers have paid tremendous efforts on developing MLTC methods in each facet. Among them, deep learning-based methods such as CNN (Liu et al., 2017; Kurata et al., 2016) , RNN (Liu et al., 2016) , combination of CNN and RNN (Lai et al., 2015; Chen et al., 2017) , attention mechanism (Yang et al., 2016; You et al., 2018) , (Adhikari et al., 2019) and etc., have achieved great success in document representation. However, most of them only focus on document representation but ignore the correlation among labels. Recently, some methods including DXML , EX-AM (Du et al., 2018) , SGM (Yang et al., 2018) , GILE (Pappas and Henderson, 2019) are proposed to capture the label correlations by exploiting label structure or label content. Although they obtained promising results in some cases, they still cannot work well when there is no big difference between label texts (e.g., the categories Management vs Management moves in Reuters News), which makes them hard to distinguish.", |
|
"cite_spans": [ |
|
{ |
|
"start": 215, |
|
"end": 233, |
|
"text": "(Liu et al., 2017;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 234, |
|
"end": 254, |
|
"text": "Kurata et al., 2016)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 261, |
|
"end": 279, |
|
"text": "(Liu et al., 2016)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 309, |
|
"end": 327, |
|
"text": "(Lai et al., 2015;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 328, |
|
"end": 346, |
|
"text": "Chen et al., 2017)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 369, |
|
"end": 388, |
|
"text": "(Yang et al., 2016;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 389, |
|
"end": 406, |
|
"text": "You et al., 2018)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 409, |
|
"end": 432, |
|
"text": "(Adhikari et al., 2019)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 646, |
|
"end": 663, |
|
"text": "(Du et al., 2018)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 670, |
|
"end": 689, |
|
"text": "(Yang et al., 2018)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 697, |
|
"end": 725, |
|
"text": "(Pappas and Henderson, 2019)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In MLTC task, one document may contain multiple labels, and each label can be taken as one aspect or component of the document, thus, the overall semantics of the whole document can be formed by multiple components. Motivated by the above-mentioned observations, we propose a novel Label-Specific Attention Network model (LSAN) to learn document representation by sufficiently exploiting the document content and label content. To capture the label-related component from each document, we adopt the self-attention mechanism to measure the contribution of each word to each label. Meanwhile, LSAN takes advantage of label texts to embed each label into a vector like word embedding, so that the semantic relations between document words and labels can be explicitly computed. Thereafter, an adaptive fusion strategy is designed to extract the proper amount of information from these two aspects and construct the label-specific representation for each document. We summarize the main contributions:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 A label-specific attention network model is proposed to handle multi-label text classification task by considering document content and label texts simultaneously.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 An adaptive fusion strategy is first designed to adaptively extract the proper semantical information to construct label-specific document representation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 The performance of LSAN is thoroughly investigated on four widely-used benchmark datasets in terms of several evaluation metrics, indicating its advantage over the stateof-the-art baselines.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The rest of the paper is organized as follows. Section 2 describes the proposed LSAN model for multi-label text classification. The experiments on real-word datasets are conducted in Section 3 and their results are discussed in detail. Section 4 lists the related work. The brief conclusions and future work are given in Section 5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this section, we introduce the proposed labelspecific attention network, as shown in Figure 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 88, |
|
"end": 96, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Proposed Method", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "LSAN consists of two main parts. The first part is to capture the label-related components from each document by exploiting both document content and label texts. The second part aims to adaptively extract the proper information from two aspects. Finally, the classification model can be trained on the fused label-specific document representations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Method", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Problem Definition:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preliminaries", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Let D = {(x i , y i )} N i=1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preliminaries", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "denote the set of documents, which consists of N documents with corresponding labels Y = {y i \u2208 {0, 1} l }, here l is the total number of labels. Each document contains a sequence of words. Each word can be encoded to a low-dimensional space and represented as a d-dimension vector via word2vector technique (Pennington et al., 2014) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 308, |
|
"end": 333, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preliminaries", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Let x i = {w 1 , \u2022 \u2022 \u2022 , w p , \u2022 \u2022 \u2022 , w n } denote the i-th document, w p \u2208 R k", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preliminaries", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "is the p-th word vector in the document, n is the number of words in document.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preliminaries", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "For text classification, each label contains textual information. Thus, similar to the document word, one label can be represented as an embedding vector and the label set will be encoded by a trainable matrix C \u2208 R l\u00d7k . Given the input documents and their associated labels D, MLTC aims to train a classifier to assign the most relevant labels to the new coming documents.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preliminaries", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "To capture the forward and backward sides contextual information of each word, we adopt the bidirectional long short-term memory (Bi-LSTM) language model to learn the word embedding for each input document. At time-step p, the hidden state can be updated with the aid of input and (p \u2212 1)-th step output.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Text Representation:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u2212 \u2192 hp = LST M ( \u2212\u2212\u2192 hp\u22121, wp) \u2190 \u2212 hp = LST M ( \u2190\u2212\u2212 hp\u22121, wp)", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Input Text Representation:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where w p is the embedding vector of the p-th word in the corresponding document, and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Text Representation:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2212 \u2192 h p , \u2190 \u2212 h p \u2208 R k", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Text Representation:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "indicate the forward and backward word context representations respectively. Then, the whole document can be represented by Bi-LSTM as follows.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Text Representation:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "H = ( \u2212 \u2192 H , \u2190 \u2212 H ) \u2212 \u2192 H = ( \u2212 \u2192 h1, \u2212 \u2192 h2, \u2022 \u2022 \u2022 , \u2212 \u2192 hn) \u2190 \u2212 H = ( \u2190 \u2212 h1, \u2190 \u2212 h2, \u2022 \u2022 \u2022 , \u2190 \u2212 hn)", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Input Text Representation:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this case, the whole document set can be taken as a matrix H \u2208 R 2k\u00d7n . ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Text Representation:", |
|
"sec_num": null |
|
}, |
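{

"text": "To make the encoding step concrete, the following is a minimal PyTorch-style sketch of Eq. (1)-(2); the sizes k, n and the vocabulary size are illustrative assumptions, not values from the paper.\n\nimport torch\nimport torch.nn as nn\n\nk, n, vocab = 64, 500, 10000             # hidden size, document length, vocabulary size (assumed)\nembed = nn.Embedding(vocab, k)           # maps word ids to k-dimensional vectors w_p\nbilstm = nn.LSTM(input_size=k, hidden_size=k, bidirectional=True, batch_first=True)\n\ntokens = torch.randint(0, vocab, (1, n)) # one document of n word ids\nw = embed(tokens)                        # (1, n, k)\nH, _ = bilstm(w)                         # (1, n, 2k): concatenated forward and backward states, Eq. (2)\nH = H.transpose(1, 2)                    # (1, 2k, n), matching H in R^{2k x n}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Input Text Representation:",

"sec_num": null

},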
|
{ |
|
"text": "In this subsection, we will give the proposed attention network model for label-specific document representation learning. It aims to determin the label-related component from each document. Actually, this strategy is intuitive for text classification. For example, regarding the text \"June a friday, in the lawn, a war between the young boys of the football game starte\", it is assigned into two categories youth and sports. Obviously, the content \"young boy\" is much more related to youth than to sports, while \"football game\" should be directly related to sports. Next, we will show how to capture this characteristic with our model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label-Specific Attention Network", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "As mentioned above, the multi-label document may be tagged by more than one label, and each document should have the most relative contexts with its corresponding labels. In other words, each document may contain multiple components, and the words in one document make different contributions to each label. To capture different components for each label, we adopt the self-attention mechanism (Lin et al., 2017) , which has been successful used in various text mining tasks (Tan et al., 2018; Al-Sabahi et al., 2018; You et al., 2018) . The label-word attention score (A s \u2208 R l\u00d7n ) can be obtained by", |
|
"cite_spans": [ |
|
{ |
|
"start": 394, |
|
"end": 412, |
|
"text": "(Lin et al., 2017)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 475, |
|
"end": 493, |
|
"text": "(Tan et al., 2018;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 494, |
|
"end": 517, |
|
"text": "Al-Sabahi et al., 2018;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 518, |
|
"end": 535, |
|
"text": "You et al., 2018)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-attention Mechanism", |
|
"sec_num": "2.2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "A (s) = sof tmax(W2tanh(W1H))", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Self-attention Mechanism", |
|
"sec_num": "2.2.1" |
|
}, |
|
{ |
|
"text": "where W 1 \u2208 R da\u00d72k and W 2 \u2208 R l\u00d7da are the socalled self-attention parameters to be trained. d a is a hyper-parameter we can set arbitrarily. Each row A (s) j\u2022 (an n-dim row vector where n is the total number of words) indicates the contribution of all words to the j-th label. Then, we can obtain the the linear combination of the context words for each label with the aid of label-word attention score (A (s) ) as follows.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-attention Mechanism", |
|
"sec_num": "2.2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "M (s) j\u2022 = A (s) j H T", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Self-attention Mechanism", |
|
"sec_num": "2.2.1" |
|
}, |
|
{ |
|
"text": "which can be taken as a new representation of the input document along the j-th label. Then the whole matrix M (s) \u2208 R l\u00d72k is the label-specific document representation under the self-attention mechanism.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-attention Mechanism", |
|
"sec_num": "2.2.1" |
|
}, |
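{

"text": "As a rough sketch only (PyTorch assumed, a batch dimension added, all sizes illustrative rather than taken from the paper), Eq. (3)-(4) can be written as:\n\nimport torch\nimport torch.nn as nn\n\nl, k, n, d_a = 54, 64, 500, 128          # labels, hidden size, words, attention size (assumed)\nW1 = nn.Linear(2 * k, d_a, bias=False)\nW2 = nn.Linear(d_a, l, bias=False)\n\nH = torch.randn(1, n, 2 * k)             # Bi-LSTM outputs, (batch, n, 2k)\nA_s = torch.softmax(W2(torch.tanh(W1(H))), dim=1)   # attention over words, Eq. (3)\nA_s = A_s.transpose(1, 2)                # (batch, l, n)\nM_s = A_s @ H                            # (batch, l, 2k), label-specific representation, Eq. (4)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Self-attention Mechanism",

"sec_num": "2.2.1"

},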
|
{ |
|
"text": "Self-attention mechanism can be taken as contentbased attention because it only considers the document content. As we all know, labels have specific semantics in text classification, which is hidden in the label texts or descriptions. To make use of the semantic information of labels, they are preprocessed and represented as a trainable matrix C \u2208 R l\u00d7k in the same latent k-dim space with the words. Once having the word embedding from Bi-LSTM in (1) and the label embedding in C, we can explicitly determine the semantic relation between each pair of word and label. A simple way is calculating the dot product between", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label-Attention Mechanism", |
|
"sec_num": "2.2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u2212 \u2192 h p (or \u2190 \u2212 h p ) and C j\u2022 as follows. \u2212 \u2192 A (l) = C \u2212 \u2192 H \u2190 \u2212 A (l) = C \u2190 \u2212 H", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Label-Attention Mechanism", |
|
"sec_num": "2.2.2" |
|
}, |
|
{ |
|
"text": "where \u2212 \u2192 A (l) \u2208 R l\u00d7n and \u2190 \u2212 A (l) \u2208 R l\u00d7n indicate the forward and backward sides semantic relation between words and labels. Similar to the previous self-attention mechanism, the label-specific document representation can be constructed by linear combining the label's context words as follows.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label-Attention Mechanism", |
|
"sec_num": "2.2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u2212 \u2192 M (l) = \u2212 \u2192 A (l) \u2212 \u2192 H T \u2190 \u2212 M (l) = \u2190 \u2212 A (l) \u2190 \u2212 H T", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Label-Attention Mechanism", |
|
"sec_num": "2.2.2" |
|
}, |
|
{ |
|
"text": "Finally, the document can be re-represented along all labels via", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label-Attention Mechanism", |
|
"sec_num": "2.2.2" |
|
}, |
|
{ |
|
"text": "M (l) = ( \u2212 \u2192 M (l) , \u2190 \u2212 M (l) ) \u2208 R l\u00d72k", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label-Attention Mechanism", |
|
"sec_num": "2.2.2" |
|
}, |
|
{ |
|
"text": ". This representation is based on the label texts, thus, we called it as label-attention mechanism.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label-Attention Mechanism", |
|
"sec_num": "2.2.2" |
|
}, |
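{

"text": "In the same sketch style (PyTorch assumed; the label embedding matrix C and the forward/backward Bi-LSTM states are treated as given, with illustrative sizes), Eq. (5)-(6) amount to:\n\nimport torch\nimport torch.nn as nn\n\nl, k, n = 54, 64, 500                    # labels, hidden size, words (assumed)\nC = nn.Parameter(torch.randn(l, k))      # trainable label embeddings, C in R^{l x k}\nH_fwd = torch.randn(1, n, k)             # forward Bi-LSTM states\nH_bwd = torch.randn(1, n, k)             # backward Bi-LSTM states\n\nA_fwd = C @ H_fwd.transpose(1, 2)        # (batch, l, n), Eq. (5)\nA_bwd = C @ H_bwd.transpose(1, 2)\nM_fwd = A_fwd @ H_fwd                    # (batch, l, k), Eq. (6)\nM_bwd = A_bwd @ H_bwd\nM_l = torch.cat([M_fwd, M_bwd], dim=-1)  # (batch, l, 2k), label-attention representation",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Label-Attention Mechanism",

"sec_num": "2.2.2"

},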
|
{ |
|
"text": "Both M (s) and M (l) are label-specific document representation, but they are different. The former focuses on the document content, while the later prefers to the semantic correlation between document content and label text. In order to take advantage these two parts, in this subsection, an attention fusion strategy is proposed to adaptively extract proper amount of information from them and build comprehensive label-specific document representation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 7, |
|
"end": 10, |
|
"text": "(s)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 17, |
|
"end": 20, |
|
"text": "(l)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Adaptive Attention Fusion Strategy", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "More specifically, two weight vectors (\u03b1, \u03b2 \u2208 R l ) are introduced to determine the importances of the above two mechanisms, which can obtained by a fully connected layer on the input M (s) and M (l) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 196, |
|
"end": 199, |
|
"text": "(l)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Adaptive Attention Fusion Strategy", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "\u03b1 = sigmoid(M (s) W3) \u03b2 = sigmoid(M (l) W4) (7) W 3 , W 4 \u2208 R 2k", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Adaptive Attention Fusion Strategy", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "are the parameters to be trained. \u03b1 j and \u03b2 j indicate the importances of self-attention and label-attention to construct the final document representation along the j-th label respectively. Therefore, we add the constraint on them:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Adaptive Attention Fusion Strategy", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03b1j + \u03b2j = 1", |
|
"eq_num": "(8)" |
|
} |
|
], |
|
"section": "Adaptive Attention Fusion Strategy", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Then, we can obtain the final document representation along the j-th label based on fusion weights as follows.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Adaptive Attention Fusion Strategy", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "Mj\u2022 = \u03b1jM (s) j\u2022 + \u03b2jM (l) j\u2022", |
|
"eq_num": "(9)" |
|
} |
|
], |
|
"section": "Adaptive Attention Fusion Strategy", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "The label-specific document representation along all labels can be described as a matrix M \u2208 R l\u00d72k .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Adaptive Attention Fusion Strategy", |
|
"sec_num": "2.3" |
|
}, |
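{

"text": "A hedged sketch of Eq. (7)-(9), assuming the M_s and M_l tensors from the previous sketches; normalizing alpha and beta by their sum is one simple way to satisfy Eq. (8), not necessarily the only one.\n\nimport torch\nimport torch.nn as nn\n\nl, k = 54, 64\nW3 = nn.Linear(2 * k, 1, bias=False)     # yields alpha_j per label, Eq. (7)\nW4 = nn.Linear(2 * k, 1, bias=False)     # yields beta_j per label\n\nM_s = torch.randn(1, l, 2 * k)           # self-attention branch\nM_l = torch.randn(1, l, 2 * k)           # label-attention branch\n\nalpha = torch.sigmoid(W3(M_s))           # (batch, l, 1)\nbeta = torch.sigmoid(W4(M_l))\nalpha, beta = alpha / (alpha + beta), beta / (alpha + beta)   # enforce alpha_j + beta_j = 1, Eq. (8)\nM = alpha * M_s + beta * M_l             # (batch, l, 2k), fused representation, Eq. (9)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Adaptive Attention Fusion Strategy",

"sec_num": "2.3"

},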
|
{ |
|
"text": "Once having the comprehensive label-specific document representation, we can build the multilabel text classifier via a multilayer perceptron with two fully connected layers. Mathematically, the predicted probability of each label for the coming document can be estimated vi\u00e2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label Prediction", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "y = sigmoid(W6f (W5M T ))", |
|
"eq_num": "(10)" |
|
} |
|
], |
|
"section": "Label Prediction", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "Here W 5 \u2208 R b\u00d72k , W 6 \u2208 R b are the trainable parameters of the fully connected layer and output layer respectively. f is the ReLU nonlinear activation function. The sigmoid function is used to transfer the output value into a probability, in this case, the cross-entropy loss can be used as the loss function which has been proved suitable for multilabel text classification task (Nam et al., 2014) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 383, |
|
"end": 401, |
|
"text": "(Nam et al., 2014)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label Prediction", |
|
"sec_num": "2.4" |
|
}, |
|
{

"text": "EQUATION",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [

{

"start": 0,

"end": 8,

"text": "EQUATION",

"ref_id": "EQREF",

"raw_str": "L = -\\sum_{i=1}^{N} \\sum_{j=1}^{l} \\big( y_{ij} \\log(\\hat{y}_{ij}) + (1 - y_{ij}) \\log(1 - \\hat{y}_{ij}) \\big)",

"eq_num": "(11)"

}

],

"section": "Label Prediction",

"sec_num": "2.4"

},
|
{ |
|
"text": "where N is the number of training documents, l is the number of labels,\u0177 ij \u2208 [0, 1] is the predicted probability, and y ij \u2208 {0, 1} indicates the ground truth of the i-th document along the j-th label.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label Prediction", |
|
"sec_num": "2.4" |
|
}, |
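{

"text": "Continuing the sketch (PyTorch assumed; the hidden width b is an illustrative assumption), the prediction layer of Eq. (10) and a batch-averaged version of the loss in Eq. (11) could look like:\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nl, k, b = 54, 64, 256                    # labels, hidden size, hidden width (assumed)\nW5 = nn.Linear(2 * k, b)\nW6 = nn.Linear(b, 1)\n\nM = torch.randn(1, l, 2 * k)             # fused label-specific document representation\ny_true = torch.randint(0, 2, (1, l)).float()\n\ny_hat = torch.sigmoid(W6(F.relu(W5(M))).squeeze(-1))   # (batch, l), Eq. (10)\nloss = F.binary_cross_entropy(y_hat, y_true)            # Eq. (11), averaged rather than summed",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Label Prediction",

"sec_num": "2.4"

},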
|
{ |
|
"text": "In this section, we evaluate the proposed model on four datasets (with various number of labels from 54 to 3956) by comparing with the state-of-the-art methods in terms of widely used metrics.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Datasets For the first three data sets, only last 500 words were kept for each document, while the last 50 words were used in KanShan-Cup dataset. Once the document has less than the predifined number of words, we extend it by padding zeros. All methods are trained and tested on the given training and testing datasets which are summarized in Table 1 . Evaluation Metrics: We use two kinds of metric, precision at top K (P @k) and the Normalized Discounted Cumulated Gains at top K (nDCG@k) to evaluate the prediction performance. P @k and nDCG@k are defined according to the predicted score vector\u0177 \u2208 R l and the ground truth label vector y \u2208 {0, 1} l as follows.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 344, |
|
"end": 351, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Setting", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "P @k = 1 k l\u2208rank k (\u0177) y l DCG@k = l\u2208rank k (\u0177) y l log(l + 1) nDCG@k = DCG@k min(k, y 0 ) l=1 1 log(l+1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setting", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where rank k (y) is the label indexes of the top k highest scores of the current prediction result. y 0 counts the number of relevant labels in the ground truth label vector y. Baseline Models: The proposed LSAN is a deep neural network model, thus the recent state-of-theart deep learning-based MLTC methods are selected as baselines.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setting", |
|
"sec_num": "3.1" |
|
}, |
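{

"text": "For reference, a small NumPy sketch of these two metrics, assuming y_hat holds the predicted scores and y the binary ground truth for one document (the log base cancels between DCG and its ideal value, so log2 is used here):\n\nimport numpy as np\n\ndef precision_at_k(y_hat, y, k):\n    top = np.argsort(-y_hat)[:k]         # indexes of the k highest scores\n    return y[top].sum() / k\n\ndef ndcg_at_k(y_hat, y, k):\n    top = np.argsort(-y_hat)[:k]\n    dcg = sum(y[i] / np.log2(r + 2) for r, i in enumerate(top))\n    ideal = sum(1.0 / np.log2(r + 2) for r in range(min(k, int(y.sum()))))\n    return dcg / ideal if ideal > 0 else 0.0",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experimental Setting",

"sec_num": "3.1"

},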
|
{ |
|
"text": "\u2022 XML-CNN: (Liu et al., 2017) adopts Convolutional Neural Network (CNN) and a dynamic pooling technique to extract high-level feature for multi-label text classification. \u2022 SGM: (Yang et al., 2018) applies a sequence generation model from input document to output label to construct the multi-label text classifier. \u2022 DXML: tries to explore the label correlation by considering the label structure from the label co-occurrence graph.", |
|
"cite_spans": [ |
|
{ |
|
"start": 11, |
|
"end": 29, |
|
"text": "(Liu et al., 2017)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 178, |
|
"end": 197, |
|
"text": "(Yang et al., 2018)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setting", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "\u2022 AttentionXML: (You et al., 2018) builds the label-aware document representation only based on the document content, thus, it can be taken as one special case of our proposed LSAN with arbitrarily setting \u03b1 = 0.", |
|
"cite_spans": [ |
|
{ |
|
"start": 16, |
|
"end": 34, |
|
"text": "(You et al., 2018)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setting", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "\u2022 EXAM: (Du et al., 2018) is the most similar work to LSAN because both of them exploit the label text to learn the interaction between words and labels. However, EXAM suffers from the situation where different labels have similar text. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 8, |
|
"end": 25, |
|
"text": "(Du et al., 2018)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setting", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "In this section, the proposed LSAN is evaluated on four benchmark datasets by comparing with five baselines in terms of P @K and nDCG@K(K = 1, 3, 5). Table 2 and Table 3 show the averaged performance of all test documents. According to the formula of P @K and nDCG@K, we know P @1 = nDCG@1, thus only nDCG@3 and nDCG@5 are listed in Table 3 . In each line, the best result is marked in bold. From Table 2 and 3, we can make a number of observations about these results. Firstly, XML-CNN is worse than other four methods because it only considers the document content but ignores the label correlation which has been proven very important for multi-label learning. Secondly, At-tentionXML is superior to EXAM on datasets R-CV1 and Kanshan-Cup, because these two datases have hierarchical label structures. In this case, parent label and child label may contain similar text, which makes them hard to distinguish according to the text-based embedding and further reduce the performance of EXAM. By compar- ing with EXAM and the proposed LSAN, however, AttentionXML performs worse on EUR-Lex dataset The main reason is that AttentionXML only focuses on the document content, which will make it not sufficiently trained once there are only few documents in some labels. Fortunately, EX-AM and LSAN benefit from the label texts. Last one, as expected, is that LSAN consistently outperforms all baselines on all experimental datasets. This result further confirms that the proposed adaptive attention fusion strategy is much helpful to learn the label-specific document representation for multi-label text classification.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 150, |
|
"end": 169, |
|
"text": "Table 2 and Table 3", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 333, |
|
"end": 340, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
}, |
|
{ |
|
"start": 397, |
|
"end": 404, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparison Results and Discussion", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In order to verify the performance of LSAN on low-frequency labels, we divided labels in EUR-Lex into three groups according to their occurring frequency. Figure 2 shows the distribution of label frequency on EUR-Lex, F is the frequency of label. Among it, nearly 55% of labels occur between 1 and 5 times to form the first label group (Group1). The labels appearing 5-37 times are assigned into Group2, which is 35% of the whole label set. The remaining 10% frequent labels form the last group (Group3). Obviously, Group1 is much harder than other two groups due to the lack of training documents. Figure 3 shows the prediction results in terms of P @1, P @3 and P @5 obtained by AttentionXML, EXAM and LSAN. Three methods become better and better from Group1 to Group3, which is reasonable because more and more documents are included in each label from Group1 to Group3. L-SAN significantly improves the prediction performance on Group1. Especially, LSAN obtains an average of more than 83.82%, 182.55%, 244.62% gain on three metrices for group 1 to Attention-XML, and 3.85%, 27.19%, 58.27% gain to EX-AM. This result demonstrates the superiority of the proposed model on multi-label text data with tail labels.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 155, |
|
"end": 163, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 599, |
|
"end": 607, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparison on Sparse Data", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "The proposed LSAN can be taken as a joint attention strategy including three parts. One is selfattention based on document content (denoted as A). The second one is label-attention based on label text (denoted as L). Another one is fusion attention by adaptively integrating A and L with proper weights (denoted as W). In this section, we try to demonstrate the effection of each component via an ablation test. Figure 4 lists the prediction results on four datasets in terms of P @1, P @3 and P @5. (i.e., S+L gets better results than S and L). S prefers to finding the useful content when constructing the label-specific document representation, but it ignores the label information. L takes adantage of label text to explicitly determine the semantic relation between documents and labels, however, label text is not easy to distinguish the difference between labels (e.g., Management vs. Management movies). Therefore, coupling with both attentions is really reasonable. Furthermore, adaptively extracting proper amount of information from these two attentions benefits the final multi-label text classification.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 412, |
|
"end": 420, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ablation Test", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "To further verify the effectiveness of attention adaptive fusion, Figure 5 lists the distribution of weights on Self-attention and Label-attention on two representative datasets, one for sparse data (EUR-Lex) and the other for dense data (AAPD). As expected, the label-attention is much more useful than self-attention for sparse data, vice versa for dense data. In dense data, each label has sufficient documents, therefore, self-attention can sufficiently obtain label-specific document representation. On the other hand, label text is helpful to extract the semantic relations between labels and documents. Results on other two datasets have the similar trends which are omitted due to page limitation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 66, |
|
"end": 74, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ablation Test", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "For investigating the effect of label-attention, we visualize the attention weights on the original document using heat map, as shown in Figure 6 . Among it, the example AAPD document belongs to two categories Computer Vision and Neural and Evolutionary Computing. From the attention weights, we can see that each category has its own most related words, which confirms that the proposed label specific attention network is able to extract the label-aware content and further construct label-specific document representation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 137, |
|
"end": 145, |
|
"text": "Figure 6", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ablation Test", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "In the line of MLTC, most works focus on two issues, one is document representation learning and Figure 5 : Weight distribution for two components on EUR-Lex (left subfigure) and AAPD (right subfigure). Horizontal axis is the range of weight from 0 to 1 with 0.1 gap. Vertical axis is the frequency that the specific range occurs in current label group. the other is label correlation detection.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 97, |
|
"end": 105, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "For document representation, along with the recent success in CNN, many works are proposed based on CNN (Kim, 2014; Liu et al., 2017; Chen et al., 2017) , which can capture the local correlations from the consecutive context windows. Although they obain promising results, these methods suffer from the limitation of window size so that they cannot determine the long-distance dependency of text. Meanwhile, they treat all words equally no matter how noisy the word is. Later, RNN and attention mechanism are introduced to get brilliant results (Yang et al., 2016) . To implicitly learn the document representation for each label, the self-attention mechanism (Lin et al., 2017) is adopted for multi-label classification (You et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 115, |
|
"text": "(Kim, 2014;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 116, |
|
"end": 133, |
|
"text": "Liu et al., 2017;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 134, |
|
"end": 152, |
|
"text": "Chen et al., 2017)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 545, |
|
"end": 564, |
|
"text": "(Yang et al., 2016)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 660, |
|
"end": 678, |
|
"text": "(Lin et al., 2017)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 721, |
|
"end": 739, |
|
"text": "(You et al., 2018)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "To determine the label correlation among multilabel data, in literatures, researchers proposed various methods. Kurata et al. (2016) adopt an initialization method to leverage label co-occurrence information. SLEEC (Bhatia et al., 2015) divides dataset into several clusters, and in each cluster it detects embedding vectors by capturing non-linear label correlation. DXML establishes an explicit label co-occurrence graph to explore label embedding in low-dimension laten-t space. Yang et al. (2018) use sequence-tosequence(Seq2Seq) model to consider the correlations between labels. Recently, the textual information of labels are used to guide MLTC. EX-AM (Du et al., 2018) introduces the interaction mechanism to incorporate word-level matching signals into the text classification task. GILE (Pappas and Henderson, 2019) proposes a joint inputlabel embedding model for neural text classification. Unfortunately, they cannot work well when there is no big difference between label texts.", |
|
"cite_spans": [ |
|
{ |
|
"start": 112, |
|
"end": 132, |
|
"text": "Kurata et al. (2016)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 215, |
|
"end": 236, |
|
"text": "(Bhatia et al., 2015)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 482, |
|
"end": 500, |
|
"text": "Yang et al. (2018)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 659, |
|
"end": 676, |
|
"text": "(Du et al., 2018)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "A new label-specific attention network, in this paper, is proposed for multi-label text classification. It makes use of document content and label text to learn the label-specific document representation with the aid of self-attention and label-attention mechanisms. An adaptive fusion is designed to effectively integrate these two attention mechanisms to improve the final prediction performance. Extensive experiments on four benchmark datasets prove the superiority of LSAN by comparing with the state-of-the-art methods, especially on the dataset with large subset of low-frequency labels.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In real applications, more precious information can be collected, such as label description, la-bel topology (e.g., hierarchical structure) and etc. Therefore, it is interesting to extend the current model with such extra information.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "https://github.com/EMNLP2019LSAN/ LSAN/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/lancopku/SGM 3 https://drive.google.com/drive/ folders/1KQMBZgACUm-ZZcSrQpDPlB6CFKvf9Gfb 4 https://www.biendata.com/competition/ zhihu/data/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank the outstanding anonymous reviewers for their helpful comments to improve our manuscript. This work was supported in part by the National Natural ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 161, |
|
"end": 168, |
|
"text": "Natural", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Docbert: Bert for document classification", |
|
"authors": [ |
|
{ |
|
"first": "Ashutosh", |
|
"middle": [], |
|
"last": "Adhikari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Achyudh", |
|
"middle": [], |
|
"last": "Ram", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raphael", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1904.08398" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashutosh Adhikari, Achyudh Ram, Raphael Tang, and Jimmy Lin. 2019. Docbert: Bert for document clas- sification. arXiv preprint arXiv:1904.08398.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "A hierarchical structured selfattentive model for extractive document summarization (hssas)", |
|
"authors": [ |
|
{ |
|
"first": "Kamal", |
|
"middle": [], |
|
"last": "Al-Sabahi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhang", |
|
"middle": [], |
|
"last": "Zuping", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammed", |
|
"middle": [], |
|
"last": "Nadher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "IEEE Access", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "24205--24212", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kamal Al-Sabahi, Zhang Zuping, and Mohammed Nadher. 2018. A hierarchical structured self- attentive model for extractive document summariza- tion (hssas). IEEE Access, 6:24205-24212.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Sparse local embeddings for extreme multi-label classification", |
|
"authors": [ |
|
{ |
|
"first": "Kush", |
|
"middle": [], |
|
"last": "Bhatia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Himanshu", |
|
"middle": [], |
|
"last": "Jain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Purushottam", |
|
"middle": [], |
|
"last": "Kar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manik", |
|
"middle": [], |
|
"last": "Varma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prateek", |
|
"middle": [], |
|
"last": "Jain", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "730--738", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kush Bhatia, Himanshu Jain, Purushottam Kar, Manik Varma, and Prateek Jain. 2015. Sparse local em- beddings for extreme multi-label classification. In Advances in neural information processing systems, pages 730-738.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Senticnet 3: a common and common-sense knowledge base for cognition-driven sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Cambria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Olsher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dheeraj", |
|
"middle": [], |
|
"last": "Rajagopal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Twenty-eighth AAAI conference on artificial intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erik Cambria, Daniel Olsher, and Dheeraj Rajagopal. 2014. Senticnet 3: a common and common-sense knowledge base for cognition-driven sentiment anal- ysis. In Twenty-eighth AAAI conference on artificial intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Ensemble application of convolutional and recurrent neural networks for multi-label text categorization", |
|
"authors": [ |
|
{ |
|
"first": "Guibin", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deheng", |
|
"middle": [], |
|
"last": "Ye", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhenchang", |
|
"middle": [], |
|
"last": "Xing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jieshan", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Cambria", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "2017 International Joint Conference on Neural Networks (IJCN-N)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2377--2383", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guibin Chen, Deheng Ye, Zhenchang Xing, Jieshan Chen, and Erik Cambria. 2017. Ensemble applica- tion of convolutional and recurrent neural networks for multi-label text categorization. In 2017 Interna- tional Joint Conference on Neural Networks (IJCN- N), pages 2377-2383. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Explicit interaction model towards text classification", |
|
"authors": [ |
|
{ |
|
"first": "Cunxiao", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhaozheng", |
|
"middle": [], |
|
"last": "Chin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fuli", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tian", |
|
"middle": [], |
|
"last": "Gan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liqiang", |
|
"middle": [], |
|
"last": "Nie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1811.09386" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cunxiao Du, Zhaozheng Chin, Fuli Feng, Lei Zhu, Tian Gan, and Liqiang Nie. 2018. Explicit in- teraction model towards text classification. arXiv preprint arXiv:1811.09386.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Convolutional neural networks for sentence classification", |
|
"authors": [ |
|
{ |
|
"first": "Yoon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoon Kim. 2014. Convolutional neural network- s for sentence classification. arXiv preprint arX- iv:1408.5882.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Diederik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1412.6980" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Ask me anything: Dynamic memory networks for natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Ankit", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ozan", |
|
"middle": [], |
|
"last": "Irsoy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Ondruska", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Bradbury", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ishaan", |
|
"middle": [], |
|
"last": "Gulrajani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Zhong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Romain", |
|
"middle": [], |
|
"last": "Paulus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1378--1387", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ankit Kumar, Ozan Irsoy, Peter Ondruska, Mohit Iyy- er, James Bradbury, Ishaan Gulrajani, Victor Zhong, Romain Paulus, and Richard Socher. 2016. Ask me anything: Dynamic memory networks for natural language processing. In International Conference on Machine Learning, pages 1378-1387.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Improved neural network-based multi-label classification with better initialization leveraging label cooccurrence", |
|
"authors": [ |
|
{ |
|
"first": "Gakuto", |
|
"middle": [], |
|
"last": "Kurata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Xiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bowen", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "521--526", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gakuto Kurata, Bing Xiang, and Bowen Zhou. 2016. Improved neural network-based multi-label classifi- cation with better initialization leveraging label co- occurrence. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 521-526.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Recurrent convolutional neural networks for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Siwei", |
|
"middle": [], |
|
"last": "Lai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liheng", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Twenty-ninth AAAI conference on artificial intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Siwei Lai, Liheng Xu, Kang Liu, and Jun Zhao. 2015. Recurrent convolutional neural networks for tex- t classification. In Twenty-ninth AAAI conference on artificial intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Rcv1: A new benchmark collection for text categorization research", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "David", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tony", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fan", |
|
"middle": [], |
|
"last": "Rose", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Journal of machine learning research", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "361--397", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David D Lewis, Yiming Yang, Tony G Rose, and Fan Li. 2004. Rcv1: A new benchmark collection for text categorization research. Journal of machine learning research, 5(Apr):361-397.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "A structured self-attentive sentence embedding", |
|
"authors": [ |
|
{ |
|
"first": "Zhouhan", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minwei", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cicero", |
|
"middle": [ |
|
"Nogueira" |
|
], |
|
"last": "dos Santos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mo", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Xiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bowen", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1703.03130" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhouhan Lin, Minwei Feng, Cicero Nogueira dos San- tos, Mo Yu, Bing Xiang, Bowen Zhou, and Yoshua Bengio. 2017. A structured self-attentive sentence embedding. arXiv preprint arXiv:1703.03130.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Deep learning for extreme multilabel text classification", |
|
"authors": [ |
|
{ |
|
"first": "Jingzhou", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Cheng", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuexin", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 40th International ACM SIGIR Conference on Research and Development in Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "115--124", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jingzhou Liu, Wei-Cheng Chang, Yuexin Wu, and Y- iming Yang. 2017. Deep learning for extreme multi- label text classification. In Proceedings of the 40th International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 115-124. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Recurrent neural network for text classification with multi-task learning", |
|
"authors": [ |
|
{ |
|
"first": "Pengfei", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xipeng", |
|
"middle": [], |
|
"last": "Qiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuanjing", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pengfei Liu, Xipeng Qiu, and Xuanjing Huang. 2016. Recurrent neural network for text classification with multi-task learning. arXiv preprint arX- iv:1605.05101.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Efficient pairwise multilabel classification for largescale problems in the legal domain", |
|
"authors": [ |
|
{ |
|
"first": "Eneldo", |
|
"middle": [], |
|
"last": "Loza Mencia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johannes", |
|
"middle": [], |
|
"last": "F\u00fcrnkranz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Joint European Conference on Machine Learning and Knowledge Discovery in Databases", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "50--65", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eneldo Loza Mencia and Johannes F\u00fcrnkranz. 2008. Efficient pairwise multilabel classification for large- scale problems in the legal domain. In Join- t European Conference on Machine Learning and Knowledge Discovery in Databases, pages 50-65. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Largescale multi-label text classification\u0142revisiting neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Jinseok", |
|
"middle": [], |
|
"last": "Nam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jungi", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eneldo", |
|
"middle": [], |
|
"last": "Loza Menc\u00eda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johannes", |
|
"middle": [], |
|
"last": "F\u00fcrnkranz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Joint european conference on machine learning and knowledge discovery in databases", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "437--452", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jinseok Nam, Jungi Kim, Eneldo Loza Menc\u00eda, Iryna Gurevych, and Johannes F\u00fcrnkranz. 2014. Large- scale multi-label text classification\u0142revisiting neural networks. In Joint european conference on ma- chine learning and knowledge discovery in databas- es, pages 437-452. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Gile: A generalized input-label embedding for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Nikolaos", |
|
"middle": [], |
|
"last": "Pappas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Henderson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Transactions of the Association for Computational Linguistics (TACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikolaos Pappas and James Henderson. 2019. Gile: A generalized input-label embedding for text classifi- cation. Transactions of the Association for Compu- tational Linguistics (TACL), 7.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. Glove: Global vectors for word representation. In Proceedings of the 2014 confer- ence on empirical methods in natural language pro- cessing (EMNLP), pages 1532-1543.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Deep semantic role labeling with self-attention", |
|
"authors": [ |
|
{ |
|
"first": "Zhixing", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mingxuan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yidong", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Thirty-Second AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhixing Tan, Mingxuan Wang, Jun Xie, Yidong Chen, and Xiaodong Shi. 2018. Deep semantic role label- ing with self-attention. In Thirty-Second AAAI Con- ference on Artificial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Sgm: sequence generation model for multi-label classification", |
|
"authors": [ |
|
{ |
|
"first": "Pengcheng", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuming", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Houfeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3915--3926", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pengcheng Yang, Xu Sun, Wei Li, Shuming Ma, Wei Wu, and Houfeng Wang. 2018. Sgm: sequence gen- eration model for multi-label classification. In Pro- ceedings of the 27th International Conference on Computational Linguistics, pages 3915-3926.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Hierarchical attention networks for document classification", |
|
"authors": [ |
|
{ |
|
"first": "Zichao", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diyi", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Smola", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1480--1489", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zichao Yang, Diyi Yang, Chris Dyer, Xiaodong He, Alex Smola, and Eduard Hovy. 2016. Hierarchi- cal attention networks for document classification. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, pages 1480-1489.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Attentionxml: Extreme multi-label text classification with multilabel attention based recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Ronghui", |
|
"middle": [], |
|
"last": "You", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Suyang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hiroshi", |
|
"middle": [], |
|
"last": "Mamitsuka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shanfeng", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1811.01727" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ronghui You, Suyang Dai, Zihan Zhang, Hiroshi Mamitsuka, and Shanfeng Zhu. 2018. Attentionxm- l: Extreme multi-label text classification with multi- label attention based recurrent neural networks. arXiv preprint arXiv:1811.01727.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Deep extreme multi-label learning", |
|
"authors": [ |
|
{ |
|
"first": "Wenjie", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junchi", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiangfeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongyuan", |
|
"middle": [], |
|
"last": "Zha", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 ACM on International Conference on Multimedia Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "100--107", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wenjie Zhang, Junchi Yan, Xiangfeng Wang, and Hongyuan Zha. 2018. Deep extreme multi-label learning. In Proceedings of the 2018 ACM on Inter- national Conference on Multimedia Retrieval, pages 100-107. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Text classification improved by integrating bidirectional lstm with two-dimensional max pooling", |
|
"authors": [ |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhenyu", |
|
"middle": [], |
|
"last": "Qi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Suncong", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiaming", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongyun", |
|
"middle": [], |
|
"last": "Bao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peng Zhou, Zhenyu Qi, Suncong Zheng, Jiaming Xu, Hongyun Bao, and Bo Xu. 2016. Text classifica- tion improved by integrating bidirectional lstm with two-dimensional max pooling. arXiv preprint arX- iv:1611.06639.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "The architecture of the proposed label-specific attention network model (LSAN).", |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "The label distribution of EUR-Lex", |
|
"num": null |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Result of the ablation test. 'A' denotes the Self-Attention, 'L' denotes the Label-Attention, 'W' denotes the Fusion Attention with Adaptive Weights.", |
|
"num": null |
|
}, |
|
"FIGREF3": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Demonstration of words with largest label-attention weights (A (l) ) in one AAPD document belonging to two categories:Computer Vision and Neural and Evolutionary Computing.", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"content": "<table><tr><td>Datasets</td><td>N</td><td>M</td><td>D</td><td colspan=\"5\">LLLWW</td></tr><tr><td>RCV1</td><td>23,149</td><td>781,265</td><td>47,236</td><td>103</td><td>3.18</td><td>729.67</td><td>259.47</td><td>269.23</td></tr><tr><td>AAPD</td><td>54,840</td><td>1,000</td><td>69,399</td><td>54</td><td colspan=\"2\">2.41 2444.04</td><td>163.42</td><td>171.65</td></tr><tr><td>EUR-Lex</td><td>11,585</td><td>3,865</td><td colspan=\"3\">171,120 3,956 5.32</td><td>15.59</td><td colspan=\"2\">1,225.20 1,248.07</td></tr><tr><td colspan=\"7\">Kanshan-Cup 2,799,967 200,000 411,721 1999 2.34 3513.13</td><td>38.06</td><td>35.48</td></tr></table>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Summary of Experimental Datasets. is the number of training instances, M is the number of test instances, D is the total number of words, L is the total number of classes,L is the average number of labels per document, L is the average number of documents per label,W is the average number of words per document in the training set,W is the average number of words per document in the testing set." |
|
}, |
|
"TABREF2": { |
|
"content": "<table><tr><td>Parameter Setting: For the KanShan-Cup</td></tr><tr><td>dataset, we use the pre-trained word embedding</td></tr><tr><td>and label embedding public in the official web-</td></tr><tr><td>site, where the embedding space size is 256, i.e.,</td></tr><tr><td>k = 256. The whole model is trained via Adam (Kingma</td></tr><tr><td>and Ba, 2014) with the learning rate being 0.001.</td></tr><tr><td>The parameters of all baselines are either adopted</td></tr><tr><td>from their original papers or determined by exper-</td></tr><tr><td>iments.</td></tr></table>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "The parameters corresponding to the weights between neurals are d a = 200 for W 1 and W 2 , b = 256 for W 5 and W 6 . For other three datasets, k = 300, d a = 200 and b = 300." |
|
}, |
|
"TABREF4": { |
|
"content": "<table><tr><td>Datasets</td><td>Metrics</td><td colspan=\"2\">XML-CNN DXML</td><td>SGM</td><td colspan=\"3\">AttentionXML EXAM LSAN(ours)</td></tr><tr><td>RCV1</td><td>nDCG@3</td><td>89.89%</td><td colspan=\"2\">89.83% 91.76%</td><td>91.88%</td><td>86.85%</td><td>92.83%</td></tr><tr><td/><td>nDCG@5</td><td>90.77%</td><td colspan=\"2\">90.21% 90.69%</td><td>92.70%</td><td>87.71%</td><td>93.43%</td></tr><tr><td>AAPD</td><td>nDCG@3</td><td>71.12%</td><td colspan=\"2\">77.23% 72.36%</td><td>78.01%</td><td>79.10%</td><td>80.84%</td></tr><tr><td/><td>nDCG@5</td><td>75.93%</td><td colspan=\"2\">80.99% 75.35%</td><td>82.31%</td><td>82.79%</td><td>84.78%</td></tr><tr><td>EUR-Lex</td><td>nDCG@3</td><td>58.62%</td><td colspan=\"2\">63.96% 60.72%</td><td>56.21%</td><td>65.12%</td><td>68.32%</td></tr><tr><td/><td>nDCG@5</td><td>53.10%</td><td colspan=\"2\">57.60% 55.24%</td><td>50.78%</td><td>59.43%</td><td>62.47%</td></tr><tr><td colspan=\"2\">Kanshan-Cup nDCG@3</td><td>46.65%</td><td colspan=\"2\">49.54% 46.90%</td><td>51.03%</td><td>49.32%</td><td>51.43%</td></tr><tr><td/><td>nDCG@5</td><td>49.60%</td><td colspan=\"2\">52.16% 50.47%</td><td>53.96%</td><td>49.74%</td><td>54.36%</td></tr></table>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Comparing LSAN with five baselines in terms of P @K (K=1,3,5)on four benchmark datasets." |
|
}, |
|
"TABREF5": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Comparing LSAN with five baselines in terms of nDCG@K (K=3,5) on four benchmark datasets." |
|
} |
|
} |
|
} |
|
} |