|
{ |
|
"paper_id": "D19-1043", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T16:09:18.586222Z" |
|
}, |
|
"title": "Investigating Capsule Network and Semantic Feature on Hyperplanes for Text Classification", |
|
"authors": [ |
|
{ |
|
"first": "Chunning", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "EBUPT Information Technology Co., Ltd", |
|
"location": { |
|
"postCode": "100191", |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "duchunning@ebupt.com" |
|
}, |
|
{ |
|
"first": "Haifeng", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "sunhaifeng1@ebupt.com" |
|
}, |
|
{ |
|
"first": "Jingyu", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "wangjingyu@ebupt.com" |
|
}, |
|
{ |
|
"first": "Qi", |
|
"middle": [], |
|
"last": "Qi", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "qiqi@ebupt.com" |
|
}, |
|
{ |
|
"first": "Jianxin", |
|
"middle": [], |
|
"last": "Liao", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Chun", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "As an essential component of natural language processing, text classification relies on deep learning in recent years. Various neural networks are designed for text classification on the basis of word embedding. However, polysemy is a fundamental feature of the natural language, which brings challenges to text classification. One polysemic word contains more than one sense, while the word embedding procedure conflates different senses of a polysemic word into a single vector. Extracting the distinct representation for the specific sense could thus lead to fine-grained models with strong generalization ability. It has been demonstrated that multiple senses of a word actually reside in linear superposition within the word embedding so that specific senses can be extracted from the original word embedding. Therefore, we propose to use capsule networks to construct the vectorized representation of semantics and utilize hyperplanes to decompose each capsule to acquire the specific senses. A novel dynamic routing mechanism named 'routing-on-hyperplane' will select the proper sense for the downstream classification task. Our model is evaluated on 6 different datasets, and the experimental results show that our model is capable of extracting more discriminative semantic features and yields a significant performance gain compared to other baseline methods.", |
|
"pdf_parse": { |
|
"paper_id": "D19-1043", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "As an essential component of natural language processing, text classification relies on deep learning in recent years. Various neural networks are designed for text classification on the basis of word embedding. However, polysemy is a fundamental feature of the natural language, which brings challenges to text classification. One polysemic word contains more than one sense, while the word embedding procedure conflates different senses of a polysemic word into a single vector. Extracting the distinct representation for the specific sense could thus lead to fine-grained models with strong generalization ability. It has been demonstrated that multiple senses of a word actually reside in linear superposition within the word embedding so that specific senses can be extracted from the original word embedding. Therefore, we propose to use capsule networks to construct the vectorized representation of semantics and utilize hyperplanes to decompose each capsule to acquire the specific senses. A novel dynamic routing mechanism named 'routing-on-hyperplane' will select the proper sense for the downstream classification task. Our model is evaluated on 6 different datasets, and the experimental results show that our model is capable of extracting more discriminative semantic features and yields a significant performance gain compared to other baseline methods.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Text classification is a crucial task in natural language processing, which has many applications, such as sentiment analysis, intent identification and topic labeling [Aggarwal and Zhai, 2012; Wang and Manning, 2012a] . Recent years, many studies rely on neural networks and have shown promising performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 168, |
|
"end": 193, |
|
"text": "[Aggarwal and Zhai, 2012;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 194, |
|
"end": 218, |
|
"text": "Wang and Manning, 2012a]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The success of deep learning model for NLP is based on the progress in learning distributed word representations in semantic vector space, where each word is mapped to a vector called a word embedding. The word's representation is calculated relying on the distributional hypothesisthe assumption that semantically similar or related words appear in similar contexts [Mikolov et al., 2013; Langendoen, 1959] . Normally, each word's representation is constructed by counting all its context features. However, for the polysemic word which contains multiple senses, the context features of different senses are mixed together, leading to inaccurate word representation. As demonstrated in [Arora et al., 2018] , multiple senses of a word actually reside in linear superposition within the word embedding:", |
|
"cite_spans": [ |
|
{ |
|
"start": 367, |
|
"end": 389, |
|
"text": "[Mikolov et al., 2013;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 390, |
|
"end": 407, |
|
"text": "Langendoen, 1959]", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 687, |
|
"end": 707, |
|
"text": "[Arora et al., 2018]", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "v \u2248 \u03b1 1 v sense1 +\u03b1 2 v sense2 +\u03b1 3 v sense3 +\u2022 \u2022 \u2022 , (1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "where coefficients \u03b1 i are nonnegative and v sense1 , v sense2 ... are the hypothetical embeddings of different senses. As a result, the word embedding v deviates from any sense, which brings ambiguity for the subsequent task. It demands us to extract the separate senses from the overall word representation to avoid the ambiguity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
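As a minimal illustration of this superposition view (hypothetical sense vectors and mixing coefficients, not data from the paper), the following NumPy sketch builds a word embedding from two sense vectors and shows that the mixture is close to neither sense on its own:

```python
import numpy as np

# Hypothetical illustration of Eq. (1): a word embedding as a nonnegative
# mixture of (unknown) sense vectors. All values are made up for demonstration.
rng = np.random.default_rng(0)
d = 8
v_sense1 = rng.normal(size=d)          # e.g. the "financial bank" sense
v_sense2 = rng.normal(size=d)          # e.g. the "river bank" sense
alpha1, alpha2 = 0.7, 0.3              # nonnegative mixture coefficients

v = alpha1 * v_sense1 + alpha2 * v_sense2   # the observed word embedding

# The mixed vector deviates from both individual senses:
cos = lambda a, b: a @ b / (np.linalg.norm(a) * np.linalg.norm(b))
print(cos(v, v_sense1), cos(v, v_sense2))
```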
|
{ |
|
"text": "Similar to the word embedding, the recently proposed capsule network constructs vectorized representations for different entities [Hinton et al., 2018; Sabour et al., 2017; Hinton et al., 2011] . A dynamic routing mechanism, 'routingby-agreement', is implemented to ensure that the output of the capsule gets sent to an appropriate parent in the layer above. Very recently, capsule network is applied in the field of NLP where each capsule is obtained from the word embedding. Compared with the standard neural nets using a single scalar (the output of a neural unit) to represent the detected semantics, the vectorized rep-resentation in capsule network enables us to utilize hyperplanes to extract the component from the overall representation and get the specific sense.", |
|
"cite_spans": [ |
|
{ |
|
"start": 130, |
|
"end": 151, |
|
"text": "[Hinton et al., 2018;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 152, |
|
"end": 172, |
|
"text": "Sabour et al., 2017;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 173, |
|
"end": 193, |
|
"text": "Hinton et al., 2011]", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Therefore, we propose to attach different hyperplanes to capsules to tackle the ambiguity caused by polysemy. Each capsule is decomposed by projecting the output vector on the hyperplanes, which can extract the specific semantic feature. The projected capsule denotes a specific sense of the target words, and a novel dynamic routing mechanism named 'routing-on-hyperplane' will decide which specific senses are selected for the downstream classification and which senses are ignored. Similar to routing-by-agreement [Sabour et al., 2017] , we aim to activate a higher-level capsule whose output vector is agreed with the predictions from the lower-level capsules. Differently, before the active capsule at a level makes predictions for the next-level capsules, the capsule's output vector will be projected on the trainable hyperplane. The hyperplanes will be trained discriminatively to extract specific senses. Moreover, in order to encourage the diversity of the hyperplanes, a well-designed penalization term is implemented in our model. We define the cosine similarity between the normal vectors of the hyperplanes as a measure of redundancy, and minimize it together with the original loss. We test our model (HCapsNet) on the text classification task and conduct extensive experiments on 6 datasets. Experimental results show that the proposed model could learn more discriminative features and outperform other baselines. Our main contributions are summarized as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 517, |
|
"end": 538, |
|
"text": "[Sabour et al., 2017]", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We explore the capsule network for text classification and propose to decompose capsules by means of projecting on hyperplanes to tackle the polysemy problem in natural language.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Propose routing-on-hyperplane to dynamically select specific senses for the subsequent classification. A penalization term is designed to obtain diversified hyperplanes and offer multiple senses representations of words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Our work is among the few studies which prove that the idea of capsule networks have promising applications on natural language processing tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "2 Related Work", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Various neural networks for text classification have been proposed based on the word embedding. Commonly used models include convolutional neural networks [Kim, 2014] , recursive neural network [Socher et al., 2013] and recurrent neural networks. There have been several recent studies of CNN for text classification in the large training dataset and deep complex model structures [Schwenk et al., 2017; Johnson and Zhang, 2017] . Some models were proposed to combine the strength of CNN and RNN [Lai et al., 2015; Zhang et al., 2016] . Moreover, the accuracy was further improved by attention-based neural networks [Lin et al., 2017; Vaswani et al., 2017; Yang et al., 2016] . However, these models are less efficient than capsule networks. As a universal phenomenon of language, polysemy calls much attention of linguists. It has been demonstrated that learning a distinct representation for each sense of an ambiguous word could lead to more powerful and fine-grained models based on vector-space representations [Li and Jurafsky, 2015 ].", |
|
"cite_spans": [ |
|
{ |
|
"start": 155, |
|
"end": 166, |
|
"text": "[Kim, 2014]", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 194, |
|
"end": 215, |
|
"text": "[Socher et al., 2013]", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 381, |
|
"end": 403, |
|
"text": "[Schwenk et al., 2017;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 404, |
|
"end": 428, |
|
"text": "Johnson and Zhang, 2017]", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 496, |
|
"end": 514, |
|
"text": "[Lai et al., 2015;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 515, |
|
"end": 534, |
|
"text": "Zhang et al., 2016]", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 616, |
|
"end": 634, |
|
"text": "[Lin et al., 2017;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 635, |
|
"end": 656, |
|
"text": "Vaswani et al., 2017;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 657, |
|
"end": 675, |
|
"text": "Yang et al., 2016]", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 1016, |
|
"end": 1038, |
|
"text": "[Li and Jurafsky, 2015", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Networks for Text Classification", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Capsule network was proposed to improve the representational limitations of CNN and RNN by extracting features in the form of vectors. The technique was firstly proposed in [Hinton et al., 2011] and improved in [Sabour et al., 2017] and [Hinton et al., 2018] . Vector-based representation is able to encode latent inter-dependencies between groups of input features during the learning process. Introducing capsules also allows us to use routing mechanism to generate high-level features which is a more efficient way for feature encoding.", |
|
"cite_spans": [ |
|
{ |
|
"start": 173, |
|
"end": 194, |
|
"text": "[Hinton et al., 2011]", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 211, |
|
"end": 232, |
|
"text": "[Sabour et al., 2017]", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 237, |
|
"end": 258, |
|
"text": "[Hinton et al., 2018]", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Capsule Network", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Several types of capsule networks have been proposed for natural language processing. investigated capsule networks with routing-by-agreement for text classification. They also found that capsule networks exhibit significant improvement when transfer single-label to multi-label text classification. Capsule networks also show a good performance in multi-task learning [Xiao et al., 2018] . Xia et al. [2018] discovered the capsule-based model's potential on zeroshot learning. However, existing capsule networks for natural language processing cannot model the polysemic words or phrases which contain multiple senses.", |
|
"cite_spans": [ |
|
{ |
|
"start": 369, |
|
"end": 388, |
|
"text": "[Xiao et al., 2018]", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 391, |
|
"end": 408, |
|
"text": "Xia et al. [2018]", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Capsule Network", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "In this section, we begin by introducing the idea of routing-on-hyperplane and formulate it in details. Then the architecture of the HCapsNet is formally presented in the second subsection. Finally, the penalization term and loss function implemented in this paper are explained.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Suppose that we have already decided on the output vectors of all the capsules in the layer L and we now want to decide which capsules to activate in the layer L + 1. We should also consider how to assign each active capsule in the layer L to one active capsule in the layer L + 1. The output vector of capsule i in the layer L is denoted by u i , and the output vector of capsule j in the layer L + 1 is denoted by v j .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Routing-on-hyperplane", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Firstly, for all the capsules in the layer L, we attach the trainable hyperplane to each capsule. Capsule's output vectors will be projected on the hyperplanes before making predictions. More specifically, for capsule i, we define the trainable matrix W h i , which is used to decide the normal vector w i of the attached hyperplane. By restricting w i 2 = 1, we can get the projected capsule's output vector u \u22a5i :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Routing-on-hyperplane", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "w i = W h i u i . (2) u \u22a5i = u i \u2212 w T i u i w i .", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Routing-on-hyperplane", |
|
"sec_num": "3.1" |
|
}, |
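As a sketch of Eqs. (2)-(3), assuming u_i and W^h_i are plain NumPy arrays and that the unit-norm constraint on w_i is enforced by explicit normalization (an illustrative choice, not necessarily the authors' implementation):

```python
import numpy as np

def project_on_hyperplane(u_i: np.ndarray, W_h_i: np.ndarray) -> np.ndarray:
    """Project capsule output u_i onto the hyperplane defined by W_h_i (Eqs. 2-3)."""
    w_i = W_h_i @ u_i                      # Eq. (2): normal vector from a trainable matrix
    w_i = w_i / np.linalg.norm(w_i)        # enforce ||w_i||_2 = 1
    return u_i - (w_i @ u_i) * w_i         # Eq. (3): remove the component along w_i
```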
|
{ |
|
"text": "In this way, the output vectors of capsules will be projected on the specific hyperplanes to get different components which denote the specific senses in our task. To retain or ignore a specific sense (projected capsule) will be decided through an iterative procedure. The procedure contains making predictions and calculating the agreement. When one projected capsule's prediction is highly agreed with one target parent capsule, the probability of retaining the projected capsule gets gradually larger. In another word, when a specific sense is highly relevant with the subsequent classification, we choose to keep it and ignore others. Therefore, the u \u22a5i will then be used to make predictions for the L + 1 layer's capsules and calculate coupling coefficients c ij . When making Algorithm 1 Routing-on-hyperplane returns the output vector of capsule j in the layer L + 1 given the output vector of capsule i in the layer L. W ij are trainable parameters denoting the transformation matrix between the two adjacent layers. W h i are trainable parameters for each capsule i to calculate the proposed hyperplane's normal vectors w i , we restrict that w i 2 = 1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Routing-on-hyperplane", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "1: initialize the routing logits:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Routing-on-hyperplane", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "for all capsule i in the layer L and capsule j in the layer L + 1: b ij \u2190 0; 2: for every capsule i in the layer L:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Routing-on-hyperplane", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "w i = W h i u i 3: for every capsule i in the layer L: u \u22a5i \u2190 u i \u2212 w T i u i w i 4: for every capsule i in the layer L:\u00fb \u22a5j|i = W ij u \u22a5i 5: for r iterations do 6:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Routing-on-hyperplane", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "for all capsule i and j:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Routing-on-hyperplane", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "c ij \u2190 exp(b ij ) k exp(b ik ) 7:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Routing-on-hyperplane", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "for all capsule j in the layer L + 1:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Routing-on-hyperplane", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "s \u22a5j \u2190 i c ij\u00fb\u22a5j|i 8: for all capsule j in the layer L + 1: v j \u2190 s \u22a5j 2 1+ s \u22a5j 2 s \u22a5j s \u22a5j 9:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Routing-on-hyperplane", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "for all capsule i and capsule j:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Routing-on-hyperplane", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "b ij \u2190 b ij + u \u22a5j|i \u2022 v j 10: end for 11: return v j", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Routing-on-hyperplane", |
|
"sec_num": "3.1" |
|
}, |
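A minimal NumPy sketch of Algorithm 1 for a single example is given below; the tensor shapes, the explicit normalization of w_i, and the small numerical epsilons are assumptions for illustration rather than the authors' reference implementation:

```python
import numpy as np

def squash(s):
    """Non-linear squashing (Eq. 8): length in (0, 1), direction preserved."""
    norm = np.linalg.norm(s)
    return (norm ** 2 / (1.0 + norm ** 2)) * (s / (norm + 1e-9))

def routing_on_hyperplane(u, W_h, W, num_out, r=3):
    """Sketch of Algorithm 1 (routing-on-hyperplane) for one example.

    u    : (num_in, d_in)                 lower-level capsule outputs
    W_h  : (num_in, d_in, d_in)           per-capsule matrices producing hyperplane normals
    W    : (num_in, num_out, d_out, d_in) transformation matrices W_ij
    """
    num_in, d_in = u.shape

    # Steps 2-3: hyperplane normals and projections u_⊥i
    u_proj = np.empty_like(u)
    for i in range(num_in):
        w_i = W_h[i] @ u[i]
        w_i = w_i / (np.linalg.norm(w_i) + 1e-9)          # ||w_i|| = 1
        u_proj[i] = u[i] - (w_i @ u[i]) * w_i

    # Step 4: prediction vectors û_⊥j|i = W_ij u_⊥i
    u_hat = np.einsum('ijkl,il->ijk', W, u_proj)          # (num_in, num_out, d_out)

    # Steps 5-10: iterative agreement routing
    b = np.zeros((num_in, num_out))
    for _ in range(r):
        c = np.exp(b) / np.exp(b).sum(axis=1, keepdims=True)    # Eq. (6)
        s = np.einsum('ij,ijk->jk', c, u_hat)                   # Eq. (7)
        v = np.stack([squash(s[j]) for j in range(num_out)])    # Eq. (8)
        b = b + np.einsum('ijk,jk->ij', u_hat, v)               # Eq. (5)
    return v

# Tiny usage example with random shapes (illustrative only)
rng = np.random.default_rng(0)
u = rng.normal(size=(6, 8))
W_h = rng.normal(size=(6, 8, 8))
W = rng.normal(size=(6, 2, 16, 8)) * 0.1
v = routing_on_hyperplane(u, W_h, W, num_out=2)
print(v.shape)   # (2, 16)
```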
|
{ |
|
"text": "predictions, the capsules in the layer L will multiply their projected output vector u \u22a5i by a weight matrix W ij :\u00fb", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Routing-on-hyperplane", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u22a5j|i = W ij u \u22a5i ,", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Routing-on-hyperplane", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where\u00fb \u22a5j|i denotes the 'vote' of the capsule i for the capsule j. The agreement between the prediction vector\u00fb \u22a5j|i with current output vector of parent capsule j will be fed back to the coupling coefficients c ij between the two capsules: increase c ij if highly agreed. Similar with [Sabour et al., 2017] we define the agreement as scalar product between the two vectors. b ij is the accumulation of the agreement after each iteration and the softmax function is implemented to ensure the coupling coefficients between the capsule i and all the capsules in the layer above sum to one:", |
|
"cite_spans": [ |
|
{ |
|
"start": 286, |
|
"end": 307, |
|
"text": "[Sabour et al., 2017]", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Routing-on-hyperplane", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "b ij \u2190 b ij +\u00fb \u22a5j|i \u2022 v j", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Routing-on-hyperplane", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "c ij = exp(b ij ) k exp(b ik )", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Routing-on-hyperplane", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Each iteration will result in a temporary output vector of the capsule j: the weighted sum over all prediction vectors\u00fb \u22a5j|i using coefficient c ij . Moreover, to ensure the length of the output vector of capsule j is able to represent the probability and prevent it from being too big, we use a non-linear 'squashing' function to make the vector's length range from zero to one without changing the vector's direction:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Routing-on-hyperplane", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "s \u22a5j = i c ij\u00fb\u22a5j|i . (7) v j = s \u22a5j 2 1 + s \u22a5j 2 s \u22a5j s \u22a5j .", |
|
"eq_num": "(8)" |
|
} |
|
], |
|
"section": "Routing-on-hyperplane", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The v j will then be returned as input to calculate the agreement for the next iteration. The coupling coefficients c ij and the output vector of capsule j gradually converge after several iterations. After the last iteration of the routing process, the coupling coefficients c ij is determined. Hyperplane plays the role to extract specific senses and assist to route the lower-level capsules to the right parent capsules. We detail the whole routing-onhyperplane algorithm in Algorithm 1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Routing-on-hyperplane", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We propose a model named HCapsNet for text classification based on the theory of capsule network and routing-on-hyperplane. The architecture is illustrated in Figure 1 . The model consists of three layers: one bi-directional recurrent layer, one convolutional capsule layer, and one fully connected capsule layer. The input of the model is a sentence S consisting of a sequence of word tokens t 1 , t 2 , ..., t n . The output of the model contains a series of capsules. Each top-level capsule corresponds to a sentence category. The length of the top-level capsule's output vector is the probability p that the input sentence S belongs to the corresponding category.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 159, |
|
"end": 167, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "HCapsNet Model Architecture", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The recurrent neural network can capture longdistance dependencies within a sentence. For this strength, a bi-directional recurrent neural network is the first layer of HCapsNet. We concatenate the left context and the right context as the word's elementary representation x i , which is the input to the second layer:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "HCapsNet Model Architecture", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "c l (t i ) = \u2212\u2212\u2212\u2192 RN N (t i ),", |
|
"eq_num": "(9)" |
|
} |
|
], |
|
"section": "HCapsNet Model Architecture", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "c r (t i ) = \u2190\u2212\u2212\u2212 RN N (t i ),", |
|
"eq_num": "(10)" |
|
} |
|
], |
|
"section": "HCapsNet Model Architecture", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "x i = [c l (t i ), c r (t i )].", |
|
"eq_num": "(11)" |
|
} |
|
], |
|
"section": "HCapsNet Model Architecture", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The second layer is a convolutional capsule layer. This is the first layer consisting of capsules, we call capsules in this layer as primary capsules. Primary capsules are groups of detected features which means piecing instantiated parts together to make familiar wholes. Since the output of the bidirectional recurrent neural network is not in the form of capsules, no routing method is used in this layer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "HCapsNet Model Architecture", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The final layer is fully connected capsule layer. Each capsule corresponds to a sentence class. All the capsules in this layer receive the output of the lower-level capsules by the routing-on-hyperplane method as we described in Section 3.1. The length of the top-level capsule's output vector represents the probability that the input sentence belongs to the corresponding category.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "HCapsNet Model Architecture", |
|
"sec_num": "3.2" |
|
}, |
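A rough PyTorch-style skeleton of this three-layer layout (embedding, bi-directional LSTM, convolutional primary capsules, and class capsules obtained by routing) is sketched below; the layer names, kernel size, and the commented call to a batched `routing_on_hyperplane` function are illustrative assumptions, not the authors' released code:

```python
import torch
import torch.nn as nn

class HCapsNetSketch(nn.Module):
    """Illustrative skeleton of the HCapsNet layout described in Section 3.2."""
    def __init__(self, vocab_size, emb_dim=300, hidden=256,
                 num_channels=32, capsule_dim=8, num_classes=2, out_dim=16):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, emb_dim)
        # Layer 1: bi-directional LSTM gives each token a contextual vector
        self.rnn = nn.LSTM(emb_dim, hidden, bidirectional=True, batch_first=True)
        # Layer 2: convolution produces `num_channels` primary capsules of size `capsule_dim`
        self.primary = nn.Conv1d(2 * hidden, num_channels * capsule_dim,
                                 kernel_size=3, padding=1)
        self.num_channels, self.capsule_dim = num_channels, capsule_dim
        self.num_classes, self.out_dim = num_classes, out_dim

    def forward(self, tokens):
        x = self.embed(tokens)                    # (B, T, emb_dim)
        h, _ = self.rnn(x)                        # (B, T, 2*hidden)
        p = self.primary(h.transpose(1, 2))       # (B, C*d, T)
        B, _, T = p.shape
        # reshape into primary capsules: (B, C*T, capsule_dim)
        p = p.view(B, self.num_channels, self.capsule_dim, T)
        p = p.permute(0, 1, 3, 2).reshape(B, -1, self.capsule_dim)
        # Layer 3: class capsules via routing-on-hyperplane (see Algorithm 1);
        # a batched version of the earlier sketch would be called here, e.g.:
        # v = routing_on_hyperplane_batched(p, ...)   # (B, num_classes, out_dim)
        # The class probability is the length of each class capsule's output vector.
        return p
```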
|
{ |
|
"text": "The HCapsNet may suffer from redundancy problem if the output vectors of capsules are always getting projected on the similar hyperplanes at the routing-on-hyperplane procedure. Thus, we need a penalization term to encourage the diversity of the hyperplanes. We introduce an easy penalization term with low time complexity and space cost. Firstly, we construct a matrix X i the columns of which is the normal vectors w of the hyperplanes for the ith word. The dot product of X i and its transpose, subtracted by an identity matrix is defined as a measure of redundancy. The penalization term is the sum of all the words' redundancy:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Penalization Term", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P i = ||(X i X T i \u2212 I)|| 2 F .", |
|
"eq_num": "(12)" |
|
} |
|
], |
|
"section": "Penalization Term", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P = i P i ,", |
|
"eq_num": "(13)" |
|
} |
|
], |
|
"section": "Penalization Term", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "where || \u2022 || F stands for the Frobenius norm of a matrix. Similar to adding the L2 regularization term, this penalization term P will be multiplied by a coefficient, and we minimize it together with the original loss. Let's consider the two columns w a and w b in X i , which are two normal vectors of hyperplanes for the ith word. We have restricted that ||w|| = 1 as described in Algorithm 1. For any non-diagonal elements x ab (a = b) in the X i X T i matrix, it corresponds to the cosine similarity between the two ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Penalization Term", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u22121 < x ab = k w a k w b k < 1.", |
|
"eq_num": "(14)" |
|
} |
|
], |
|
"section": "Penalization Term", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "where w a k and w b k are k-th element in the w a and w b vectors, respectively. In the most extreme case, where the two normal vectors of hyperplanes are orthometric with each other, i.e. the word is projected to two extremely different meanings, the corresponding x ab is 0. Otherwise, the absolute value will be positive. In the other most extreme case, where the two normal vectors of hyperplanes are identical, i.e. the word is projected to the same vector, the corresponding absolute value of x ab is 1. The diagonal elements x ab (a = b) in the X i X T i matrix is the normal vectors' cosine similarity with themselves, so they are all 1. The X i X T i is subtracted by an identity matrix I so as to eliminate the meaningless elements. We minimize the Frobenius norm of P i to encourage the non-diagonal elements in P i to converge to 0, in another word, to encourage word vector to be projected on orthometric hyperplanes and get diversified explanation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Penalization Term", |
|
"sec_num": "3.3" |
|
}, |
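A small sketch of the penalty of Eqs. (12)-(14) is given below; here each row of X_i is assumed to hold one unit normal vector, so that (X_i X_i^T)[a, b] equals the cosine similarity w_a · w_b of Eq. (14) (the prose above stacks the normals as columns, which would correspond to X_i^T X_i instead):

```python
import numpy as np

def hyperplane_penalty(normals_per_word):
    """Sketch of Eqs. (12)-(13): P = sum_i ||X_i X_i^T - I||_F^2.

    normals_per_word: list of arrays, one per word, of shape (m_i, d);
    each row is one unit normal vector w of a hyperplane for that word.
    """
    total = 0.0
    for X in normals_per_word:
        G = X @ X.T                          # pairwise cosine similarities (Eq. 14)
        R = G - np.eye(X.shape[0])           # drop the trivial diagonal ones
        total += np.sum(R ** 2)              # squared Frobenius norm (Eq. 12)
    return total

# Illustrative usage: two words, each with 4 unit normal vectors in R^8
rng = np.random.default_rng(0)
normals = [rng.normal(size=(4, 8)) for _ in range(2)]
normals = [X / np.linalg.norm(X, axis=1, keepdims=True) for X in normals]
print(hyperplane_penalty(normals))
```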
|
{ |
|
"text": "In HCapsNet, each top-level capsule corresponds to a sentence category. The length of the top-level capsule's output vector represents the probability that the input sentence belongs to the corresponding category. We would like the top-level capsule for the category k to have a long output vector if the input sentence belongs to the category k and have a short output vector if the input sentence does not belong to the category k. Similar with [Sabour et al., 2017] , We use a separate margin loss, L k for each top-level capsule k. The total loss L is simply the sum of the losses of all toplevel capsules:", |
|
"cite_spans": [ |
|
{ |
|
"start": 447, |
|
"end": 468, |
|
"text": "[Sabour et al., 2017]", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Loss Function", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L = k {T k max(0, m + \u2212 ||v k ||) 2 + \u03bb 1 (1 \u2212 T k ) max(0, ||v k || \u2212 m \u2212 ) 2 } + \u03bb 2 P,", |
|
"eq_num": "(15)" |
|
} |
|
], |
|
"section": "Loss Function", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "where T k will be 1 if the sentence belongs to the k class, or else T k will be 0. ||v k || is the length of the output vector of capsule k. We introduce \u03bb 1 to reduce the penalization to avoid shrinking the length of the capsules' output vectors in the initial learning stage. P is the penalization term introduced in Section 3.3. In our experiments, m + = 0.9, m \u2212 = 0.1, \u03bb 1 = 0.5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Loss Function", |
|
"sec_num": "3.4" |
|
}, |
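A minimal sketch of the loss of Eq. (15) for a single example, using the hyperparameter values stated above (m⁺ = 0.9, m⁻ = 0.1, λ_1 = 0.5) and λ_2 = 0.01 as defaults; the `penalty` argument stands for the term P from Section 3.3:

```python
import numpy as np

def hcaps_margin_loss(v_lengths, label, penalty,
                      m_pos=0.9, m_neg=0.1, lam1=0.5, lam2=0.01):
    """Eq. (15): per-class margin loss plus the hyperplane diversity penalty.

    v_lengths : (K,) lengths ||v_k|| of the top-level capsules' output vectors
    label     : index of the true class
    penalty   : the diversity penalty P from Eqs. (12)-(13)
    """
    K = v_lengths.shape[0]
    T = np.zeros(K)
    T[label] = 1.0                                    # T_k = 1 for the true class
    pos = T * np.maximum(0.0, m_pos - v_lengths) ** 2
    neg = lam1 * (1.0 - T) * np.maximum(0.0, v_lengths - m_neg) ** 2
    return np.sum(pos + neg) + lam2 * penalty

# Example: 5 classes, true class 2
print(hcaps_margin_loss(np.array([0.2, 0.1, 0.8, 0.3, 0.05]), label=2, penalty=0.4))
```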
|
{ |
|
"text": "We compare our method with the widely used text classification methods and baseline models (listed in Table 1 ).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 102, |
|
"end": 109, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "HCapsNet is evaluated on six widely studied datasets including three common text classification tasks: sentiment analysis, question classification and topic classification. These datasets are Stanford Sentiment Treebank [Socher et al., 2013] , Movie Review Data [Pang and Lee, 2005] , Subjectivity dataset [Pang and Lee, 2004] , TREC [Li and Roth, 2002] and AG's corpus of news articles [Zhang et al., 2015b] . Summary statistics of the datasets are listed in Table2.", |
|
"cite_spans": [ |
|
{ |
|
"start": 220, |
|
"end": 241, |
|
"text": "[Socher et al., 2013]", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 262, |
|
"end": 282, |
|
"text": "[Pang and Lee, 2005]", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 306, |
|
"end": 326, |
|
"text": "[Pang and Lee, 2004]", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 334, |
|
"end": 353, |
|
"text": "[Li and Roth, 2002]", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 387, |
|
"end": 408, |
|
"text": "[Zhang et al., 2015b]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Method SST-2 SST-5 MR Subj TREC AG's news SVM [Socher et al., 2013] 79.4 40.7 ----NB [Socher et al., 2013] 81 ", |
|
"cite_spans": [ |
|
{ |
|
"start": 46, |
|
"end": 67, |
|
"text": "[Socher et al., 2013]", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 85, |
|
"end": 106, |
|
"text": "[Socher et al., 2013]", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In our experiments, we use 300-dimensional word2vec [Mikolov et al., 2013] vectors to initialize word representations. In the first bi-directional RNN layer of HCapsNet, we use Long Short Term Memory network, the dimension of the hidden state is 256. The second layer contains 32 channels of primary capsules and the number of capsules in one channel depends on the sentence length. Each primary capsule contains 8 atoms which means that the dimension of the primary capsules is 8. The top-level capsules are obtained after 3 routing iterations. The dimension of the output vector of top-level capsules is 16. For all the datasets, we conduct mini-batch with size 25. We use Adam [Kingma and Ba, 2014] as our optimization method with 1e \u2212 3 learning rate. \u03bb 2 is 0.01. Table 1 reports the results of our model on different datasets comparing with the widely used text classification methods and state-of-the-art approaches. We can have the following observations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 52, |
|
"end": 74, |
|
"text": "[Mikolov et al., 2013]", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 769, |
|
"end": 776, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Hyperparameters", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Our HCapsNet achieves the best results on 5 out of 6 datasets, which verifies the effectiveness of our model. In particular, HCapsNet outperforms vanilla capsule network Capsule-B ] by a remarkable margin, which only utilizes the dynamic routing mechanism without hyperplane projecting.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussions", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "HCapNet does not perform best on the TREC dataset. One main reason maybe TREC dataset is used for question type classification, where samples are all question sentences. The task is mainly determined by interrogative words. For example, the sentence containing 'where' will probably be classified to 'location'. The ability to tackle polysemy doesn't play an important role. So, our model gets a similar result with Capsule-B.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussions", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "To analyze the effect of different components including hyperplane projection, penalization term, and routing iterations, we report the results of variants of HCapsNet in Table 4 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 171, |
|
"end": 178, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ablation Study", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "The results show that capsule network performs best when conducting 3 routing iterations, which stays in line with the conclusion in [Sabour et 0.1758, -0.3037, -0.3679, 0.0992, 0.3996, -0.6481, 0.3668, 0.1267] N N the [cold] and dreary weather is a perfect metaphor for the movie itself , which contains few laughs and not much drama [-0.3810, -0.3923, -0.3016, -0.3045, 0.2417, -0.3109, 0.5999, -0.0391] N \u00d7 P Table 3 : Projected primary capsule's representations for polysemic words. P and N denote positive and negative classification results, respectively. denotes the right classification and \u00d7 denotes the incorrect classification.", |
|
"cite_spans": [ |
|
{ |
|
"start": 133, |
|
"end": 143, |
|
"text": "[Sabour et", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 144, |
|
"end": 210, |
|
"text": "0.1758, -0.3037, -0.3679, 0.0992, 0.3996, -0.6481, 0.3668, 0.1267]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 335, |
|
"end": 405, |
|
"text": "[-0.3810, -0.3923, -0.3016, -0.3045, 0.2417, -0.3109, 0.5999, -0.0391]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 412, |
|
"end": 419, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ablation Study", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "\u2022 Almost every scene in this film is a gem that could stand alone, a perfectly realized observation of mood, behavior and intent.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ablation Study", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "A spunky, original take on a theme that will resonate with singles of many ages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The story drifts so inexorable into cliches about tortured lrb and torturing rrb artists and consuming but impossible love that you can't help but become more disappointed as each overwrought new sequence plods on.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The premise is in extremely bad taste, and the film's supposed insights are so poorly thought out and substance free that even a high school senior taking his or her first psychology class could dismiss them. al., 2017; . Compared with the vanilla capsule network (row 3), applying routingon-hyperplane brings a noticeable improvement (row 2). This demonstrates the necessity of integrating hyperplane projecting at the routing procedure to tackle the polysemy problems. Moreover, the penalization term described in Section 3.3 also marginally improves the accuracy, which proves that the orthogonal constraint on hyperplane is beneficial for text classification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Hyperplane Pena. Iterations Accuracy 3 83.5 \u00d7 3 83.2 \u00d7 \u00d7 3 82.5 1 81.5 5 82.6 Table 4 : Ablation study on MR dataset. \"Pena.\" denotes Penalization Term in Section 3.3. Table 3 shows some sample cases from SST validation dataset, which are movie reviews for sentiment analysis. We analyze the attended primary capsule representation for the polysemic words in brackets. Specifically, we report the output vectors of the projected primary capsule, which is mostly attended by the routing mechanism. The word 'wonder' in the first sample sentence means something that fills you with surprise and admiration, which shows a very positive sentiment. However, the polysemic word 'wonder' in the second and third sentences means to think about something and try to decide what is true, which is neutral in sentiment. We can observe that for the same word, the attended projected capsule representations are quite different according to different word senses. The projected representations for the same sense are similar, the Euclidean distance is 0.23 (row 2,3). On the contrary, for the different senses, the Euclidean distance is 1.12 (row 1,2). This property helps our model to make the predictions all correctly, while Capsule-B can not handle the latter two sentences. Similarly, the word 'cold' conveys two different senses in the last two samples (row 4-5), which means cruel and low temperature, respectively. The corresponding projected vectors are also quite different, which verifies the ability to tackle polysemy by routing-on-hyperplane.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 78, |
|
"end": 85, |
|
"text": "Table 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 168, |
|
"end": 175, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "\u2022", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "After several iterations of the routing algorithm, each primary capsule and the top-level capsule will be connected via a calculated coupling coefficient. The coupling coefficient corresponds to how much contribution of a low-level capsule to a specific high-level capsule. Routing-on-hyperplane can also be viewed as a parallel attention mech- anism that allows each capsule at one level to attend to some active capsules at the level below and to ignore others. We can thus draw a heat map to figure which phrases are taken into account a lot, and which ones are skipped by the routing-onhyperplane in the task of text classification. We randomly select 4 examples of reviews from the test set of SST, when the model has a high confidence (>0.8) in predicting the label. As shown in Figure 2 , the words whose coupling coefficient greater than 0.7 are marked. It is easy to conclude that our routing method can effectively extract the sentimental words that indicate strongly on the sentiment behind the sentence and assign a greater coupling coefficient between the corresponding capsules. For example, 'gem', 'spunky', 'disappointed', 'bad taste' etc.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 785, |
|
"end": 793, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Visualizing Routing Results", |
|
"sec_num": "4.6" |
|
}, |
|
{ |
|
"text": "In order to assess the effect of the hyperplane, we randomly select 3 examples in SST dataset and draw the distribution maps of primary capsules' output vectors before and after the projection operation respectively. As the dimension of the primary capsule's output vector is 8, T-Distributed Stochastic Neighbor Embedding (t-SNE) is performed on the vectors to reduce the dimension for visualization. As illustrated in Figure 3 , the three pictures in the first line show the distribution before the projection operation for the three example sentences respectively. And the three pictures in the second line show the distribution after the projection. The blue points in the distribution maps denote the normal words and the red crosses denote the words attended by the routing algorithm which are defined in Section 4.6.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 420, |
|
"end": 428, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Visualizing Effects of The Hyperplane", |
|
"sec_num": "4.7" |
|
}, |
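As a sketch of this visualization step with standard scikit-learn t-SNE (the capsule vectors and the attended-word mask below are random placeholders, not values from the paper):

```python
import numpy as np
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt

# Placeholder: 8-dimensional primary-capsule output vectors for one sentence,
# with a boolean mask marking the capsules attended by the routing algorithm.
rng = np.random.default_rng(0)
capsules = rng.normal(size=(40, 8))
attended = rng.random(40) > 0.8

# Reduce the 8-dimensional vectors to 2D for plotting.
xy = TSNE(n_components=2, perplexity=10, init="random",
          random_state=0).fit_transform(capsules)

plt.scatter(xy[~attended, 0], xy[~attended, 1], marker=".", label="normal words")
plt.scatter(xy[attended, 0], xy[attended, 1], marker="x", c="red", label="attended words")
plt.legend()
plt.show()
```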
|
{ |
|
"text": "The relationship between the semantic capsules can be estimated by analyzing the distribution of the low-dimensional data in Figure 3 . We find that originally scattered points which denote attended words converge after the projection. The attended words' projected vectors are close with each other, showing that they contain similar senses which are beneficial for the subsequent task. On the contrary, the capsules before projection contain multiple senses and show a scattered pattern. This demonstrates that the hyperplanes can effectively extract the guided senses and get attended by the routing-on-hyperplane mechanism.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 125, |
|
"end": 133, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Visualizing Effects of The Hyperplane", |
|
"sec_num": "4.7" |
|
}, |
|
{ |
|
"text": "In this paper, we explore the capsule network for text classification and propose to decompose the capsule by means of projecting on hyperplanes to tackle the polysemy problem in natural language. Routing-on-hyperplane, a dynamic routing method, is implemented to select the sensespecific projected capsules for the subsequent classification task. We assess the effect of the hyperplane by case study and analyzing the distribution of the capsules' output vectors. The experiments demonstrate the superiority of HCapsNet and our proposed routing-on-hyperplane method outperforms the existing routing method in the text classification task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In future, we would like to investigate the application of our theory in various tasks including reading comprehension and machine translating. We believe that capsule networks have broad applicability on the natural language processing tasks. Our core idea that decomposing the semantic capsules by projecting on hyperplanes is a necessary complement to capsule network to tackle the polysemy problem in various natural language processing tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "5" |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work was jointly supported by: (1) National Natural Science Foundation of China (No. 61771068, 61671079, 61471063, 61372120, 61421061); 2 ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "A survey of text classification algorithms", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Charu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chengxiang", |
|
"middle": [], |
|
"last": "Aggarwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Zhai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Mining Text Data", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Charu C. Aggarwal and ChengXiang Zhai. A survey of text classification algorithms. In Mining Text Data. 2012.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Linear algebraic structure of word senses", |
|
"authors": [ |
|
{ |
|
"first": "Sanjeev", |
|
"middle": [], |
|
"last": "Arora", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuanzhi", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yingyu", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tengyu", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrej", |
|
"middle": [], |
|
"last": "Risteski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sanjeev Arora, Yuanzhi Li, Yingyu Liang, Tengyu Ma, and Andrej Risteski. Linear algebraic structure of word senses, with applications to polysemy. TACL, 2018.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Transforming auto-encoders", |
|
"authors": [ |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Hinton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Krizhevsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sida", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Artificial Neural Networks and Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Geoffrey E. Hinton, Alex Krizhevsky, and Sida D. Wang. Transforming auto-encoders. In Artificial Neural Networks and Machine Learning, 2011.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Matrix capsules with em routing", |
|
"authors": [ |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicholas", |
|
"middle": [], |
|
"last": "Frosst", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Sabour", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Geoffrey Hinton, Nicholas Frosst, and Sara Sabour. Matrix capsules with em routing. 2018.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Deep pyramid convolutional neural networks for text categorization", |
|
"authors": [ |
|
{ |
|
"first": "Rie", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tong", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rie Johnson and Tong Zhang. Deep pyramid convo- lutional neural networks for text categorization. In ACL, 2017.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Convolutional neural networks for sentence classification", |
|
"authors": [ |
|
{ |
|
"first": "Yoon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoon Kim. Convolutional neural networks for sentence classification. In EMNLP, 2014.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Diederik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. CoRR, 2014.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Recurrent convolutional neural networks for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Siwei", |
|
"middle": [], |
|
"last": "Lai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liheng", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Siwei Lai, Liheng Xu, Kang Liu, and Jun Zhao. Re- current convolutional neural networks for text clas- sification. In Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence, 2015.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Studies in linguistic analysis", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Terence", |
|
"middle": [], |
|
"last": "Langendoen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1959, |
|
"venue": "International Journal of American Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D. Terence Langendoen. Studies in linguistic analy- sis. International Journal of American Linguistics, 1959.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Do multi-sense embeddings improve natural language understanding?", |
|
"authors": [ |
|
{ |
|
"first": "Jiwei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiwei Li and Dan Jurafsky. Do multi-sense embed- dings improve natural language understanding? In EMNLP, 2015.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Learning question classifiers", |
|
"authors": [ |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "19th International Conference on Computational Linguistics, COLING", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xin Li and Dan Roth. Learning question classifiers. In 19th International Conference on Computational Linguistics, COLING, 2002.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "A structured self-attentive sentence embedding", |
|
"authors": [ |
|
{ |
|
"first": "Zhouhan", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minwei", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C\u00edcero", |
|
"middle": [], |
|
"last": "Nogueira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mo", |
|
"middle": [], |
|
"last": "Santos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bowen", |
|
"middle": [], |
|
"last": "Xiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhouhan Lin, Minwei Feng, C\u00edcero Nogueira dos San- tos, Mo Yu, Bing Xiang, Bowen Zhou, and Yoshua Bengio. A structured self-attentive sentence embed- ding. international conference on learning repre- sentations, 2017.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Efficient estimation of word representations in vector space", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Computer Science", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. Efficient estimation of word representations in vector space. Computer Science, 2013.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "A sentimental education: Sentiment analysis using subjectivity summarization based on minimum cuts", |
|
"authors": [ |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Pang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lillian", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the 42nd Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bo Pang and Lillian Lee. A sentimental education: Sentiment analysis using subjectivity summarization based on minimum cuts. In Proceedings of the 42nd Annual Meeting of the Association for Computa- tional Linguistics, 2004.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Seeing stars: Exploiting class relationships for sentiment categorization with respect to rating scales", |
|
"authors": [ |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Pang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lillian", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "ACL 2005", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bo Pang and Lillian Lee. Seeing stars: Exploiting class relationships for sentiment categorization with re- spect to rating scales. In ACL 2005, 2005.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Deep contextualized word representations", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "NAACL HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew E. Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. Deep contextualized word representa- tions. In NAACL HLT, 2018.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Dynamic routing between capsules", |
|
"authors": [ |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Sabour", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicholas", |
|
"middle": [], |
|
"last": "Frosst", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sara Sabour, Nicholas Frosst, and Geoffrey E. Hinton. Dynamic routing between capsules. In NIPS, 2017.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Very deep convolutional networks for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Holger", |
|
"middle": [], |
|
"last": "Schwenk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lo\u00efc", |
|
"middle": [], |
|
"last": "Barrault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yann", |
|
"middle": [], |
|
"last": "Lecun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "EACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Holger Schwenk, Lo\u00efc Barrault, Alexis Conneau, and Yann LeCun. Very deep convolutional networks for text classification. In EACL, 2017.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Recursive deep models for semantic compositionality over a sentiment treebank", |
|
"authors": [ |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Perelygin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Chuang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Potts", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D. Manning, Andrew Y. Ng, and Christopher Potts. Recursive deep models for semantic compositionality over a sentiment tree- bank. In EMNLP, 2013.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In NIPS, 2017.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Baselines and bigrams: Simple, good sentiment and topic classification", |
|
"authors": [ |
|
{ |

"first": "Sida", |

"middle": [ |

"I" |

], |

"last": "Wang", |

"suffix": "" |

}, |

{ |

"first": "Christopher", |

"middle": [ |

"D" |

], |

"last": "Manning", |

"suffix": "" |

} |
|
], |
|
"year": 2012, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sida I. Wang and Christopher D. Manning. Baselines and bigrams: Simple, good sentiment and topic clas- sification. In ACL, 2012.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Baselines and bigrams: Simple, good sentiment and topic classification", |
|
"authors": [ |
|
{ |

"first": "Sida", |

"middle": [ |

"I" |

], |

"last": "Wang", |

"suffix": "" |

}, |

{ |

"first": "Christopher", |

"middle": [ |

"D" |

], |

"last": "Manning", |

"suffix": "" |

} |
|
], |
|
"year": 2012, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sida I. Wang and Christopher D. Manning. Baselines and bigrams: Simple, good sentiment and topic clas- sification. In ACL, 2012.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Zero-shot user intent detection via capsule neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Congying", |
|
"middle": [], |
|
"last": "Xia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chenwei", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaohui", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Congying Xia, Chenwei Zhang, Xiaohui Yan, Yi Chang, and Philip S. Yu. Zero-shot user intent detection via capsule neural networks. In Proceed- ings of the 2018 Conference on Empirical Methods in Natural Language Processing, 2018.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Mcapsnet: Capsule network for text with multi-task learning", |
|
"authors": [ |
|
{ |
|
"first": "Liqiang", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Honglun", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenqing", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yongkun", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yaohui", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Liqiang Xiao, Honglun Zhang, Wenqing Chen, Yongkun Wang, and Yaohui Jin. Mcapsnet: Cap- sule network for text with multi-task learning. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, 2018.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Hierarchical attention networks for document classification", |
|
"authors": [ |
|
{ |
|
"first": "Zichao", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diyi", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Smola", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "NAACL HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zichao Yang, Diyi Yang, Chris Dyer, Xiaodong He, Alexander J. Smola, and Eduard H. Hovy. Hier- archical attention networks for document classifica- tion. In NAACL HLT, 2016.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Investigating capsule networks with dynamic routing for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianbo", |
|
"middle": [], |
|
"last": "Ye", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zeyang", |
|
"middle": [], |
|
"last": "Lei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhou", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soufei", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Min Yang, Wei Zhao, Jianbo Ye, Zeyang Lei, Zhou Zhao, and Soufei Zhang. Investigating capsule net- works with dynamic routing for text classification. In Proceedings of the 2018 Conference on Empiri- cal Methods in Natural Language Processing, 2018.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Character-level convolutional networks for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junbo", |
|
"middle": [ |
|
"Jake" |
|
], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yann", |
|
"middle": [], |
|
"last": "Lecun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Advances in Neural Information Processing Systems 28: Annual Conference on Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiang Zhang, Junbo Jake Zhao, and Yann LeCun. Character-level convolutional networks for text clas- sification. In Advances in Neural Information Pro- cessing Systems 28: Annual Conference on Neural Information Processing Systems, 2015.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Character-level convolutional networks for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junbo", |
|
"middle": [ |
|
"Jake" |
|
], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yann", |
|
"middle": [], |
|
"last": "Lecun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiang Zhang, Junbo Jake Zhao, and Yann LeCun. Character-level convolutional networks for text clas- sification. In NIPS, 2015.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Dependency sensitive convolutional neural networks for modeling sentences and documents", |
|
"authors": [ |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Honglak", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dragomir", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Radev", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "NAACL HLT 2016", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rui Zhang, Honglak Lee, and Dragomir R. Radev. Dependency sensitive convolutional neural networks for modeling sentences and documents. In NAACL HLT 2016, 2016.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Adaptive learning of local semantic and global structure representations for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Jianyu", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiqiang", |
|
"middle": [], |
|
"last": "Zhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qichuan", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Changjian", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "Zhensheng", |

"middle": [], |

"last": "Li", |

"suffix": "" |

}, |

{ |

"first": "Liuxin", |

"middle": [], |

"last": "Zhang", |

"suffix": "" |

}, |

{ |

"first": "Zhiqiang", |

"middle": [], |

"last": "He", |

"suffix": "" |

} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jianyu Zhao, Zhiqiang Zhan, Qichuan Yang, Yang Zhang, Changjian Hu, Zhensheng Li, Liuxin Zhang, and Zhiqiang He. Adaptive learning of local seman- tic and global structure representations for text clas- sification. In Proceedings of the 27th International Conference on Computational Linguistics, COLING 2018, Santa Fe, New Mexico, USA, August 20-26, 2018, 2018.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Figure 1: The architecture of HCapsNet" |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Examples of routing results for SST-2." |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Illustrating the effect of the hyperplane." |
|
}, |
|
"TABREF0": { |
|
"num": null, |
|
"text": "Table 1: Experimental results of our model compared with other models. Performance is measured in accuracy (%). Models are divided into 3 categories. The first part is baseline methods including SVM and Naive Bayes and their variations. The second part contains models about recurrent neural networks. The third part contains models about convolutional neural networks.", |
|
"content": "<table><tr><td/><td/><td/><td>.8</td><td>41.0</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td colspan=\"3\">NBSVM-bi [Wang and Manning, 2012b]</td><td>-</td><td>-</td><td colspan=\"2\">79.4 93.2</td><td>-</td><td>-</td></tr><tr><td colspan=\"2\">Standard-LSTM</td><td/><td>80.6</td><td>45.3</td><td colspan=\"2\">75.9 89.3</td><td>86.8</td><td>86.1</td></tr><tr><td>bi-LSTM</td><td/><td/><td>83.2</td><td>46.7</td><td colspan=\"2\">79.3 90.5</td><td>89.6</td><td>88.2</td></tr><tr><td colspan=\"3\">RCNN [Lai et al., 2015]</td><td>-</td><td>47.21</td><td>-</td><td>-</td><td>-</td></tr><tr><td colspan=\"3\">SNN [Zhao et al., 2018]</td><td>-</td><td>50.4</td><td colspan=\"2\">82.1 93.9</td><td>96</td><td>-</td></tr><tr><td colspan=\"3\">CNN-non-static [Kim, 2014]</td><td>87.2</td><td>48.0</td><td colspan=\"2\">81.5 93.4</td><td>93.6</td><td>92.3</td></tr><tr><td colspan=\"3\">VD-CNN [Schwenk et al., 2017]</td><td>-</td><td>-</td><td>-</td><td>88.2</td><td>85.4</td><td>91.3</td></tr><tr><td colspan=\"3\">CL-CNN [Zhang et al., 2015a]</td><td>-</td><td>-</td><td>-</td><td>88.4</td><td>85.7</td><td>92.3</td></tr><tr><td colspan=\"3\">Capsule-B [Yang et al., 2018]</td><td>86.8</td><td>-</td><td colspan=\"2\">82.3 93.8</td><td>93.2</td><td>92.6</td></tr><tr><td>HCapsNet</td><td/><td/><td>88.7</td><td>50.8</td><td colspan=\"2\">83.5 94.2</td><td>94.2</td><td>93.5</td></tr><tr><td colspan=\"4\">Dataset Class Len V Train Dev Test</td><td/><td/><td/></tr><tr><td>SST-2</td><td>2</td><td colspan=\"2\">54 16185 6920 872 1821</td><td/><td/><td/></tr><tr><td>SST-5</td><td>5</td><td colspan=\"2\">54 17836 8544 1101 2210</td><td/><td/><td/></tr><tr><td>MR</td><td>2</td><td colspan=\"2\">58 18765 9596 -1066</td><td/><td/><td/></tr><tr><td>Subj</td><td colspan=\"3\">2 121 21323 9000 -1000</td><td/><td/><td/></tr><tr><td>TREC</td><td>6</td><td colspan=\"2\">37 9592 5452 -500</td><td/><td/><td/></tr><tr><td colspan=\"4\">AG's news 4 197 51379 120k -7.6k</td><td/><td/><td/></tr></table>", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"text": "Summary statistics for the datasets.", |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |