{ "paper_id": "D19-1017", "header": { "generated_with": "S2ORC 1.0.0", "date_generated": "2023-01-19T15:59:06.781527Z" }, "title": "Interpretable Relevant Emotion Ranking with Event-Driven Attention", "authors": [ { "first": "Yang", "middle": [], "last": "Yang", "suffix": "", "affiliation": { "laboratory": "Key Laboratory of Computer Network and Information Integration", "institution": "Southeast University", "location": { "country": "China" } }, "email": "" }, { "first": "Deyu", "middle": [], "last": "Zhou", "suffix": "", "affiliation": { "laboratory": "Key Laboratory of Computer Network and Information Integration", "institution": "Southeast University", "location": { "country": "China" } }, "email": "d.zhou@seu.edu.cn" }, { "first": "Yulan", "middle": [], "last": "He", "suffix": "", "affiliation": { "laboratory": "", "institution": "University of Warwick", "location": { "country": "UK" } }, "email": "y.he@cantab" }, { "first": "Meng", "middle": [], "last": "Zhang", "suffix": "", "affiliation": { "laboratory": "Key Laboratory of Computer Network and Information Integration", "institution": "Southeast University", "location": { "country": "China" } }, "email": "m.zhang@seu.edu.cn." } ], "year": "", "venue": null, "identifiers": {}, "abstract": "Multiple emotions with different intensities are often evoked by events described in documents. Oftentimes, such event information is hidden and needs to be discovered from texts. Unveiling the hidden event information can help to understand how the emotions are evoked and provide explainable results. However, existing studies often ignore the latent event information. In this paper, we proposed a novel interpretable relevant emotion ranking model with the event information incorporated into a deep learning architecture using the event-driven attentions. Moreover, corpuslevel event embeddings and document-level event distributions are introduced respectively to consider the global events in corpus and the document-specific events simultaneously. Experimental results on three real-world corpora show that the proposed approach performs remarkably better than the state-of-the-art emotion detection approaches and multi-label approaches. Moreover, interpretable results can be obtained to shed light on the events which trigger certain emotions.", "pdf_parse": { "paper_id": "D19-1017", "_pdf_hash": "", "abstract": [ { "text": "Multiple emotions with different intensities are often evoked by events described in documents. Oftentimes, such event information is hidden and needs to be discovered from texts. Unveiling the hidden event information can help to understand how the emotions are evoked and provide explainable results. However, existing studies often ignore the latent event information. In this paper, we proposed a novel interpretable relevant emotion ranking model with the event information incorporated into a deep learning architecture using the event-driven attentions. Moreover, corpuslevel event embeddings and document-level event distributions are introduced respectively to consider the global events in corpus and the document-specific events simultaneously. Experimental results on three real-world corpora show that the proposed approach performs remarkably better than the state-of-the-art emotion detection approaches and multi-label approaches. 
Moreover, interpretable results can be obtained to shed light on the events which trigger certain emotions.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Abstract", "sec_num": null } ], "body_text": [ { "text": "The advent and prosperity of social media enable users to share their opinions, feelings and attitudes online. Apart from directly expressing their opinions on social media posts, users can also vote for their emotional states after reading an article online. An example of a news article crawled from the Sina News Society Channel, together with the emotion votes it received from readers, is illustrated in Figure 1 . Treating these emotion votes as labels for the news article, we can define the emotion detection problem as an emotion ranking problem that ranks emotions based on their intensities. Moreover, some of the emotion labels could be considered irrelevant emotions. For example, the emotion categories 'Moved', 'Funny' and 'Strange' in Figure 1 only received one or two votes. These emotion votes could be noise (e.g., readers accidentally clicked on a wrong emotion button) and hence can be considered irrelevant emotions. We need to separate the relevant emotions from irrelevant ones and only predict the ranking results for the relevant emotion labels. Therefore, the task we need to perform is relevant emotion ranking. Understanding and automatically ranking users' emotional states would be potentially useful for downstream applications such as dialogue systems (Picard and Picard, 1997) . Multiple emotion detection from texts has been previously addressed in (Zhou et al., 2016) , which predicted multiple emotions with different intensities based on emotion distribution learning. A relevant emotion ranking framework was proposed in (Yang et al., 2018) to predict multiple relevant emotions as well as their rankings based on their intensities. However, existing emotion detection approaches do not model the events in texts, which are crucial for emotion detection. Moreover, most of the existing approaches only produce emotion classification or ranking results; they do not provide interpretations such as identifying which event triggers a certain emotion.", "cite_spans": [ { "start": 1298, "end": 1323, "text": "(Picard and Picard, 1997)", "ref_id": "BIBREF12" }, { "start": 1397, "end": 1416, "text": "(Zhou et al., 2016)", "ref_id": "BIBREF29" }, { "start": 1571, "end": 1590, "text": "(Yang et al., 2018)", "ref_id": "BIBREF22" } ], "ref_spans": [ { "start": 411, "end": 419, "text": "Figure 1", "ref_id": "FIGREF0" }, { "start": 756, "end": 764, "text": "Figure 1", "ref_id": "FIGREF0" } ], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "We argue that emotions may be evoked by latent events in texts. Let us refer back to the example shown in Figure 1 and read the text more carefully. We notice that words such as 'beat', 'child' and 'stick' marked in red are event-related words indicating the event of \"child abuse\", which may evoke the emotions of \"Anger\", \"Sadness\" and \"Shock\".", "cite_spans": [], "ref_spans": [ { "start": 106, "end": 114, "text": "Figure 1", "ref_id": "FIGREF0" } ], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "The above example shows that it is important to simultaneously consider the latent events in texts for relevant emotion ranking. In this paper, we propose an interpretable relevant emotion ranking model with event-driven attention (IRER-EA). 
We focus on relevant emotion ranking (RER) by discriminating relevant emotions from irrelevant ones and learning the rankings of only the relevant emotions based on their intensities.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "Our main contributions are summarized below:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "\u2022 A novel interpretable relevant emotion ranking model with event-driven attention (IRER-EA) is proposed. The latent event information is incorporated into a deep learning architecture through event-driven attention, which provides interpretable clues as to how the emotions are evoked. To the best of our knowledge, this is the first deep event-driven neural approach for RER.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "\u2022 To model event information comprehensively, corpus-level event embeddings are incorporated to capture global events in the corpus, and document-level event distributions are incorporated to learn document-specific event-related attention.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "\u2022 Experimental results on three real-world corpora show that the proposed method performs better than state-of-the-art emotion detection methods and multi-label learning methods. Moreover, the event-driven attention enables dynamic highlighting of the important event-related parts of a text that evoke emotions.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "In general, emotion detection methods can mainly be categorized into two classes: lexicon-based methods and learning-based methods. Lexicon-based approaches utilize emotion lexicons, which pair emotion words with their emotion labels, for detecting emotions from texts. For example, emotion lexicons are used in (Aman and Szpakowicz, 2007) to distinguish emotional and non-emotional sentences. Emotion dictionaries could also be used to predict readers' emotions towards news articles (Rao et al., 2012; Lei et al., 2014) . Subsequent work proposed a model with several constraints, using non-negative matrix factorization based on an emotion lexicon, for multiple emotion detection. However, these approaches often suffer from low recall.", "cite_spans": [ { "start": 307, "end": 334, "text": "(Aman and Szpakowicz, 2007)", "ref_id": "BIBREF0" }, { "start": 477, "end": 495, "text": "(Rao et al., 2012;", "ref_id": "BIBREF17" }, { "start": 496, "end": 513, "text": "Lei et al., 2014)", "ref_id": "BIBREF6" } ], "ref_spans": [], "eq_spans": [], "section": "Related Work", "sec_num": "2" }, { "text": "Learning-based approaches can be further categorized into unsupervised and supervised learning methods. Unsupervised learning approaches do not require labeled training data (Blei et al., 2003) . Supervised learning methods typically frame emotion detection as a classification problem by training supervised classifiers from texts with emotion categories (Wang and Pal, 2015; Rao, 2016) . Lin et al. (2008) studied readers' emotion detection with various kinds of feature sets on news articles. Quan et al. (2015) detected emotions from texts with a logistic regression model, introducing intermediate hidden variables to model the latent structure of input text corpora. Zhou et al. 
(2016) predicted multiple emotions with intensities based on emotion distribution learning. A relevant label ranking framework for emotion detection was proposed to predict multiple relevant emotions as well as the rankings of emotions based on their intensities (Yang et al., 2018) . However, these approaches do not model the latent events in texts.", "cite_spans": [ { "start": 174, "end": 193, "text": "(Blei et al., 2003)", "ref_id": "BIBREF1" }, { "start": 356, "end": 375, "text": "Wang and Pal, 2015;", "ref_id": "BIBREF21" }, { "start": 376, "end": 386, "text": "Rao, 2016)", "ref_id": "BIBREF15" }, { "start": 389, "end": 406, "text": "Lin et al. (2008)", "ref_id": "BIBREF7" }, { "start": 499, "end": 517, "text": "Quan et al. (2015)", "ref_id": "BIBREF14" }, { "start": 679, "end": 697, "text": "Zhou et al. (2016)", "ref_id": "BIBREF29" }, { "start": 954, "end": 973, "text": "(Yang et al., 2018;", "ref_id": "BIBREF22" } ], "ref_spans": [], "eq_spans": [], "section": "Related Work", "sec_num": "2" }, { "text": "In recent years, deep neural network models have been widely used for text classification. In particular, attention-based recurrent neural networks (RNNs) (Schuster and Paliwal, 2002; Yang et al., 2016) prevail in text classification. However, these approaches ignore the latent events in texts and thus fail to attend to event-related parts. Moreover, they lack interpretability.", "cite_spans": [ { "start": 159, "end": 187, "text": "(Schuster and Paliwal, 2002;", "ref_id": "BIBREF18" }, { "start": 188, "end": 206, "text": "Yang et al., 2016)", "ref_id": "BIBREF23" } ], "ref_spans": [], "eq_spans": [], "section": "Related Work", "sec_num": "2" }, { "text": "Our work is partly inspired by (Yang et al., 2018) for relevant emotion ranking, but with the following significant differences: (1) our model incorporates corpus-level event embeddings and document-level event distributions via an event-driven attention mechanism attending to event-related words, which are ignored in the model of (Yang et al., 2018). (Figure 2 : The IRER-EA model.)", "cite_spans": [ { "start": 31, "end": 50, "text": "(Yang et al., 2018)", "ref_id": "BIBREF22" } ], "ref_spans": [ { "start": 326, "end": 334, "text": "Figure 2", "ref_id": null } ], "eq_spans": [], "section": "Related Work", "sec_num": "2" }, { "text": "That model (Yang et al., 2018 ) simply uses a Kullback-Leibler (KL) divergence to approximately learn the documents' topic distributions;", "cite_spans": [ { "start": 3, "end": 21, "text": "(Yang et al., 2018", "ref_id": "BIBREF22" } ], "ref_spans": [], "eq_spans": [], "section": "Related Work", "sec_num": "2" }, { "text": "(2) our model incorporates the event information into a deep learning architecture and can thus exploit the sequential information of texts, which is ignored in the model of (Yang et al., 2018) based on shallow bag-of-words representations.", "cite_spans": [ { "start": 167, "end": 186, "text": "(Yang et al., 2018)", "ref_id": "BIBREF22" } ], "ref_spans": [], "eq_spans": [], "section": "Related Work", "sec_num": "2" }, { "text": "3 The IRER-EA Model", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Related Work", "sec_num": "2" }, { "text": "Assuming a set of $T$ emotions, $L = 
\\{l_1, l_2, \\ldots, l_T\\}$, and a set of", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Problem Setting", "sec_num": "3.1" }, { "text": "$Q$ document instances, $D = \\{d_1, d_2, d_3, \\ldots, d_Q\\}$. Each instance $d_i$ is associated with a list of its relevant emotions $R_i \\subseteq L$ ranked by their intensities and also a list of irrelevant emotions $\\bar{R}_i = L - R_i$. Relevant emotion ranking aims to learn a score function $g(d_i) = [g_1(d_i), \\ldots, g_T(d_i)]$ which assigns a score $g_j(d_i)$ to each emotion $l_j$, $j \\in \\{1, \\ldots, T\\}$.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Problem Setting", "sec_num": "3.1" }, { "text": "Relevant emotions and their rankings can be obtained simultaneously according to the scores assigned by the learned ranking function $g$.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Problem Setting", "sec_num": "3.1" }, { "text": "The learning objective of relevant emotion ranking (RER) is to both discriminate relevant emotions from irrelevant ones and to rank the relevant emotions according to their intensities. Therefore, to fulfil the requirements of RER, the global objective function is defined as follows:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Problem Setting", "sec_num": "3.1" }, { "text": "$E = \\sum_{i=1}^{Q} \\sum_{l_t \\in R_i} \\sum_{l_s \\in \\prec(l_t)} \\frac{1}{norm_{t,s}} \\left[ \\exp(-(g_t(d_i) - g_s(d_i))) + \\omega_{ts} (g_t(d_i) - g_s(d_i))^2 \\right] \\quad (1)$", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Problem Setting", "sec_num": "3.1" }, { "text": "Here, $l_s \\in \\prec(l_t)$ denotes that emotion $l_s$ is less relevant than emotion $l_t$. The normalization term $norm_{t,s}$ prevents the objective from being dominated by emotion pairs whose label sets are large. The term $g_t(d_i) - g_s(d_i)$ measures the score difference between the two emotions. $\\omega_{ts}$ represents the relationship between emotions $l_t$ and $l_s$, calculated by the Pearson correlation coefficient (Nicewander, 1988) .", "cite_spans": [ { "start": 167, "end": 185, "text": "(Nicewander, 1988)", "ref_id": "BIBREF9" } ], "ref_spans": [], "eq_spans": [], "section": "Problem Setting", "sec_num": "3.1" }, { "text": "We present the overall architecture of the proposed interpretable relevant emotion ranking with event-driven attention (IRER-EA) model in Figure 2. It consists of four layers: (1) the Input Embedding Layer, including both word embeddings and event embeddings; (2) the Encoder Layer, including both the word encoder and the event encoder; (3) the Attention Layer, which computes the word-level attention scores and the event-driven attention scores taking into account the corpus-level and document-level event information, respectively; (4) the Output Layer, which generates the emotion ranking results.", "cite_spans": [], "ref_spans": [ { "start": 138, "end": 144, "text": "Figure", "ref_id": null } ], "eq_spans": [], "section": "Problem Setting", "sec_num": "3.1" }, { "text": "The Input Embedding Layer contains word embeddings and event embeddings. 
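Before moving to the inputs, a minimal sketch may help make the objective in Eq. (1) concrete. The following is an illustration under the definitions of Section 3.1 rather than the authors' implementation; in particular, the normalization term $norm_{t,s}$ is simplified here to the total number of ranked pairs.

```python
import numpy as np

def rer_loss(g, ranked_pairs, omega):
    """Pairwise RER objective of Eq. (1) for a single document.

    g            : (T,) array of scores g_j(d_i), one score per emotion.
    ranked_pairs : list of (t, s) index pairs in which emotion l_t should
                   outrank emotion l_s (l_s is less relevant than l_t).
    omega        : (T, T) Pearson correlation matrix between emotions.
    """
    norm = max(len(ranked_pairs), 1)  # simplified stand-in for norm_{t,s}
    loss = 0.0
    for t, s in ranked_pairs:
        diff = g[t] - g[s]  # positive when the pair is ordered correctly
        loss += (np.exp(-diff) + omega[t, s] * diff ** 2) / norm
    return loss
```

Summed over all documents, this gives the global objective $E$: the exponential term penalizes mis-ordered pairs, while the correlation-weighted quadratic term discourages large score gaps between strongly correlated emotions. 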
Assuming a document $d_i$ consisting of $N$ words represented as", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Input Embedding Layer", "sec_num": "3.2" }, { "text": "$d_i = \\{w_1, w_2, \\ldots, w_N\\}$, the pre-trained GloVe word vectors (Pennington et al., 2014) are used to obtain the fixed word embedding of each word, and $d_i$ can then be represented as $d_i = \\{x_1, x_2, \\ldots, x_N\\}$, as shown in Figure 2 .", "cite_spans": [ { "start": 46, "end": 71, "text": "(Pennington et al., 2014)", "ref_id": "BIBREF11" } ], "ref_spans": [ { "start": 160, "end": 168, "text": "Figure 2", "ref_id": null } ], "eq_spans": [], "section": "Input Embedding Layer", "sec_num": "3.2" }, { "text": "Since nouns and verbs are more important than other word types in referring to specific events, they are utilized as the inputs to a topic model such as Latent Dirichlet Allocation (LDA) (Blei et al., 2003) to generate events automatically. The granularity of the extracted events is therefore controlled by the predefined number of events $K$. For the corpus $D$ consisting of $K$ events $\\{e_1, e_2, \\ldots, e_K\\}$, the event embedding of the $k$th event $e_k$ can be obtained from the output event-word distribution matrix $E$ of the topic model. For a single document $d_i$, the event distribution $p = (p_1, p_2, \\ldots, p_K)$ obtained from the topic model represents the probability of the text expressing each event.", "cite_spans": [ { "start": 181, "end": 200, "text": "(Blei et al., 2003)", "ref_id": "BIBREF1" } ], "ref_spans": [], "eq_spans": [], "section": "Input Embedding Layer", "sec_num": "3.2" }, { "text": "Figure 3 : Word Encoder.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Encoder Layer", "sec_num": "3.3" }, { "text": "The Encoder Layer contains both the word encoder and the event encoder. As for the word encoder, an alternative RNN structure (Zhang et al., 2018) is used to encode texts into semantic representations since it has been shown to be more effective in encoding longer texts. For document $d_i$, formally, a state at time step $t$ can be denoted by:", "cite_spans": [ { "start": 122, "end": 142, "text": "(Zhang et al., 2018)", "ref_id": "BIBREF27" } ], "ref_spans": [], "eq_spans": [], "section": "Encoder Layer", "sec_num": "3.3" }, { "text": "$H^t = \\langle h_1^t, \\ldots, h_N^t, h_q^t \\rangle \\quad (2)$", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Encoder Layer", "sec_num": "3.3" }, { "text": "which consists of sub-states $h_i^t$ for the $i$th word $w_i$ in document $d_i$ and a document-level sub-state $h_q^t$, as shown in Figure 3 . The hidden states are independent of each other within the current recurrent step and are connected across recurrent steps, which can capture long-range dependencies. The recurrent state transition process models information exchange between those sub-states to enrich the state representations incrementally. 
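To make this exchange concrete, below is a deliberately simplified, gate-free sketch of a single transition step (NumPy; the weight names W, U, V, G and the mean-pooled document update are illustrative assumptions, and the actual model uses LSTM-style gated updates, as described next):

```python
import numpy as np

def transition_step(x, h, h_q, W, U, V, G, b):
    """One simplified state transition over H^t = <h_1, ..., h_N, h_q>.

    x   : (N, d) word embeddings x_i.
    h   : (N, d) word sub-states from the previous recurrent step.
    h_q : (d,)   document-level sub-state from the previous step.
    Every word state is refreshed from its embedding, its left and right
    neighbours (window size 1) and the global document state, so
    information spreads one position further at each step.
    """
    pad = np.zeros((1, h.shape[1]))
    left = np.vstack([pad, h[:-1]])    # neighbour h_{i-1}
    right = np.vstack([h[1:], pad])    # neighbour h_{i+1}
    h_new = np.tanh(x @ W + (left + h + right) @ U + h_q @ V + b)
    h_q_new = np.tanh(h_new.mean(axis=0) @ G)  # refresh document sub-state
    return h_new, h_q_new
```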
The state transition is similar to that of an LSTM (Hochreiter and Schmidhuber, 1997 ), with a recurrent cell $c_i^t$ for each word $w_i$ and a cell $c_q^t$ for the document-level sub-state $h_q$. The value of each $h_i^t$ is computed based on the values of", "cite_spans": [ { "start": 484, "end": 517, "text": "(Hochreiter and Schmidhuber, 1997", "ref_id": "BIBREF4" } ], "ref_spans": [ { "start": 121, "end": 129, "text": "Figure 3", "ref_id": null } ], "eq_spans": [], "section": "Encoder Layer", "sec_num": "3.3" }, { "text": "$x_i$, $h_{i-1}^{t-1}$, $h_i^{t-1}$, $h_{i+1}^{t-1}$, $h_q^{t-1}$", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Encoder Layer", "sec_num": "3.3" }, { "text": "at two adjacent recurrent time steps. Note that the window size between two adjacent steps can be set manually. Hence, the hidden sub-states $h_i$ for the individual words $w_i$ and a global document hidden state $h_q$ for $d_i$ are obtained.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Encoder Layer", "sec_num": "3.3" }, { "text": "As for the event encoder, event representations are produced by ReLU-activated neural perceptrons taking the event-word weight matrix $E \\in \\mathbb{R}^{V \\times K}$ as input. Hence, each event representation $s_k$ representing event $k$ is obtained from the event embedding $e_k$, $k \\in \\{1, 2, 3, \\ldots, K\\}$.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Encoder Layer", "sec_num": "3.3" }, { "text": "Given a word $w_n$ in document $d_i$, $h_n$ is the hidden representation of $w_n$ after the encoder. Given an event embedding $e_k$ in the corpus, $s_k$ is the event representation of $e_k$ generated by the event encoder. We then utilize attention weights to enhance the word representations and event representations from different aspects.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Attention Layer", "sec_num": "3.4" }, { "text": "Our model contains two kinds of attention mechanisms: word-level attention and event-driven attention.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Attention Layer", "sec_num": "3.4" }, { "text": "As for word-level attention, since not all words contribute equally to the meaning of a document, we introduce an attention mechanism to extract words with greater importance and aggregate the representations of those informative words to form the document representation, as shown in the left part of Figure 2 . More concretely,", "cite_spans": [], "ref_spans": [ { "start": 309, "end": 317, "text": "Figure 2", "ref_id": null } ], "eq_spans": [], "section": "Word-Level Attention", "sec_num": "3.4.1" }, { "text": "$\\phi_i^w = \\tanh(W_w (h_i + h_q) + b_w), \\quad a_i^w = \\frac{\\exp((\\phi_i^w)^\\top u_w)}{\\sum_i \\exp((\\phi_i^w)^\\top u_w)}, \\quad r^w = \\sum_i a_i^w h_i \\quad (3)$", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Word-Level Attention", "sec_num": "3.4.1" }, { "text": "where the weight $a_i^w$ is the attention of the word $w_i$, and $W_w$, $b_w$ and $u_w$ are parameters similar to (Pappas and Popescu-Belis, 2017) . 
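A minimal sketch of this computation (NumPy, single document, no batching; an illustration of Eq. (3), not the authors' code):

```python
import numpy as np

def softmax(z):
    z = z - z.max()  # subtract the max for numerical stability
    return np.exp(z) / np.exp(z).sum()

def word_attention(h, h_q, W_w, b_w, u_w):
    """Word-level attention of Eq. (3).

    h   : (N, d) encoded word states h_i.
    h_q : (d,)   global document state.
    W_w : (d, d), b_w : (d,), u_w : (d,) are the learnable parameters.
    Returns r_w, the attended text representation of shape (d,).
    """
    phi = np.tanh((h + h_q) @ W_w.T + b_w)  # (N, d) hidden projection
    a = softmax(phi @ u_w)                  # (N,) word attention weights
    return a @ h                            # weighted sum of word states
```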
Note that we further incorporate the global information of the document representation $h_q$ obtained from the encoder to strengthen the word attention.", "cite_spans": [ { "start": 104, "end": 136, "text": "(Pappas and Popescu-Belis, 2017)", "ref_id": "BIBREF10" } ], "ref_spans": [], "eq_spans": [], "section": "Word-Level Attention", "sec_num": "3.4.1" }, { "text": "In our model, we use the event-driven attention mechanism to attend to event-related words, which can discover the words most relevant to the events in a text. The event-driven attention leverages the corpus-level event information, based on each event representation $s_k$, $k \\in \\{1, 2, 3, \\ldots, K\\}$, obtained from the corpus, and the document-level event information, based on the document's event distribution $p = (p_1, p_2, \\ldots, p_K)$.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Event-Driven Attention", "sec_num": "3.4.2" }, { "text": "The model utilizes the corpus-level event information via a joint attention mechanism that considers global events in the corpus: it aggregates the semantic representations $h = (h_1, h_2, \\ldots, h_N)$ of an input text and measures the interaction of the words in the text with the event representations $s = (s_1, s_2, \\ldots, s_K)$ through the event-driven attention. The corpus-level event-driven attention is calculated as follows:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Corpus-level Event-Driven Attention", "sec_num": null }, { "text": "EQUATION", "cite_spans": [], "ref_spans": [], "eq_spans": [ { "start": 0, "end": 8, "text": "EQUATION", "ref_id": "EQREF", "raw_str": "\\phi^c = \\tanh(W_c h + b_c), \\quad m_k^c = (\\phi^c)^\\top s_k, \\quad a^c = \\mathrm{softmax}(\\sum_{k=1}^{K} m_k^c), \\quad r^c = (a^c)^\\top h", "eq_num": "(4)" } ], "section": "Corpus-level Event-Driven Attention", "sec_num": null }, { "text": "where $h = (h_1, h_2, \\ldots, h_N)$ stands for the combination of all the hidden states of the words in the document, and $W_c$ and $b_c$ are parameters to be learned for the corpus-level event-driven attention. $\\phi^c = (\\phi_1^c, \\phi_2^c, \\ldots, \\phi_N^c)$ refers to the hidden representation of the state $h$ through a fully connected layer. Given the event representation $s_k$, we measure the interaction of the words in the document with the event by an attention weight vector $m_k^c$, computed as the inner product of the event $s_k$ and $\\phi^c$, followed by a softmax layer. $a^c = (a_1^c, a_2^c, \\ldots, a_N^c)$ stands for the attention weights of the words averaged over all the events, which contribute to discovering the event keywords of a document according to the different events in the corpus. We then construct the text representation $r^c$ as the sum of the hidden states weighted by $a^c$. Document-level Event-driven Attention", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Corpus-level Event-Driven Attention", "sec_num": null }, { "text": "We further incorporate the document-level event-driven attention mechanism. Our model can attend to the event distribution of the current document in order to strengthen the effect of the current document expressing each event and learn document-specific event-related attention. For each document, $p = (p_1, p_2, \\ldots, p_K)$ denotes the event distribution of the document, with each dimension representing the level of prominence of the corresponding event occurring in the document. The corpus-level event-driven attention weights can be further strengthened by including the document-level event distribution. 
The document-level event-driven attention is calculated as follows:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Corpus-level Event-Driven Attention", "sec_num": null }, { "text": "$\\phi^d = \\tanh(W_d h + b_d), \\quad m_k^d = (\\phi^d)^\\top s_k, \\quad a^e = \\mathrm{softmax}(\\sum_{k=1}^{K} m_k^d p_k)$", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Corpus-level Event-Driven Attention", "sec_num": null }, { "text": "$r^e = (a^e)^\\top h$", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Corpus-level Event-Driven Attention", "sec_num": null }, { "text": "where $h = (h_1, h_2, \\ldots, h_N)$ stands for the aggregation of all the hidden states of the words in the document, and $W_d$ and $b_d$ are parameters to be learned for the document-level event-driven attention. $\\phi^d = (\\phi_1^d, \\phi_2^d, \\ldots, \\phi_N^d)$ refers to the hidden representation of the state $h$ through a fully connected layer. $m_k^d$ represents the interaction of the words in the document with the event, computed as the inner product of the event $s_k$ and $\\phi^d$. Then $m_k^d$ is weighted by the document-level event distribution $p = (p_1, p_2, \\ldots, p_K)$ and followed by a softmax layer, and $a^e = (a_1^e, a_2^e, \\ldots, a_N^e)$ stands for the attention weights after incorporating the document-level event distribution. We then construct the text representation $r^e$ as the sum of the hidden states weighted by $a^e$. Finally, $r^e$ is used as the final text representation obtained by the event-driven attention, which simultaneously takes into account both the corpus-level and the document-level event information.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Corpus-level Event-Driven Attention", "sec_num": null }, { "text": "At last, we concatenate the representations calculated by the word-level attention and the event-driven attention to obtain the final representation $r = [r^w, r^e]$, which is fed to a multi-layer perceptron and a softmax layer for identifying the relevant emotions and their rankings.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Output Layer", "sec_num": "3.5" }, { "text": "To evaluate our proposed approach, we conducted experiments on the following three corpora: Sina Social News (News) (Zhou et al., 2018) consists of 5,586 news articles collected from the Sina News Society channel. Each document was kept together with the readers' emotion votes over the six emotions Funny, Moved, Angry, Sad, Strange, and Shocked. The Ren-CECps corpus (Blogs) (Quan and Ren, 2010) is a Chinese data set containing 1,487 blogs annotated with eight basic emotions from the writer's perspective: Anger, Anxiety, Expect, Hate, Joy, Love, Sorrow and Surprise. The emotions are represented by their emotion scores in the range of [0, 1]; higher scores represent higher emotion intensities. SemEval (Strapparava and Mihalcea, 2007) contains 1,250 English news headlines extracted from Google News, CNN, and many other portals, which are manually annotated with a fine-grained valence scale of 0 to 100 across 6 emotions: Anger, Disgust, Fear, Joy, Sad and Surprise. 
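For corpora of this kind, the relevant emotion set and its ranking must first be derived from the raw votes or scores. The paper does not spell out its thresholding rule, so the following preprocessing sketch is purely hypothetical (the threshold of three votes and the vote counts are invented for illustration):

```python
def votes_to_ranking(votes, min_votes=3):
    """Split emotion votes into relevant emotions, ranked by intensity,
    and irrelevant emotions, treating rarely-voted labels as noise."""
    relevant = sorted((e for e, v in votes.items() if v >= min_votes),
                      key=votes.get, reverse=True)
    irrelevant = [e for e, v in votes.items() if v < min_votes]
    return relevant, irrelevant

# Invented vote counts in the style of Figure 1:
votes = {"Funny": 1, "Moved": 2, "Angry": 120, "Sad": 45,
         "Strange": 1, "Shocked": 60}
print(votes_to_ranking(votes))
# (['Angry', 'Shocked', 'Sad'], ['Funny', 'Moved', 'Strange'])
```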
The statistics for the three corpora used in our experiments are shown in Table 1 .", "cite_spans": [ { "start": 719, "end": 751, "text": "(Strapparava and Mihalcea, 2007)", "ref_id": "BIBREF20" } ], "ref_spans": [ { "start": 1070, "end": 1077, "text": "Table 1", "ref_id": "TABREF2" } ], "eq_spans": [], "section": "Experiments", "sec_num": "4" }, { "text": "In our experiments, the News and Blogs corpora were preprocessed using the python jieba segmenter 1 for word segmentation and filtering. The third corpus, SemEval, is in English and was tokenized by white space. Stanford CoreNLP 2 was applied for part-of-speech tagging to obtain the nouns and verbs of the documents. Stop words and words appearing fewer than two times were removed from the documents. We used the pre-trained Chinese GloVe and English GloVe 3 vectors as the word embeddings in the experiments, and the dimension of the word embeddings was 300.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Experiments", "sec_num": "4" }, { "text": "1 https://github.com/fxsjy/jieba 2 https://stanfordnlp.github.io/CoreNLP/index.html 3 https://nlp.stanford.edu/projects/glove/", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Experiments", "sec_num": "4" }, { "text": "Table 2 lists the evaluation measures, where $l_{t,s}$ is a modified 0-1 error and $norm_{t,s}$ is the set size of the label pair $(e_t, e_s)$: Hamming Loss $= \\frac{1}{nT} \\sum_{i=1}^{n} |R_i \\Delta \\hat{R}_i|$, where $\\hat{R}_i$ denotes the predicted relevant emotions; Ranking Loss $= \\frac{1}{n} \\sum_{i=1}^{n} \\frac{1}{|R_i||\\bar{R}_i|} |\\{(e_t, e_s) \\in R_i \\times \\bar{R}_i \\mid g_t(x_i) \\leq g_s(x_i)\\}|$; Average Precision $= \\frac{1}{n} \\sum_{i=1}^{n} \\frac{1}{|R_i|} \\sum_{e_t \\in R_i} \\frac{|\\{e_s \\in R_i \\mid g_s(x_i) \\geq g_t(x_i)\\}|}{|\\{e_s \\mid g_s(x_i) \\geq g_t(x_i)\\}|}$; Coverage $= \\frac{1}{n} \\sum_{i=1}^{n} \\max_{t: e_t \\in R_i} |\\{e_s \\mid g_s(x_i) > g_t(x_i)\\}|$; $F1_{exam} = \\frac{1}{n} \\sum_{i=1}^{n} \\frac{2|R_i \\cap \\hat{R}_i|}{|R_i| + |\\hat{R}_i|}$.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Experiments", "sec_num": "4" }, { "text": "MicroF1 $= F1(\\sum_{t=1}^{T} TP_t, \\sum_{t=1}^{T} FP_t, \\sum_{t=1}^{T} TN_t, \\sum_{t=1}^{T} FN_t)$ and MacroF1 $= \\frac{1}{T} \\sum_{t=1}^{T} F1(TP_t, FP_t, TN_t, FN_t)$, where $F1(TP_t, FP_t, TN_t, FN_t)$ represents the standard binary classification metric F1 (Manning et al., 2008) .", "cite_spans": [ { "start": 51, "end": 73, "text": "(Manning et al., 2008)", "ref_id": "BIBREF8" } ], "ref_spans": [], "eq_spans": [], "section": "MicroF1", "sec_num": null }, { "text": "The event embeddings and event distributions used in the proposed method are derived in different ways. For long documents, i.e., News and Blogs, LDA was employed to generate the event embeddings and event distributions using verbs and nouns as the input. For the short texts in SemEval, which suffer from the sparsity problem, the Bi-term Topic Model (BTM) (Cheng et al., 2014) was chosen. The number of topics was 60. The parameters were chosen on the validation set, which is 10% of the training set. The encoder was trained using a learning rate of 0.001, a dropout rate of 0.5, a window size of 1 and 3 layers. The number of epochs was 10 and the mini-batch (Cotter et al., 2011 ) size was 16. 
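As an illustration of how the event inputs of Section 3.2 could be derived in practice, here is a sketch assuming gensim's LdaModel; the helper function and its name are hypothetical, and the paper does not tie itself to a particular toolkit:

```python
from gensim import corpora, models

def build_event_inputs(docs_nv, num_events=60):
    """Derive the event-word matrix E and the per-document event
    distributions p from a topic model, as in Section 3.2.
    `docs_nv` holds only the nouns and verbs of each document."""
    dictionary = corpora.Dictionary(docs_nv)
    bows = [dictionary.doc2bow(doc) for doc in docs_nv]
    lda = models.LdaModel(bows, id2word=dictionary, num_topics=num_events)

    # Event embeddings: K x V event-word distribution matrix
    # (transpose if the V x K orientation of Section 3.3 is needed).
    E = lda.get_topics()

    # Document-level event distribution p for every document.
    p = [lda.get_document_topics(bow, minimum_probability=0.0)
         for bow in bows]
    return E, p
```

For the short SemEval headlines the paper swaps LDA for the Bi-term Topic Model (BTM), but the derived quantities play the same role. 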
For each method, 10-fold cross validation was conducted to obtain the final results.", "cite_spans": [ { "start": 335, "end": 355, "text": "(Cheng et al., 2014)", "ref_id": "BIBREF2" }, { "start": 652, "end": 672, "text": "(Cotter et al., 2011", "ref_id": "BIBREF3" } ], "ref_spans": [], "eq_spans": [], "section": "MicroF1", "sec_num": null }, { "text": "The baselines can be categorized into two classes: emotion detection methods and multi-label methods. Most of these baselines are either reimplemented or cited from published papers. For instance, the results of the multi-label methods are reimplemented, since those methods were not proposed for relevant emotion ranking. The performances of some emotion detection methods, such as EDL, EmoDetect, RER and INN-RER, are cited from the published paper (Yang et al., 2018) , as they use the same experimental data as ours.", "cite_spans": [ { "start": 13, "end": 32, "text": "(Yang et al., 2018)", "ref_id": "BIBREF22" } ], "ref_spans": [], "eq_spans": [], "section": "MicroF1", "sec_num": null }, { "text": "Table 3 : 'HL' represents Hamming Loss, 'RL' represents ranking loss, 'OE' represents one error, 'AP' represents average precision, 'Cov' represents coverage, 'F1' represents $F1_{exam}$, 'MiF1' represents MicroF1, 'MaF1' represents MacroF1. \"\u2193\" indicates \"the smaller the better\", while \"\u2191\" indicates \"the larger the better\". The best performance on each evaluation measure is highlighted in boldface.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "MicroF1", "sec_num": null }, { "text": "Evaluation metrics typically used in multi-label learning and label ranking are employed in our experiments; they are different from those of classical single-label learning systems (Sebastiani, 2001 ). The detailed explanations of the evaluation metrics are presented in Table 2 .", "cite_spans": [ { "start": 182, "end": 199, "text": "(Sebastiani, 2001", "ref_id": "BIBREF19" } ], "ref_spans": [ { "start": 268, "end": 275, "text": "Table 2", "ref_id": "TABREF4" } ], "eq_spans": [], "section": "MicroF1", "sec_num": null }, { "text": "There are several emotion detection approaches addressing multiple emotion detection from texts.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Compared Methods", "sec_num": "4.1" }, { "text": "\u2022 Emotion Distribution Learning (EDL) (Zhou et al., 2016 ) learns a mapping function from sentences to their emotion distributions.", "cite_spans": [ { "start": 37, "end": 55, "text": "(Zhou et al., 2016", "ref_id": "BIBREF29" } ], "ref_spans": [], "eq_spans": [], "section": "Compared Methods", "sec_num": "4.1" }, { "text": "\u2022 EmoDetect (Wang and Pal, 2015 ) employs a constrained optimization framework with several constraints to obtain multiple emotions.", "cite_spans": [ { "start": 12, "end": 31, "text": "(Wang and Pal, 2015", "ref_id": "BIBREF21" } ], "ref_spans": [], "eq_spans": [], "section": "Compared Methods", "sec_num": "4.1" }, { "text": "\u2022 RER uses support vector machines to predict the relevant emotions and their rankings in a text based on their intensities.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Compared Methods", "sec_num": "4.1" }, { "text": "\u2022 INN-RER (Yang et al., 2018 ) designs a three-layer network combined with a topic model to solve relevant emotion ranking.", "cite_spans": [ { "start": 10, "end": 28, "text": "(Yang et al., 2018", "ref_id": "BIBREF22" } ], "ref_spans": [], "eq_spans": [], "section": "Compared Methods", "sec_num": "4.1" }, { "text": "Relevant emotion ranking can be 
treated as an extension of multi-label learning, so we also compare the proposed IRER-EA with several widely-used multi-label learning methods.", "cite_spans": [], "ref_spans": [ { "start": 97, "end": 104, "text": "Table 4", "ref_id": null } ], "eq_spans": [], "section": "Compared Methods", "sec_num": "4.1" }, { "text": "Table 4 : Comparison of different IRER-EA components. \"\u2193\" indicates \"the smaller the better\", while \"\u2191\" indicates \"the larger the better\". The best performance on each evaluation measure is highlighted in boldface.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Compared Methods", "sec_num": "4.1" }, { "text": "\u2022 LIFT (Zhang, 2011) constructs features specific to each label. \u2022 BP-MLL (Zhang and Zhou, 2006 ) incorporates a novel error function into the back-propagation algorithm to capture the characteristics of multi-label learning. For the MLL methods, a linear kernel is used in LIFT, and Rank-SVM uses the RBF kernel with width $\\sigma$ equal to 1.", "cite_spans": [ { "start": 7, "end": 20, "text": "(Zhang, 2011)", "ref_id": "BIBREF24" }, { "start": 74, "end": 95, "text": "(Zhang and Zhou, 2006", "ref_id": "BIBREF25" } ], "ref_spans": [], "eq_spans": [], "section": "Compared Methods", "sec_num": "4.1" }, { "text": "Experimental results on the three corpora are shown in Table 3 . It can be summarized from the table that: (1) IRER-EA performs better than the state-of-the-art emotion detection baselines on almost all evaluation metrics across the three corpora, which clearly shows the effectiveness of incorporating event information to obtain event-driven attention for relevant emotion ranking; (2) IRER-EA achieves remarkably better results than the MLL methods, which further confirms the effectiveness of IRER-EA's deep learning architecture incorporating event-driven attention.", "cite_spans": [], "ref_spans": [ { "start": 51, "end": 58, "text": "Table 3", "ref_id": "TABREF6" } ], "eq_spans": [], "section": "Compared Methods", "sec_num": "4.1" }, { "text": "To further validate the effectiveness of the event-driven attention components, we compare IRER-EA with two sub-networks based on our architecture.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Model Analysis", "sec_num": "4.2" }, { "text": "\u2022 IRER-EA(-EA): removes the event-driven attention from IRER-EA.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Model Analysis", "sec_num": "4.2" }, { "text": "\u2022 IRER-EA(-DEA): removes the document-level event-driven attention from IRER-EA.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Model Analysis", "sec_num": "4.2" }, { "text": "Experimental results on the three corpora are shown in Table 4 . 
It can be summarized from the table that: (1) None of the sub-networks can compete with IRER-EA on the three corpora, which indicates that the corpus-level and document-level event information are effective for the relevant emotion ranking task; (2) IRER-EA(-DEA) performs better than IRER-EA(-EA) on most of the evaluation metrics, which verifies the effectiveness of incorporating the corpus-level event-driven attention; (3) IRER-EA achieves better results than IRER-EA(-DEA) on almost all the evaluation metrics, which further proves the effectiveness of the document-level event-driven attention.", "cite_spans": [], "ref_spans": [ { "start": 51, "end": 58, "text": "Table 4", "ref_id": null } ], "eq_spans": [], "section": "Model Analysis", "sec_num": "4.2" }, { "text": "To further investigate whether the event-driven attention is able to capture the event-associated words in a given document and provide interpretable results, we compare the word-level and event-driven attention mechanisms by visualizing the weights of words in the same documents, as shown in Figure 4 . As the documents of the News and Blogs corpora are too long, we manually simplified the texts for better visualization and provided English translations of the texts. The words marked in red represent highly-weighted ones according to the event-driven attention, while the words with blue underlines are the ones with higher attention weights according to the word-level attention.", "cite_spans": [], "ref_spans": [ { "start": 337, "end": 345, "text": "Figure 4", "ref_id": "FIGREF2" } ], "eq_spans": [], "section": "Case Study of Interpretability", "sec_num": "4.3" }, { "text": "News \u53e4\u7434\u53f0\u9644\u8fd1\u4e00\u540d\u5973\u5b50\u8f7b\u751f\u6eba\u4ea1\u3002\u4ece\u5979\u8d70\u8fdb\u6c34\u91cc\u5230\u6700\u7ec8\u6eba\u4ea1\uff0c\u6301\u7eed\u65f6\u95f4\u5c06\u8fd1\u4e24\u5206\u949f\u3002\u5f53\u65f6\u6709 10 \u4f59\u4eba\u56f4\u89c2\uff0c\u7adf\u65e0\u4eba\u4e0b\u6c34\u65bd\u6551\u3002\u8fd9\u79cd\u73b0\u8c61\u8ba9\u6211\u4eec\u5bd2\u5fc3\u3002 A woman drowned near Guqin Terrace. It took nearly two minutes from the moment she stepped into the water until she drowned. More than 10 people were watching her, but no one went into the water to rescue her. 
Such a phenomenon chills us. (Emotions: Angry, Sad, Shocked)", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Corpora Texts Emotions", "sec_num": null }, { "text": "Blog \u5730\u9707\u8fc7\u540e\u4e24\u767e\u4e2a\u5c0f\u65f6\u91cc\uff0c\u6bcf\u4e00\u5f20\u524d\u6cbf\u7684\u7167\u7247\uff0c\u6bcf\u4e00\u6bb5\u83b7\u6551\u7684\u89c6\u9891\uff0c\u6bcf\u4e2a\u7238\u7238\u5988\u5988\u53d1\u81ea\u5fc3\u5e95\u7684\u75db\u54ed\uff0c\u90fd\u4ee4\u6211\u5fc3\u75db\u3002 During the two hundred hours after the earthquake, every photo from the front line, every rescue video, and every parent's heartfelt cry broke my heart. (Emotions: Sorrow, Love)", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Angry Sad Shocked", "sec_num": null }, { "text": "SemEval teacher in hide after attack on islam stir threat (Emotions: Fear, Sad)", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Sorrow Love", "sec_num": null }, { "text": "From the visualization results on an example News article, it can be observed that, unlike the word-level attention, which pays more attention to emotion-associated words such as 'chill' that may only evoke the emotion \"Sad\", the event-driven attention can find words indicating latent events in the document, such as 'drown', 'death' and 'no one rescue', which are all closely related to the event \"Suicide without rescue\" and may evoke emotions such as \"Angry\" and \"Shocked\". In an example Blog article, the word-level attention highlights emotion-associated words such as 'crying' and 'broken', which may evoke the emotion \"Sorrow\", while the event-driven attention focuses on event-related words such as 'earthquake' and 'rescued', representing the event \"Earthquake Relief\". Finally, in an example from the SemEval corpus, we can see that the word-level attention mechanism only gives a higher attention weight to the word 'threat' and ignores the word 'attack', which is also an important indicator of the emotions \"Fear\" and \"Sad\". On the contrary, the event-driven attention mechanism highlights both 'attack' and 'threat', representing the event \"Terrorist attack\".", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Sorrow Love", "sec_num": null }, { "text": "In summary, we can observe from Figure 4 that: (1) Event-driven attention can capture words representing latent events in texts; (2) Compared with the word-level attention, which is prone to attending to emotion-associated keywords, event-driven attention can find words representing one or more hidden events in a document, which provides more explainable clues as to which event triggers certain emotions; (3) Event-driven attention can achieve better performance, especially on documents without any emotion-associated words.", "cite_spans": [], "ref_spans": [ { "start": 32, "end": 40, "text": "Figure 4", "ref_id": "FIGREF2" } ], "eq_spans": [], "section": "Sorrow Love", "sec_num": null }, { "text": "In this paper, we have proposed an interpretable relevant emotion ranking model with event-driven attention. The event information is incorporated into a neural model through event-driven attention, which provides explainable clues as to how the emotions are evoked. Moreover, corpus-level event embeddings and document-level event distributions are incorporated to consider the event information comprehensively. 
Experimental results show that the proposed method performs better than the state-of-the-art emotion detection methods and multi-label learning methods.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Conclusion", "sec_num": "5" } ], "back_matter": [ { "text": "We would like to thank the anonymous reviewers for their valuable comments and helpful suggestions. This work was funded by the National Key Research and Development Program of China (2017YFB1002801), the National Natural Science Foundation of China (61772132), the Natural Science Foundation of Jiangsu Province of China (BK20161430) and Innovate UK (grant no. 103652).", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Acknowledgments", "sec_num": null } ], "bib_entries": { "BIBREF0": { "ref_id": "b0", "title": "Identifying expressions of emotion in text", "authors": [ { "first": "Saima", "middle": [], "last": "Aman", "suffix": "" }, { "first": "Stan", "middle": [], "last": "Szpakowicz", "suffix": "" } ], "year": 2007, "venue": "Lecture Notes in Computer Science", "volume": "4629", "issue": "", "pages": "196--205", "other_ids": {}, "num": null, "urls": [], "raw_text": "Saima Aman and Stan Szpakowicz. 2007. Identifying expressions of emotion in text. Lecture Notes in Computer Science, 4629:196-205.", "links": null }, "BIBREF1": { "ref_id": "b1", "title": "Latent dirichlet allocation", "authors": [ { "first": "M", "middle": [], "last": "David", "suffix": "" }, { "first": "", "middle": [], "last": "Blei", "suffix": "" }, { "first": "Y", "middle": [], "last": "Andrew", "suffix": "" }, { "first": "Michael I Jordan", "middle": [], "last": "Ng", "suffix": "" } ], "year": 2003, "venue": "Journal of machine Learning research", "volume": "3", "issue": "", "pages": "993--1022", "other_ids": {}, "num": null, "urls": [], "raw_text": "David M Blei, Andrew Y Ng, and Michael I Jordan. 2003. Latent dirichlet allocation. Journal of machine Learning research, 3(Jan):993-1022.", "links": null }, "BIBREF2": { "ref_id": "b2", "title": "Btm: Topic modeling over short texts", "authors": [ { "first": "Xueqi", "middle": [], "last": "Cheng", "suffix": "" }, { "first": "Xiaohui", "middle": [], "last": "Yan", "suffix": "" }, { "first": "Yanyan", "middle": [], "last": "Lan", "suffix": "" }, { "first": "Jiafeng", "middle": [], "last": "Guo", "suffix": "" } ], "year": 2014, "venue": "IEEE Transactions on Knowledge and Data Engineering", "volume": "26", "issue": "12", "pages": "2928--2941", "other_ids": {}, "num": null, "urls": [], "raw_text": "Xueqi Cheng, Xiaohui Yan, Yanyan Lan, and Jiafeng Guo. 2014. Btm: Topic modeling over short texts. IEEE Transactions on Knowledge and Data Engineering, 26(12):2928-2941.", "links": null }, "BIBREF3": { "ref_id": "b3", "title": "Better mini-batch algorithms via accelerated gradient methods", "authors": [ { "first": "Andrew", "middle": [], "last": "Cotter", "suffix": "" }, { "first": "Ohad", "middle": [], "last": "Shamir", "suffix": "" }, { "first": "Nathan", "middle": [], "last": "Srebro", "suffix": "" }, { "first": "Karthik", "middle": [], "last": "Sridharan", "suffix": "" } ], "year": 2011, "venue": "Advances in Neural Information Processing Systems", "volume": "", "issue": "", "pages": "1647--1655", "other_ids": {}, "num": null, "urls": [], "raw_text": "Andrew Cotter, Ohad Shamir, Nathan Srebro, and Karthik Sridharan. 2011. Better mini-batch algorithms via accelerated gradient methods. 
Advances in Neural Information Processing Systems, pages 1647-1655.", "links": null }, "BIBREF4": { "ref_id": "b4", "title": "Long short-term memory", "authors": [ { "first": "S", "middle": [], "last": "Hochreiter", "suffix": "" }, { "first": "", "middle": [], "last": "Schmidhuber", "suffix": "" } ], "year": 1997, "venue": "Neural Computation", "volume": "9", "issue": "8", "pages": "1735--1780", "other_ids": {}, "num": null, "urls": [], "raw_text": "S Hochreiter and J Schmidhuber. 1997. Long short-term memory. Neural Computation, 9(8):1735-1780.", "links": null }, "BIBREF5": { "ref_id": "b5", "title": "Multi-label learning by exploiting label correlations locally", "authors": [ { "first": "Jun", "middle": [], "last": "Sheng", "suffix": "" }, { "first": "Zhi Hua", "middle": [], "last": "Huang", "suffix": "" }, { "first": "", "middle": [], "last": "Zhou", "suffix": "" } ], "year": 2012, "venue": "Twenty-Sixth AAAI Conference on Artificial Intelligence", "volume": "", "issue": "", "pages": "949--955", "other_ids": {}, "num": null, "urls": [], "raw_text": "Sheng Jun Huang and Zhi Hua Zhou. 2012. Multi-label learning by exploiting label correlations locally. In Twenty-Sixth AAAI Conference on Artificial Intelligence, pages 949-955.", "links": null }, "BIBREF6": { "ref_id": "b6", "title": "Towards building a social emotion detection system for online news", "authors": [ { "first": "Jingsheng", "middle": [], "last": "Lei", "suffix": "" }, { "first": "Yanghui", "middle": [], "last": "Rao", "suffix": "" }, { "first": "Qing", "middle": [], "last": "Li", "suffix": "" }, { "first": "Xiaojun", "middle": [], "last": "Quan", "suffix": "" }, { "first": "Liu", "middle": [], "last": "Wenyin", "suffix": "" } ], "year": 2014, "venue": "Future Generation Computer Systems", "volume": "37", "issue": "", "pages": "438--448", "other_ids": {}, "num": null, "urls": [], "raw_text": "Jingsheng Lei, Yanghui Rao, Qing Li, Xiaojun Quan, and Liu Wenyin. 2014. Towards building a social emotion detection system for online news. Future Generation Computer Systems, 37:438-448.", "links": null }, "BIBREF7": { "ref_id": "b7", "title": "Emotion classification of online news articles from the reader's perspective", "authors": [ { "first": "Kevin", "middle": [], "last": "Hsin-Yih Lin", "suffix": "" }, { "first": "Changhua", "middle": [], "last": "Yang", "suffix": "" }, { "first": "Hsin-Hsi", "middle": [], "last": "Chen", "suffix": "" } ], "year": 2008, "venue": "Proceedings of the 2008 IEEE/WIC/ACM International Conference on Web Intelligence and Intelligent Agent Technology", "volume": "01", "issue": "", "pages": "220--226", "other_ids": {}, "num": null, "urls": [], "raw_text": "Kevin Hsin-Yih Lin, Changhua Yang, and Hsin-Hsi Chen. 2008. Emotion classification of online news articles from the reader's perspective. In Proceedings of the 2008 IEEE/WIC/ACM International Conference on Web Intelligence and Intelligent Agent Technology-Volume 01, pages 220-226. 
IEEE Computer Society.", "links": null }, "BIBREF8": { "ref_id": "b8", "title": "An introduction to information retrieval", "authors": [ { "first": "Christopher", "middle": [ "D" ], "last": "Manning", "suffix": "" }, { "first": "Prabhakar", "middle": [], "last": "Raghavan", "suffix": "" }, { "first": "Hinrich", "middle": [], "last": "Sch\u00fctze", "suffix": "" } ], "year": 2008, "venue": "Journal of the American Society for Information Science and Technology", "volume": "43", "issue": "3", "pages": "824--825", "other_ids": {}, "num": null, "urls": [], "raw_text": "Christopher D. Manning, Prabhakar Raghavan, and Hinrich Sch\u00fctze. 2008. An introduction to information retrieval. Journal of the American Society for Information Science and Technology, 43(3):824-825.", "links": null }, "BIBREF9": { "ref_id": "b9", "title": "Thirteen ways to look at the correlation coefficient", "authors": [ { "first": "W", "middle": [], "last": "", "suffix": "" }, { "first": "Alan", "middle": [], "last": "Nicewander", "suffix": "" } ], "year": 1988, "venue": "American Statistician", "volume": "42", "issue": "1", "pages": "59--66", "other_ids": {}, "num": null, "urls": [], "raw_text": "W. Alan Nicewander. 1988. Thirteen ways to look at the correlation coefficient. American Statistician, 42(1):59-66.", "links": null }, "BIBREF10": { "ref_id": "b10", "title": "Multilingual hierarchical attention networks for document classification", "authors": [ { "first": "Nikolaos", "middle": [], "last": "Pappas", "suffix": "" }, { "first": "Andrei", "middle": [], "last": "Popescu-Belis", "suffix": "" } ], "year": 2017, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Nikolaos Pappas and Andrei Popescu-Belis. 2017. Multilingual hierarchical attention networks for document classification. CoRR, abs/1707.00896.", "links": null }, "BIBREF11": { "ref_id": "b11", "title": "Glove: Global vectors for word representation", "authors": [ { "first": "Jeffrey", "middle": [], "last": "Pennington", "suffix": "" }, { "first": "Richard", "middle": [], "last": "Socher", "suffix": "" }, { "first": "Christopher", "middle": [ "D" ], "last": "Manning", "suffix": "" } ], "year": 2014, "venue": "Empirical Methods in Natural Language Processing", "volume": "", "issue": "", "pages": "1532--1543", "other_ids": {}, "num": null, "urls": [], "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D. Manning. 2014. Glove: Global vectors for word representation. In Empirical Methods in Natural Language Processing, pages 1532-1543.", "links": null }, "BIBREF12": { "ref_id": "b12", "title": "Affective computing", "authors": [ { "first": "W", "middle": [], "last": "Rosalind", "suffix": "" }, { "first": "Rosalind", "middle": [], "last": "Picard", "suffix": "" }, { "first": "", "middle": [], "last": "Picard", "suffix": "" } ], "year": 1997, "venue": "", "volume": "252", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Rosalind W Picard and Rosalind Picard. 1997. Affective computing, volume 252. 
MIT Press, Cambridge.", "links": null }, "BIBREF13": { "ref_id": "b13", "title": "Sentence emotion analysis and recognition based on emotion words using ren-cecps", "authors": [ { "first": "Changqin", "middle": [], "last": "Quan", "suffix": "" }, { "first": "Fuji", "middle": [], "last": "Ren", "suffix": "" } ], "year": 2010, "venue": "International Journal of Advanced Intelligence Paradigms", "volume": "2", "issue": "1", "pages": "105--117", "other_ids": {}, "num": null, "urls": [], "raw_text": "Changqin Quan and Fuji Ren. 2010. Sentence emotion analysis and recognition based on emotion words using ren-cecps. International Journal of Advanced Intelligence Paradigms, 2(1):105-117.", "links": null }, "BIBREF14": { "ref_id": "b14", "title": "Latent discriminative models for social emotion detection with emotional dependency", "authors": [ { "first": "Xiaojun", "middle": [], "last": "Quan", "suffix": "" }, { "first": "Qifan", "middle": [], "last": "Wang", "suffix": "" }, { "first": "Ying", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Luo", "middle": [], "last": "Si", "suffix": "" }, { "first": "Liu", "middle": [], "last": "Wenyin", "suffix": "" } ], "year": 2015, "venue": "ACM Trans. Inf. Syst", "volume": "34", "issue": "1", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Xiaojun Quan, Qifan Wang, Ying Zhang, Luo Si, and Liu Wenyin. 2015. Latent discriminative models for social emotion detection with emotional dependency. ACM Trans. Inf. Syst., 34(1):2:1-2:19.", "links": null }, "BIBREF15": { "ref_id": "b15", "title": "Contextual sentiment topic model for adaptive social emotion classification", "authors": [ { "first": "Yanghui", "middle": [], "last": "Rao", "suffix": "" } ], "year": 2016, "venue": "IEEE Intelligent Systems", "volume": "31", "issue": "1", "pages": "41--47", "other_ids": {}, "num": null, "urls": [], "raw_text": "Yanghui Rao. 2016. Contextual sentiment topic model for adaptive social emotion classification. IEEE Intelligent Systems, 31(1):41-47.", "links": null }, "BIBREF16": { "ref_id": "b16", "title": "Sentiment topic models for social emotion mining", "authors": [ { "first": "Yanghui", "middle": [], "last": "Rao", "suffix": "" }, { "first": "Qing", "middle": [], "last": "Li", "suffix": "" }, { "first": "Xudong", "middle": [], "last": "Mao", "suffix": "" }, { "first": "Wenyin", "middle": [], "last": "Liu", "suffix": "" } ], "year": 2014, "venue": "Information Sciences", "volume": "266", "issue": "5", "pages": "90--100", "other_ids": {}, "num": null, "urls": [], "raw_text": "Yanghui Rao, Qing Li, Xudong Mao, and Wenyin Liu. 2014. Sentiment topic models for social emotion mining. Information Sciences, 266(5):90-100.", "links": null }, "BIBREF17": { "ref_id": "b17", "title": "Building word-emotion mapping dictionary for online news", "authors": [ { "first": "Yanghui", "middle": [], "last": "Rao", "suffix": "" }, { "first": "Xiaojun", "middle": [], "last": "Quan", "suffix": "" }, { "first": "Liu", "middle": [], "last": "Wenyin", "suffix": "" }, { "first": "Qing", "middle": [], "last": "Li", "suffix": "" }, { "first": "Mingliang", "middle": [], "last": "Chen", "suffix": "" } ], "year": 2012, "venue": "SDAD 2012 The 1st International Workshop on Sentiment Discovery from Affective Data", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Yanghui Rao, Xiaojun Quan, Liu Wenyin, Qing Li, and Mingliang Chen. 2012. Building word-emotion mapping dictionary for online news. 
In SDAD 2012 The 1st International Workshop on Sentiment Discovery from Affective Data, page 28.", "links": null }, "BIBREF18": { "ref_id": "b18", "title": "Bidirectional recurrent neural networks", "authors": [ { "first": "M", "middle": [], "last": "Schuster", "suffix": "" }, { "first": "K", "middle": [ "K" ], "last": "Paliwal", "suffix": "" } ], "year": 2002, "venue": "IEEE Transactions on Signal Processing", "volume": "45", "issue": "11", "pages": "2673--2681", "other_ids": {}, "num": null, "urls": [], "raw_text": "M. Schuster and K.K. Paliwal. 2002. Bidirectional recurrent neural networks. IEEE Transactions on Signal Processing, 45(11):2673-2681.", "links": null }, "BIBREF19": { "ref_id": "b19", "title": "Machine learning in automated text categorization", "authors": [ { "first": "Fabrizio", "middle": [], "last": "Sebastiani", "suffix": "" } ], "year": 2001, "venue": "ACM Computing Surveys", "volume": "34", "issue": "1", "pages": "1--47", "other_ids": {}, "num": null, "urls": [], "raw_text": "Fabrizio Sebastiani. 2001. Machine learning in automated text categorization. ACM Computing Surveys, 34(1):1-47.", "links": null }, "BIBREF20": { "ref_id": "b20", "title": "Semeval-2007 task 14: Affective text", "authors": [ { "first": "Carlo", "middle": [], "last": "Strapparava", "suffix": "" }, { "first": "Rada", "middle": [], "last": "Mihalcea", "suffix": "" } ], "year": 2007, "venue": "Proceedings of the 4th International Workshop on Semantic Evaluations", "volume": "", "issue": "", "pages": "70--74", "other_ids": {}, "num": null, "urls": [], "raw_text": "Carlo Strapparava and Rada Mihalcea. 2007. Semeval-2007 task 14: Affective text. In Proceedings of the 4th International Workshop on Semantic Evaluations, pages 70-74. Association for Computational Linguistics.", "links": null }, "BIBREF21": { "ref_id": "b21", "title": "Detecting emotions in social media: A constrained optimization approach", "authors": [ { "first": "Yichen", "middle": [], "last": "Wang", "suffix": "" }, { "first": "Aditya", "middle": [], "last": "Pal", "suffix": "" } ], "year": 2015, "venue": "Proceedings of the Twenty-Fourth International Joint Conference on Artificial Intelligence (IJCAI 2015)", "volume": "", "issue": "", "pages": "996--1002", "other_ids": {}, "num": null, "urls": [], "raw_text": "Yichen Wang and Aditya Pal. 2015. Detecting emotions in social media: A constrained optimization approach. In Proceedings of the Twenty-Fourth International Joint Conference on Artificial Intelligence (IJCAI 2015), pages 996-1002.", "links": null }, "BIBREF22": { "ref_id": "b22", "title": "An interpretable neural network with topical information for relevant emotion ranking", "authors": [ { "first": "Yang", "middle": [], "last": "Yang", "suffix": "" }, { "first": "Deyu", "middle": [], "last": "Zhou", "suffix": "" }, { "first": "Yulan", "middle": [], "last": "He", "suffix": "" } ], "year": 2018, "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", "volume": "", "issue": "", "pages": "3423--3432", "other_ids": {}, "num": null, "urls": [], "raw_text": "Yang Yang, Deyu Zhou, and Yulan He. 2018. An interpretable neural network with topical information for relevant emotion ranking.
In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, Brussels, Belgium, October 31 - November 4, 2018, pages 3423-3432.", "links": null }, "BIBREF23": { "ref_id": "b23", "title": "Hierarchical attention networks for document classification", "authors": [ { "first": "Zichao", "middle": [], "last": "Yang", "suffix": "" }, { "first": "Diyi", "middle": [], "last": "Yang", "suffix": "" }, { "first": "Chris", "middle": [], "last": "Dyer", "suffix": "" }, { "first": "Xiaodong", "middle": [], "last": "He", "suffix": "" }, { "first": "Alex", "middle": [], "last": "Smola", "suffix": "" }, { "first": "Eduard", "middle": [], "last": "Hovy", "suffix": "" } ], "year": 2016, "venue": "", "volume": "", "issue": "", "pages": "1480--1489", "other_ids": { "DOI": [ "10.18653/v1/N16-1174" ] }, "num": null, "urls": [], "raw_text": "Zichao Yang, Diyi Yang, Chris Dyer, Xiaodong He, Alex Smola, and Eduard Hovy. 2016. Hierarchical attention networks for document classification. pages 1480-1489.", "links": null }, "BIBREF24": { "ref_id": "b24", "title": "Lift: multi-label learning with label-specific features", "authors": [ { "first": "Min", "middle": [ "Ling" ], "last": "Zhang", "suffix": "" } ], "year": 2011, "venue": "International Joint Conference on Artificial Intelligence", "volume": "", "issue": "", "pages": "1609--1614", "other_ids": {}, "num": null, "urls": [], "raw_text": "Min Ling Zhang. 2011. Lift: multi-label learning with label-specific features. In International Joint Conference on Artificial Intelligence, pages 1609-1614.", "links": null }, "BIBREF25": { "ref_id": "b25", "title": "Multilabel neural networks with applications to functional genomics and text categorization", "authors": [ { "first": "Min", "middle": [ "Ling" ], "last": "Zhang", "suffix": "" }, { "first": "Zhi Hua", "middle": [], "last": "Zhou", "suffix": "" } ], "year": 2006, "venue": "IEEE Transactions on Knowledge Data Engineering", "volume": "18", "issue": "10", "pages": "1338--1351", "other_ids": {}, "num": null, "urls": [], "raw_text": "Min Ling Zhang and Zhi Hua Zhou. 2006. Multilabel neural networks with applications to functional genomics and text categorization. IEEE Transactions on Knowledge Data Engineering, 18(10):1338-1351.", "links": null }, "BIBREF26": { "ref_id": "b26", "title": "A review on multi-label learning algorithms", "authors": [ { "first": "Min-Ling", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Zhi-Hua", "middle": [], "last": "Zhou", "suffix": "" } ], "year": 2014, "venue": "IEEE Transactions on Knowledge and Data Engineering", "volume": "26", "issue": "8", "pages": "1819--1837", "other_ids": {}, "num": null, "urls": [], "raw_text": "Min-Ling Zhang and Zhi-Hua Zhou. 2014. A review on multi-label learning algorithms. IEEE Transactions on Knowledge and Data Engineering, 26(8):1819-1837.", "links": null }, "BIBREF27": { "ref_id": "b27", "title": "Sentence-state lstm for text representation", "authors": [ { "first": "Yue", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Qi", "middle": [], "last": "Liu", "suffix": "" }, { "first": "Linfeng", "middle": [], "last": "Song", "suffix": "" } ], "year": 2018, "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", "volume": "", "issue": "", "pages": "317--327", "other_ids": {}, "num": null, "urls": [], "raw_text": "Yue Zhang, Qi Liu, and Linfeng Song. 2018. Sentence-state lstm for text representation.
In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics, pages 317-327. Association for Computational Linguistics.", "links": null }, "BIBREF28": { "ref_id": "b28", "title": "Relevant emotion ranking from text constrained with emotion relationships", "authors": [ { "first": "Deyu", "middle": [], "last": "Zhou", "suffix": "" }, { "first": "Yang", "middle": [], "last": "Yang", "suffix": "" }, { "first": "Yulan", "middle": [], "last": "He", "suffix": "" } ], "year": 2018, "venue": "Meeting of the North American Chapter of the Association for Computational Linguistics", "volume": "", "issue": "", "pages": "561--571", "other_ids": {}, "num": null, "urls": [], "raw_text": "Deyu Zhou, Yang Yang, and Yulan He. 2018. Relevant emotion ranking from text constrained with emotion relationships. In Meeting of the North American Chapter of the Association for Computational Linguistics, pages 561-571.", "links": null }, "BIBREF29": { "ref_id": "b29", "title": "Emotion distribution learning from texts", "authors": [ { "first": "Deyu", "middle": [], "last": "Zhou", "suffix": "" }, { "first": "Xuan", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Yin", "middle": [], "last": "Zhou", "suffix": "" }, { "first": "Quan", "middle": [], "last": "Zhao", "suffix": "" }, { "first": "Xin", "middle": [], "last": "Geng", "suffix": "" } ], "year": 2016, "venue": "Conference on Empirical Methods in Natural Language Processing", "volume": "", "issue": "", "pages": "638--647", "other_ids": {}, "num": null, "urls": [], "raw_text": "Deyu Zhou, Xuan Zhang, Yin Zhou, Quan Zhao, and Xin Geng. 2016. Emotion distribution learning from texts. In Conference on Empirical Methods in Natural Language Processing, pages 638-647.", "links": null } }, "ref_entries": { "FIGREF0": { "uris": null, "text": "An example of an online news article with the readers' votes on various emotion categories. Words highlighted in red are event-indicative words.", "type_str": "figure", "num": null }, "FIGREF1": { "uris": null, "text": "\u2022 Rank-SVM (Zhang and Zhou, 2014) distinguishes relevant labels from irrelevant ones with a large-margin strategy. \u2022 MLLOC (Huang and Zhou, 2012) exploits local emotion correlations in expression data.", "type_str": "figure", "num": null }, "FIGREF2": { "uris": null, "text": "Case Study of Interpretability on Three Corpora.", "type_str": "figure", "num": null }, "TABREF2": { "content": "", "text": "Statistics for the three corpora used in our experiments.", "type_str": "table", "num": null, "html": null }, "TABREF4": { "content": "
", "text": "", "type_str": "table", "num": null, "html": null }, "TABREF6": { "content": "
", "text": "Comparison with Emotion Detection Methods and Multi-label Methods. 'PL' represents Pro Loss, 'HL'", "type_str": "table", "num": null, "html": null } } } }