|
{ |
|
"paper_id": "D19-1019", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T16:00:49.749839Z" |
|
}, |
|
"title": "Using Customer Service Dialogues for Satisfaction Analysis with Context-Assisted Multiple Instance Learning", |
|
"authors": [ |
|
{ |
|
"first": "Kaisong", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Alibaba Group", |
|
"institution": "", |
|
"location": { |
|
"settlement": "Hangzhou", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Lidong", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Alibaba Group", |
|
"institution": "", |
|
"location": { |
|
"settlement": "Hangzhou", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Victoria University of Wellington", |
|
"location": { |
|
"settlement": "Wellington", |
|
"country": "New Zealand" |
|
} |
|
}, |
|
"email": "wei.gao@vuw.ac.nz" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Alibaba Group", |
|
"institution": "", |
|
"location": { |
|
"settlement": "Hangzhou", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Lujun", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Alibaba Group", |
|
"institution": "", |
|
"location": { |
|
"settlement": "Hangzhou", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jiancheng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Soochow University", |
|
"location": { |
|
"settlement": "Suzhou", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "jiancheng.wang@qq.com" |
|
}, |
|
{ |
|
"first": "Changlong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Alibaba Group", |
|
"institution": "", |
|
"location": { |
|
"settlement": "Hangzhou", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "changlong.scl@taobao.com" |
|
}, |
|
{ |
|
"first": "Xiaozhong", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indiana University", |
|
"location": { |
|
"settlement": "Bloomington", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Qiong", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Alibaba Group", |
|
"institution": "", |
|
"location": { |
|
"settlement": "Hangzhou", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "qz.zhang@alibaba-inc.com" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Customers ask questions and customer service staffs answer their questions, which is the basic service model via multi-turn customer service (CS) dialogues on E-commerce platforms. Existing studies fail to provide comprehensive service satisfaction analysis, namely satisfaction polarity classification (e.g., well satisfied, met and unsatisfied) and sentimental utterance identification (e.g., positive, neutral and negative). In this paper, we conduct a pilot study on the task of service satisfaction analysis (SSA) based on multi-turn CS dialogues. We propose an extensible Context-Assisted Multiple Instance Learning (CAMIL) model to predict the sentiments of all the customer utterances and then aggregate those sentiments into service satisfaction polarity. After that, we propose a novel Context Clue Matching Mechanism (CCMM) to enhance the representations of all customer utterances with their matched context clues, i.e., sentiment and reasoning clues. We construct two CS dialogue datasets from a top E-commerce platform. Extensive experimental results are presented and contrasted against a few previous models to demonstrate the efficacy of our model. 1", |
|
"pdf_parse": { |
|
"paper_id": "D19-1019", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Customers ask questions and customer service staffs answer their questions, which is the basic service model via multi-turn customer service (CS) dialogues on E-commerce platforms. Existing studies fail to provide comprehensive service satisfaction analysis, namely satisfaction polarity classification (e.g., well satisfied, met and unsatisfied) and sentimental utterance identification (e.g., positive, neutral and negative). In this paper, we conduct a pilot study on the task of service satisfaction analysis (SSA) based on multi-turn CS dialogues. We propose an extensible Context-Assisted Multiple Instance Learning (CAMIL) model to predict the sentiments of all the customer utterances and then aggregate those sentiments into service satisfaction polarity. After that, we propose a novel Context Clue Matching Mechanism (CCMM) to enhance the representations of all customer utterances with their matched context clues, i.e., sentiment and reasoning clues. We construct two CS dialogue datasets from a top E-commerce platform. Extensive experimental results are presented and contrasted against a few previous models to demonstrate the efficacy of our model. 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "In the past decades, E-commerce platforms, such as Amazon.com and Taobao.com 2 , have evolved into the most comprehensive and prosperous business ecosystems. They not only deeply involve other traditional businesses such as payment and logistics, but also largely transform every aspect of retailing. Taking the customer service on Taobao as an example, third-party retailers are always online to answer any question at any stage of pre-sale, sale and after-sale, through an 1 We have released the dataset at https://github. com/songkaisong/ssa.", |
|
"cite_spans": [ |
|
{ |
|
"start": 475, |
|
"end": 476, |
|
"text": "1", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "2 Taobao is a top E-commerce platform in China. Figure 1: Customer utterance and context clues (i.e., sentiment and reasoning clues) alignments in multiturn dialogue utterances of an unsatisfied customer service. The utterances (u i ) with positive/neutral/negative sentiments are denoted by red/orange/blue boxes.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "instant messenger within the platform. The topics of relevant customer service dialogues involve various aspects of online shopping, such as product information, return or exchange, logistics, etc. Based on a previous survey, over 77% of buyers on Taobao communicated with sellers before placing an order (Gao and Zhang, 2011) . Therefore, such service dialogue data contain very important clues for sellers to improve their service quality. Figure 1 depicts an exemplar dialogue of online customer service, which has a form of multi-turn dialogue between the customer and the customer service staff (or \"the server\" for short). In this dialogue, the customer is asking for refunding the freight he/she paid for sending back the product. At the end of service dialogue, the E-commerce platform invites the customer to score the service quality (e.g., using 1-5 stars denoting the extent of satisfaction from \"very unsatisfied\" to \"very satisfied\") via instant messages or a grading interface. Evidently, the customer feels unsatisfied with the response. Automatically detecting such unsatisfactory service is important. For the retail shopkeepers, they can quickly locate such service dialogue and find out the reason to take remedial actions. For the platform, by detecting and analyzing such cases, the platform can define clear-cut rules, say \"not fitting well is not a quality issue, and the buyers should pay the freight for freight.\"", |
|
"cite_spans": [ |
|
{ |
|
"start": 305, |
|
"end": 326, |
|
"text": "(Gao and Zhang, 2011)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 442, |
|
"end": 450, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we define a new task named Service Satisfaction Analysis (SSA): Given a service dialogue between the customer and the service staff, the task aims at predicting the customer's satisfaction, i.e., if the customer is satisfied by the responses from the service staff, meanwhile locating possible sentiment reasons, i.e., sentiment identification of the customer utterances. For example, Figure 1 gives the satisfaction prediction of the service as \"unsatisfied\" and identifies the detailed sentiments of all customer utterances. Obviously, SSA focuses on two special cases of text classification over predefined satisfaction labels (\"well satisfied/met/unsatisfied\") and predefined sentiment labels (\"positive/neutral/negative\"). Text classification has been widely studied for decades, such as sentiment classification on product reviews (Song et al., 2017; Chen et al., 2017; Li et al., , 2019 , stance classification on tweets or blogs (Du et al., 2017; Liu, 2010) , emotion classification for chit-chat (Majumder et al., 2018) , etc. However, all these methods cannot deal with these two classification tasks simultaneously in a unified framework. Although recent studies on multi-task learning framework suggest that closely related tasks can improve each other mutually from separated supervision information (Ma et al., 2018; Cerisara et al., 2018; , the acquisition of sentence (or utterance)-level sentiment labels, which is required by multi-task learning, remains a laborious and expensive endeavor. In contrast, coarse-grained document (or dialogue)-level annotations are relatively easy to obtain due to the widespread use of opinion grading interfaces (e.g., ratings).", |
|
"cite_spans": [ |
|
{ |
|
"start": 852, |
|
"end": 871, |
|
"text": "(Song et al., 2017;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 872, |
|
"end": 890, |
|
"text": "Chen et al., 2017;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 891, |
|
"end": 908, |
|
"text": "Li et al., , 2019", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 952, |
|
"end": 969, |
|
"text": "(Du et al., 2017;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 970, |
|
"end": 980, |
|
"text": "Liu, 2010)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 1020, |
|
"end": 1043, |
|
"text": "(Majumder et al., 2018)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1328, |
|
"end": 1345, |
|
"text": "(Ma et al., 2018;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 1346, |
|
"end": 1368, |
|
"text": "Cerisara et al., 2018;", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 400, |
|
"end": 408, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Recently, Multiple Instance Learning (MIL) framework is adopted for performing documentlevel and sentence-level sentiment classification simultaneously while only using document-level sentiment annotations (Zhou et al., 2009; Wang and Wan, 2018) . However, these models are trained based on plain textual data which are in a much simpler form than our multi-turn dialogue structure. Specifically, customer service dialogue has unique characteristics. Customer utterances tend to have more sentiment changes during the customer service dialogue which affect customer's final satisfaction. Figure 1 illustrates that satisfaction polarity (\"unsatisfied\") is mostly embedded in the last few customer utterances (i.e., u 7 , u 9 and u 10 ) 3 . On the other hand, a well-trained server varies less by always expressing positive/neutral utterances which contain helpful sentiment clues and reasoning clues. In this work, both sentiment clue and reasoning clue are called context clues which can directly or indirectly influence satisfaction polarity and need to be given special treatments in the model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 206, |
|
"end": 225, |
|
"text": "(Zhou et al., 2009;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 226, |
|
"end": 245, |
|
"text": "Wang and Wan, 2018)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 588, |
|
"end": 596, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To deal with the issues, we propose a novel and extensible Context-Assisted Multiple Instance Learning (CAMIL) model for the new SSA task, and utterance-level sentiment classification and dialogue-level satisfaction classification will be done simultaneously only under the supervision of satisfaction labels. We motivate the idea of our context-assisted modeling solution based on the hypothesis that if a customer utterance does not have enough information to create a sound vector representation for sentiment prediction, we try to enhance it with a complementary representation derived from context clues via our position-guided Context Clue Matching Mechanism (CCMM). Overall, our contributions are three-fold:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We introduce a new SSA task based on customer service dialogues. We thus propose a novel CAMIL model to predict the sentiment distributions of all customer utterances, and then aggregate those distributions to determine the final satisfaction polarity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We further propose an automatic CCMM to associate each customer utterance with its most relevant context clues, and then generate a complementary vector which enhances the customer utterance representation for better sentiment classification to boost the final satisfaction classification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Two real-world CS dialogue datasets are collected from a top E-commerce platform. The experimental results demonstrate that our model is effective for the SSA task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Service satisfaction analysis (SSA) is closely related to sentiment analysis (SA), because the sentiment of the customer utterances is a basic clue signaling the customer's satisfaction. Existing SA works aim to predict sentiment polarities (positive, neutral and negative) for subjective texts in different granularities, such as word Song et al., 2016) , sentence (Ma et al., 2017) , short text and document (Yang et al., 2018a) . In these studies, subjective texts are always considered as a sequence of words 4 . More recently, some researchers started to explore the utterance-level structure for sentiment classification, such as modeling dialogues via a hierarchical RNN in both word level and utterance level (Cerisara et al., 2018) or keeping track of sentiment states of dialogue participants (Majumder et al., 2018) . However, none of these works can do dialogue-level satisfaction classification and utterance-level sentiment classification simultaneously. Recent studies (Cerisara et al., 2018; Ma et al., 2018; employing multi-task learning open a possibility to address this issue. However, these models must be trained under the supervision of both documentlevel and sentence-level sentiment labels in which the later are generally not easy to obtain. Sentiment classification based on Multiple Instance Learning (MIL) frameworks (Wang and Wan, 2018; Angelidis and Lapata, 2018) aims to perform document-level and sentence-level sentiment classification tasks simultaneously with the supervision of document labels only.", |
|
"cite_spans": [ |
|
{ |
|
"start": 336, |
|
"end": 354, |
|
"text": "Song et al., 2016)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 366, |
|
"end": 383, |
|
"text": "(Ma et al., 2017)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 410, |
|
"end": 430, |
|
"text": "(Yang et al., 2018a)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 717, |
|
"end": 740, |
|
"text": "(Cerisara et al., 2018)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 803, |
|
"end": 826, |
|
"text": "(Majumder et al., 2018)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 984, |
|
"end": 1007, |
|
"text": "(Cerisara et al., 2018;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 1008, |
|
"end": 1024, |
|
"text": "Ma et al., 2018;", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Angelidis and Lapata (2018) proposed an MIL model for fine-grained sentiment analysis. Wang and Wan (2018) further applied the model to peerreviewed research papers by integrating a memory built from abstracts. However, their models are not suitable for our SSA task because they ignore the dialogue structure of arbitrary interactions between customers and servers. In contrast, we consider complex multi-turn interactions within dialogues and explore context clue matching between customer utterances and server utterances for multi-tasking in the SSA task. Specifically, we improve the basic MIL models by proposing a position-guided automatic context clue matching mechanism (CCMM) to conduct customer utterance and context clues alignments for better sentiment classification to boost satisfaction classification. Other related work related to sentiment analysis for subjective texts in different granularities include (Yang et al., 2016; Wu and Huang, 2016; Yang et al., 2018b; Du et al., 2017; Song et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 87, |
|
"end": 106, |
|
"text": "Wang and Wan (2018)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 924, |
|
"end": 943, |
|
"text": "(Yang et al., 2016;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 944, |
|
"end": 963, |
|
"text": "Wu and Huang, 2016;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 964, |
|
"end": 983, |
|
"text": "Yang et al., 2018b;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 984, |
|
"end": 1000, |
|
"text": "Du et al., 2017;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 1001, |
|
"end": 1019, |
|
"text": "Song et al., 2019)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In order to predict service satisfaction and identify sentiments of all customer utterances with available satisfaction labels, we propose a CAMIL model based on multiple instance learning approach. Figure 2 shows the architecture of our model which consists of three layers: Input Representation Layer, Sentiment Classification Layer and Satisfaction Classification Layer. In this section, we will describe the model in detail.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 199, |
|
"end": 207, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Context-Assisted MIL Network", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Let each utterance u i = w 1 , ..., w |u i | be a sequence of words. By adopting word embeddings and semantic composition models such as Recurrent Neural Network (RNN), we can learn the utterance representation. In this work, we adopt a standard LSTM model (Hochreiter and Schmidhuber, 1997) to learn a fixed-size utterance representation v u i \u2208 R k , where k is the size of LSTM hidden state. Specifically, we first convert the words in each utterance u i to the corresponding word embeddings E u i \u2208 R d\u00d7|u i | which are then fed into a LSTM for obtaining the last hidden state as the v u i , where d is the dimensionality of word embeddings. Formally, we have", |
|
"cite_spans": [ |
|
{ |
|
"start": 257, |
|
"end": 291, |
|
"text": "(Hochreiter and Schmidhuber, 1997)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Representation Layer", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "v u i = LSTM(E u i ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Representation Layer", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We conjecture that the participants (i.e., customer and server) play different roles in CS dialogue. Our hypothesis is that satisfaction polarity can be more or less conveyed by the sentiments of key customer utterances, and meanwhile the sentiments of server utterances are generally polite or non-negative and contain text with context clues which complement the target customer's ut- terances and indirectly affect satisfaction polarity. Thus, we separately denote the customer utterance representations as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Representation Layer", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "{v c 1 , v c 2 , ..., v c M } and server utterance representations as {v s 1 , v s 2 , ..., v s N },", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Representation Layer", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where M + N = L and L is the total number of utterances in the dialogue.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Representation Layer", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Customer utterances tend to have more direct impact on the dominating satisfaction polarity. However, short utterance texts may not contain enough information for semantic representation. Thus, considering context to enhance utterance representation is a natural and reasonable choice. Given a specific customer utterance vector v ct , we use a context clue matching mechanism, namely CCMM (see Section 4), to produce matched context representation c ct \u2208 R k as below:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment Classification Layer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "c ct = CCMM v ct , {v s t |1 \u2264 t \u2264 N }", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Sentiment Classification Layer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where v s t is any server utterance representation. After that, v ct can be enhanced by c ct via concatenation for a combined representationv ct = v ct \u2295 c ct . Compared to v ct ,v ct \u2208 R 2k contains more evidence for sentiment prediction. Then, we feed the representation sequence", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment Classification Layer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "{v c 1 ,v c 2 , ...,v c M }", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment Classification Layer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "into a standard LSTM for obtaining a segment representation h ct \u2208 R k at each time step t, i.e., h ct = LSTM v ct .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment Classification Layer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Finally, each segment representation h ct is fed into a linear layer and then a softmax function for predicting its sentiment distribution over sentiment labels G = {positive, neutral, negative}:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment Classification Layer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p ct = softmax(W s h ct + b s )", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Sentiment Classification Layer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where W s \u2208 R |G|\u00d7k and b s \u2208 R |G| are trainable parameters shared across all segments, and p ct \u2208 R |G| is the sentiment distribution for utterance u ct .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment Classification Layer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In the simplest case, satisfaction polarities C = {well satisfied, met, unsatisfied} can be computed by averaging all predicted sentiment distributions of customer utterances as y = 1 M t\u2208[1,M ] p ct . However, it is a crude way of combining sentiment distributions uniformly, as not all distributions convey equally important sentiment clues. In Figure 1 , for example, the satisfaction polarity (\"unsatisfied\") is mostly determined by customer utterances u 7 , u 9 and u 10 which are relatively more crucial than other ones. We opt for an attention mechanism to reward segments that are more likely to be good sentiment predictors. Therefore, we measure the importance of each segment representation h ct through a scoring function using feed forward neural network as below:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 347, |
|
"end": 355, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Satisfaction Classification Layer", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03b1 ct = softmax v T tanh(W u h ct + b u )", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Satisfaction Classification Layer", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "where W u \u2208 R k\u00d7k , b u \u2208 R k and v \u2208 R k are trainable parameters, v can be seen as a high-level representation of a fixed query \"what is the informative segment\" like that used in (Yang et al., 2016) . Finally, we obtain the satisfaction distribution y \u2208 R |C| as the weighted sum of sentiment distributions of all the customer utterances by:", |
|
"cite_spans": [ |
|
{ |
|
"start": 182, |
|
"end": 201, |
|
"text": "(Yang et al., 2016)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Satisfaction Classification Layer", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "y = t\u2208[1,M] \u03b1 ct p ct (4)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Satisfaction Classification Layer", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Note that in the training dataset, our approach only needs the dialogue's satisfaction labels while the utterance sentiment labels are unobserved. Therefore, we use the categorical cross-entropy loss to minimize the error between the distribution of the output satisfaction polarity and that of the goldstandard satisfaction label of the dialogue by:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training and Parameter Learning", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L(\u0398) = \u2212 j\u2208[1,T ] i\u2208C g j i log(y j i )", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Training and Parameter Learning", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "where g j i is 1 or 0 indicating whether the i th class is a correct answer for the j th training instance, y j i is the predicted satisfaction probability distribution, and \u0398 denotes the trainable parameter set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training and Parameter Learning", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "After learning \u0398, we feed each test instance into the final model, and the label with the highest probability stands for the predicted satisfaction polarity. We use back propagation to calculate the gradients of all the model parameters, and update them with Momentum optimizer (Qian, 1999) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 278, |
|
"end": 290, |
|
"text": "(Qian, 1999)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training and Parameter Learning", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "Server utterances provide helpful context clues which can be defined as sentiment and reasoning clues by the positions of server utterances. Thus, we introduce the position-guided automatic context clue matching mechanism (CCMM) used to match each customer utterance with its most related server utterances, which contain two layers: the position attention layer and the utterance attention layer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Clue Matching Mechanism", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Sentiment and Reasoning Clues: Server utterances provide helpful context clues for each targeted customer utterance. Here, we aim to locate helpful context clues in server utterances which are categorized as sentiment clues and reasoning clues. Sentiment clues refer to the server utterances that appear preceding the targeted customer utterance and trigger its sentiment expres-sion, such as server utterance u 6 leading to customer displeasure of the utterance u 7 in the Figure 1. Reasoning clues are the server utterances that appear following the targeted customer utterance and respond to its concerns, such as server utterance u 6 responding to the customer utterance u 5 in the Figure 1 . Both types of clues are identified by the proposed attention layers along with position information.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 474, |
|
"end": 480, |
|
"text": "Figure", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 686, |
|
"end": 694, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Context Clue Matching Mechanism", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Position Attention Layer: Typically, customer utterances are more likely to be triggered or answered by the server utterances near them. Let p(\u2022) denote the position function of any utterance in the original dialogue, such as p(u c 2 ) = 3 in Figure 1. For any customer utterance u ct , the preceding server utterances {u s t |p(u s t ) < p(u ct )} may provide sentiment clues, and the following server utterances {u s t |p(u s t ) > p(u ct )} may contain reasoning clues. By considering both directions, we compute the position attention weight g(\u2022) by:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 243, |
|
"end": 249, |
|
"text": "Figure", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Context Clue Matching Mechanism", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "g(u ct , u s t ) = 1 \u2212 |p(u ct ) \u2212 p(u s t )| L", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Context Clue Matching Mechanism", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Then, the weighted output after this layer is formulated as below:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Clue Matching Mechanism", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "o s t = g(u ct , u s t ) * h s t * I(u ct , u s t )", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Context Clue Matching Mechanism", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "where o s t \u2208 R k is the weighted h s t for t \u2208 [1, N ], the notation I(\u2022) denotes a masking function which can be used to retain either only sentiment clues (i.e., if p(u ct ) > p(u s t ), I(u ct , u s t ) equals 1, or 0 otherwise) or only reasoning clues (i.e., if p(u ct ) < p(u s t ), I(u ct , u s t ) equals 1, or 0 otherwise). Here, we suggest considering both sentiment and reasoning clues, so I(u ct , u s t ) is a constant 1. Finally, we construct memory O \u2208 R k\u00d7N as below:",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Clue Matching Mechanism", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "O = [o s 1 ; o s 2 ; . . . ; o s N ]", |
|
"eq_num": "(8)" |
|
} |
|
], |
|
"section": "Context Clue Matching Mechanism", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Utterance Attention Layer: Only a fraction of server utterances can match every customer utterance in sentiment or content, such as the exemplar dialogue in Figure 1 . So, we introduce an attention strategy which enables our model to attend on server utterances of different importance when constructing a complementary context representation for any customer utterance. Considering customer utterance representation h ct as an index, we can produce a context vector c ct \u2208 R k using a weighted sum of each piece o s t of memory O: where \u03b2 s t \u2208 [0, 1] is the attention weight calculated based on a scoring function using a feed forward neural network as follows:",
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 157, |
|
"end": 165, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Context Clue Matching Mechanism", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "c ct = t \u2208[1,N ] \u03b2 s t o s t", |
|
"eq_num": "(9)" |
|
} |
|
], |
|
"section": "Context Clue Matching Mechanism", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03b2 s t = softmax v T tanh(W c o s t h ct + b c )", |
|
"eq_num": "(10)" |
|
} |
|
], |
|
"section": "Context Clue Matching Mechanism", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "where W c \u2208 R 2k\u00d72k , v \u2208 R 2k and b c \u2208 R 2k are trainable parameters.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Clue Matching Mechanism", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Our experiments are conducted based on two Chinese CS dialogue datasets, namely Clothes and Makeup, collected from a top E-commerce platform. Note that our proposed method is language independent and can be applied to other languages directly. Clothes is a corpus with 10K dialogues in the Clothes domain and Makeup is a balanced corpus with 3,540 dialogues in the Makeup domain. Both datasets have service satisfaction ratings in 1-5 stars from customer feedbacks. Meanwhile, we also annotate all the utterances in both datasets with sentiment labels for testing. In this study, we conduct two classification tasks: one is to predict in three satisfaction classes, i.e., \"unsatisfied\" (1-2 stars), \"met\" (3 stars) and \"satisfied\" (4-5 stars), and the other is to predict in three sentiment classes, i.e., \"negative/neutral/positive\". All texts are tokenized by a popular Chinese word segmentation utility called jieba 5 . After preprocessing, the datasets are partitioned for training, validation and test with a 80/10/10 split. A summary of statistics for both datasets are given in Table 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1085, |
|
"end": 1092, |
|
"text": "Table 1", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset and Experimental Settings", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "For all the methods, we apply fine-tuning for the word vectors, which can improve the performance. The word vectors are initialized by word embeddings that are trained on both datasets with 5 https://pypi.org/project/jieba/ CBOW (Mikolov et al., 2013) , where the dimension is 300 and the vocabulary size is 23.3K. Other trainable model parameters are initialized by sampling values from a uniform distribution U(\u22120.01, 0.01). The size of LSTM hidden states k is set as 128. The hyper-parameters are tuned on the validation set. Specifically, the initial learning rate is fixed as 0.1, the dropout rate is 0.2, the batch size is 32 and the number of epochs is 20. The performances of both satisfaction and sentiment classifications are evaluated using standard Macro F1 and Accuracy.", |
|
"cite_spans": [ |
|
{ |
|
"start": 229, |
|
"end": 251, |
|
"text": "(Mikolov et al., 2013)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset and Experimental Settings", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We compare our proposed approach with the following state-of-the-art Sentiment Analysis (SA) methods which can be grouped into two types: plain SA models and dialogue SA models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparative Study", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Plain SA models consider dialogue as plain text and ignore utterance matching, say, utterance is seen as sentence and dialogue as document.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparative Study", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "1) LSTM: We use word vectors as the input of a standard LSTM (Hochreiter and Schmidhuber, 1997) and feed the last hidden state into a softmax layer for satisfaction prediction.", |
|
"cite_spans": [ |
|
{ |
|
"start": 61, |
|
"end": 95, |
|
"text": "(Hochreiter and Schmidhuber, 1997)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparative Study", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "2) HAN: A hierarchical attention network for document classification (Yang et al., 2016) , which has two levels of attention mechanisms applied at word-and utterance-level, enabling it to attend differentially to more and less important content when constructing the dialogue representation and feeding it into a softmax layer for classification.", |
|
"cite_spans": [ |
|
{ |
|
"start": 69, |
|
"end": 88, |
|
"text": "(Yang et al., 2016)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparative Study", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "3) HRN: A hierarchical recurrent network for joint sentiment and act sequence recognition (Cerisara et al., 2018) . It uses a bi-directional LSTM to represent utterances which are then fed into a standard LSTM for dialogue representation as the input of a softmax layer for classification. 4) MILNET: A multiple instance learning network for document-level and sentence-level sentiment analysis (Angelidis and Lapata, 2018) . The original method is designed for plain textual data, which does not consider CS dialogue structure. In addition, their method ignores long-range dependencies among customer sentiments (i.e., without segment encoder in Figure 2 ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 90, |
|
"end": 113, |
|
"text": "(Cerisara et al., 2018)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 395, |
|
"end": 423, |
|
"text": "(Angelidis and Lapata, 2018)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 647, |
|
"end": 655, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparative Study", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Dialogue SA models consider utterance matching between customer and server utterances. 5) HMN: A hierarchical matching network for sentiment analysis, which uses a question-answer bidirectional matching layer to learn the matching vector of each QA pair (i.e., customer utterance, server utterance) and then characterizes the importance of the generated matching vectors via a self-matching attention layer (Shen et al., 2018) . However, the number of pairs within a dialogue is huge, which leads to expensive calculations. Meanwhile, it considers the sentiments of server utterances, which will mislead the final prediction. 6) CAMIL s , CAMIL r and CAMIL f ull : Our CAMIL models with only sentiment clues, only reasoning clues, and both of them, respectively, by setting the masking function (see Equation 7).",
|
"cite_spans": [ |
|
{ |
|
"start": 407, |
|
"end": 426, |
|
"text": "(Shen et al., 2018)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparative Study", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "All the methods are implemented by ourselves with TensorFlow 6 and run on a server configured with a Tesla V100 GPU, 2 CPU and 32G memory.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparative Study", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Results and Analysis: The results of comparisons are reported in Table 2 . It indicates that LSTM cannot compete with other methods because it simply considers dialogues as word sequences but ignores the utterance matching. HAN and HRN perform much better by using a two-layer architecture (i.e., utterance and dialogue), but they ignore the utterance interactions. Besides, HRN treats the sentiment analysis task and the service satisfaction analysis task separately, and ignores their sentiment dependence. HMN uses a heuristic question-answering matching strategy, which is not flexible enough and easily causes mismatching issues. MILNET is the most related work, but its simplistic alignment model weakens prediction performance when facing our complex customer service dialogue structure. MILNET however does not consider the dialogue structure and introduces unrelated sentiments from server utterances. CAMIL r and CAMIL s only consider either sentiment or reasoning clues, so they cannot compete with CAMIL f ull which considers both in dialogues. Partially configured model CAMIL r (or CAMIL s ) only considers reasoning (or sentiment) clues and performs worse than our full model CAMIL. This verifies that both types of clues are helpful and complementary, and they should be employed simultaneously.",
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 65, |
|
"end": 72, |
|
"text": "Table 2", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparative Study", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "On Clothes corpus, compared to the met class, the performances of all models on the satisfied class are much worse, because when the two classes cannot be well distinguished the models tend to predict the majority class (i.e., met) to minimize the loss. On Makeup corpus which is a balanced dataset, the performances on the met and satisfied classes are less distinctive, but both are consistently worse than the unsatisfied class. 6 https://www.tensorflow.org/",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparative Study", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Different model configurations can largely affect the performance. We implement several model variants for ablation tests: Server and Customer consider only server and customer utterances in a dialogue, respectively. NoPos ignores the prior position information. Average takes the average of all the sentiment distributions for classification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ablation Study", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Voting directly maps the majority sentiment into satisfaction prediction, i.e., negative \u2192 unsatisfied, neutral \u2192 met, positive \u2192 satisfied. The results of comparisons are reported in Table 3 .",

"cite_spans": [],

"ref_spans": [

{

"start": 184,

"end": 191,
|
"text": "Table 3", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ablation Study", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "In Table 3 , we can observe that Customer outperforms Server by a large margin, which indicates that service satisfaction is mostly related to the sentiments embedded in the customer utterances. However, its performance is still lower than CAMIL f ull , suggesting that server utterances can provide helpful context clues. NoPos performs well but worse than CAMIL f ull since the position information provides prior knowledge for guiding context clue matching. Average and Voting are sub-optimal choices because not all the sentiment distributions contribute equally to the satisfaction polarity and the majority sentiment polarity also does not correlate strongly with it. Table 4 shows another statistics of our datasets, i.e., the distribution of sentiment labels over each service satisfaction polarity, which reflects the imbalanced situation of utterance-level sentiments in real customer service dialogues. In Table 5 , we compare the sentiment prediction results of MILNET, CAMIL r , CAMIL s and CAMIL f ull . CAMIL r and CAMIL s perform worse than CAMIL f ull because they only consider partial context information. CAMIL f ull is the best mainly due to its accurate context clue matching. Thus, our proposed approach is more adaptive to the service satisfaction analysis task based on the customer service dialogues. For brevity, we use C/S to denote customer/server utterance. Our model predicts the label \"unsatisfied\" correctly and also predicts reasonable sentiment polarities for customer utterances. Considering the context, customer utterances C 1,5,6 are \"negative\" but predicted as \"neutral\" by MILNET because MILNET predicts sentiments only from target utterance itself and ignores context information. In addition, the sentiments of the customer utterances C 4,5 and C 9 tend to have larger influences on deciding the satisfaction polarity because C 4 clearly conveys \"unsatisfied\" attitude, C 5 complains about delay and C 9 criticizes the low service quality.",
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 3", |
|
"ref_id": "TABREF6" |
|
}, |
|
{ |
|
"start": 674, |
|
"end": 681, |
|
"text": "Table 4", |
|
"ref_id": "TABREF8" |
|
}, |
|
{ |
|
"start": 917, |
|
"end": 924, |
|
"text": "Table 5", |
|
"ref_id": "TABREF9" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ablation Study", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We also visualize the attention weights in Figure 4 to explain our prediction results. For each customer utterance C i , we give the attention weights \u03b2s t on all the server utterances (see Formula 10). Furthermore, we also visualize the attention rates \u03b1 ct on the customer utterances (see Formula 3). Lighter colors denote smaller values. From Figure 4 , we can see that the customer utterances C 4,5,9 have higher attention weights because customer attitudes are intuitively formed at the end of the dialogues (i.e., C 9 ) or determined by explicit sentiments (i.e., C 4 ). In this example, the customer is finally unhappy with the provided solution, and the sentiments did not change through the whole dialogue. We can also see that customer utterances are influenced by server utterances. For example, C 1\u22123 are related to S 2 , C 4,5 are related to S 2,3,7 , and C 6\u22129 are related to S 7 . This again validates the fact that customer utterances are related to the server utterances near them. Meanwhile, server utterances may provide different types of context clues (i.e., sentiment and reasoning). For a specific server utterance S 7 , it provides explicit sentiment clue for C 9 and also gives reasoning clue for C 8 .",
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 43, |
|
"end": 51, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 346, |
|
"end": 354, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Case Study", |
|
"sec_num": "5.5" |
|
}, |
|
{ |
|
"text": "In-depth Analysis: CAMIL f ull is only trained based on satisfaction labels, thus the laborious acquisition of sentiment labels is unnecessary. However, we would point out that lack of sentiment labels will inevitably lead to difficulties in identifying positive/negative utterances from those neutral ones. We will study to alleviate it in the future.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Case Study", |
|
"sec_num": "5.5" |
|
}, |
|
{ |
|
"text": "Our general observation is that the sentiment of customers at the beginning cannot largely determine the service satisfaction at the end. This is because the sentiment of the customers can vary with different quality of service during the dialogue, and the final service satisfaction results from the overall sentiments of important customer utterances in the dialogue (see the attention weights in Figure 4 ). To verify this, we design a heuristic baseline called Mapping which directly maps the initial negative, neutral and positive sentiment of customer to the corresponding service satisfaction, i.e., unsatisfied, met and satisfied. The satisfaction classification results are displayed in the Table 6 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 399, |
|
"end": 407, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 700, |
|
"end": 707, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Case Study", |
|
"sec_num": "5.5" |
|
}, |
|
{ |
|
"text": "In Table 6 , we can observe that the Mapping method is far worse than our model. One reason is that the service dialogues in our datasets have more than 25 utterances on average (See the statistics in Table 1 ) and contain a large proportion Table 6 : Satisfaction classification comparison between our method and a heuristic mapping method.",
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 201, |
|
"end": 208, |
|
"text": "Table 1", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 242, |
|
"end": 249, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Case Study", |
|
"sec_num": "5.5" |
|
}, |
|
{ |
|
"text": "of complex interactions. Besides, the sentiment change is closely related to the quality of service and it is very common in our datasets. Thus, using such simple correlation does not work well in our complex dialogue scenarios.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Case Study", |
|
"sec_num": "5.5" |
|
}, |
|
{ |
|
"text": "In this paper, we propose a novel CAMIL model for the SSA task. We first propose a basic MIL approach with the inputs of context-matched customer utterances, then predict the utterancelevel sentiment polarities and dialogue-level satisfaction polarities simultaneously. In addition, we propose a context clue matching mechanism (CCMM) to match any customer utterance with the most related server utterances. Experimental results on two real-world datasets indicate our method clearly outperforms some state-of-the-art baseline models on the two SSA subtasks, i.e., service satisfaction polarity classification and utterance sentiment classification, which are performed simultaneously. We have made our datasets publicly available.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In the future, we will further improve our method by learning the correlation between the customer utterances and the server utterances. In addition, we will study other interesting tasks in customer service dialogues, such as outcome prediction or opinion change.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Given a specific customer utterance (u7) in Figure 1, the server utterance (u6) triggers the change of customer sentiment from \"neutral\" to \"negative\", and the server utterance (u8) answers the question \"this is not a quality problem\".",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "From plain sentiment analysis viewpoint, sentence and utterance are treated the same since a dialogue is considered as a chunk of plain texts and the matching between utterances is ignored. So, an utterance is seen as a sentence and a dialogue as a document. Here, we use sentence and utterance interchangeably when it comes to plain sentiment analysis models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We thank all the reviewers for their insightful comments and helpful suggestions. This work is supported by National Key R&D Program of China (2018YFC0830200; 2018YFC0830206).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": "7" |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Multiple instance learning networks for fine-grained sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Stefanos", |
|
"middle": [], |
|
"last": "Angelidis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "TACL", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "17--31", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stefanos Angelidis and Mirella Lapata. 2018. Multi- ple instance learning networks for fine-grained sen- timent analysis. TACL, 6:17-31.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Multi-task dialog act and sentiment recognition on mastodon", |
|
"authors": [ |
|
{ |
|
"first": "Christophe", |
|
"middle": [], |
|
"last": "Cerisara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Somayeh", |
|
"middle": [], |
|
"last": "Jafaritazehjani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adedayo", |
|
"middle": [], |
|
"last": "Oluokun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hoa", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of COLING", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "745--754", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christophe Cerisara, Somayeh Jafaritazehjani, Ade- dayo Oluokun, and Hoa T. Le. 2018. Multi-task di- alog act and sentiment recognition on mastodon. In Proceedings of COLING, pages 745-754.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Recurrent attention network on memory for aspect sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhongqian", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lidong", |
|
"middle": [], |
|
"last": "Bing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "452--461", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peng Chen, Zhongqian Sun, Lidong Bing, and Wei Yang. 2017. Recurrent attention network on mem- ory for aspect sentiment analysis. In Proceedings of EMNLP, pages 452-461.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Stance classification with target-specific neural attention", |
|
"authors": [ |
|
{ |
|
"first": "Jiachen", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruifeng", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yulan", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lin", |
|
"middle": [], |
|
"last": "Gui", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of IJCAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3988--3994", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiachen Du, Ruifeng Xu, Yulan He, and Lin Gui. 2017. Stance classification with target-specific neural at- tention. In Proceedings of IJCAI, pages 3988-3994.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "A word-emoticon mutual reinforcement ranking model for building sentiment lexicon from massive collection of microblogs", |
|
"authors": [ |
|
{ |
|
"first": "Kaisong", |
|
"middle": [], |
|
"last": "Shi Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daling", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ge", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "World Wide Web", |
|
"volume": "18", |
|
"issue": "4", |
|
"pages": "949--967", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shi Feng, Kaisong Song, Daling Wang, and Ge Yu. 2015. A word-emoticon mutual reinforcement rank- ing model for building sentiment lexicon from mas- sive collection of microblogs. World Wide Web, 18(4):949-967.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "User satisfaction of ali wangwang, an instant messenger tool", |
|
"authors": [ |
|
{ |
|
"first": "Jie", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhenghua", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Theory, Methods, Tools and Practice -DUXU 2011, Part of HCI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "414--420", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jie Gao and Zhenghua Zhang. 2011. User satisfac- tion of ali wangwang, an instant messenger tool. In Theory, Methods, Tools and Practice -DUXU 2011, Part of HCI, pages 414-420.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Long short-term memory", |
|
"authors": [ |
|
{ |
|
"first": "Sepp", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00fcrgen", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural Computation", |
|
"volume": "9", |
|
"issue": "8", |
|
"pages": "1735--1780", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural Computation, 9(8):1735-1780.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Transformation networks for target-oriented sentiment classification", |
|
"authors": [ |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lidong", |
|
"middle": [], |
|
"last": "Bing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wai", |
|
"middle": [], |
|
"last": "Lam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bei", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "946--956", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xin Li, Lidong Bing, Wai Lam, and Bei Shi. 2018. Transformation networks for target-oriented senti- ment classification. In Proceedings of ACL, pages 946-956.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "A unified model for opinion target extraction and target sentiment prediction", |
|
"authors": [ |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lidong", |
|
"middle": [], |
|
"last": "Bing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piji", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wai", |
|
"middle": [], |
|
"last": "Lam", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6714--6721", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xin Li, Lidong Bing, Piji Li, and Wai Lam. 2019. A unified model for opinion target extraction and target sentiment prediction. In Proceedings of AAAI, pages 6714-6721.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Sentiment analysis and subjectivity", |
|
"authors": [ |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Handbook of Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "627--666", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bing Liu. 2010. Sentiment analysis and subjectivity. In Handbook of Natural Language Processing, Second Edition., pages 627-666.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Interactive attention networks for aspect-level sentiment classification", |
|
"authors": [ |
|
{ |
|
"first": "Dehong", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sujian", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Houfeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of IJCAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4068--4074", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dehong Ma, Sujian Li, Xiaodong Zhang, and Houfeng Wang. 2017. Interactive attention networks for aspect-level sentiment classification. In Proceed- ings of IJCAI, pages 4068-4074.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Detect rumor and stance jointly by neural multi-task learning", |
|
"authors": [ |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kam-Fai", |
|
"middle": [], |
|
"last": "Wong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of WWW", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "585--593", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jing Ma, Wei Gao, and Kam-Fai Wong. 2018. Detect rumor and stance jointly by neural multi-task learn- ing. In Proceedings of WWW, pages 585-593.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Dialoguernn: An attentive RNN for emotion detection in conversations", |
|
"authors": [ |
|
{ |
|
"first": "Navonil", |
|
"middle": [], |
|
"last": "Majumder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soujanya", |
|
"middle": [], |
|
"last": "Poria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devamanyu", |
|
"middle": [], |
|
"last": "Hazarika", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Gelbukh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Cambria", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Navonil Majumder, Soujanya Poria, Devamanyu Haz- arika, Rada Mihalcea, Alexander F. Gelbukh, and Erik Cambria. 2018. Dialoguernn: An attentive RNN for emotion detection in conversations. CoRR, abs/1811.00405.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Efficient estimation of word representations in vector space", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of ICLR Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013. Efficient estimation of word represen- tations in vector space. In Proceedings of ICLR Workshop.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "On the momentum term in gradient descent learning algorithms", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ning Qian", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Neural Networks", |
|
"volume": "12", |
|
"issue": "1", |
|
"pages": "145--151", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ning Qian. 1999. On the momentum term in gradi- ent descent learning algorithms. Neural Networks, 12(1):145-151.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Sentiment classification towards question-answering with hierarchical matching network", |
|
"authors": [ |
|
{ |
|
"first": "Chenlin", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Changlong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingjing", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yangyang", |
|
"middle": [], |
|
"last": "Kang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shoushan", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaozhong", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luo", |
|
"middle": [], |
|
"last": "Si", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guodong", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3654--3663", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chenlin Shen, Changlong Sun, Jingjing Wang, Yangyang Kang, Shoushan Li, Xiaozhong Liu, Luo Si, Min Zhang, and Guodong Zhou. 2018. Senti- ment classification towards question-answering with hierarchical matching network. In Proceedings of EMNLP, pages 3654-3663.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Personalized sentiment classification based on latent individuality of microblog users", |
|
"authors": [ |
|
{ |
|
"first": "Kaisong", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shi", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daling", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ge", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kam-Fai", |
|
"middle": [], |
|
"last": "Wong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of IJCAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2277--2283", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kaisong Song, Shi Feng, Wei Gao, Daling Wang, Ge Yu, and Kam-Fai Wong. 2015. Personalized sen- timent classification based on latent individuality of microblog users. In Proceedings of IJCAI, pages 2277-2283.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Build emotion lexicon from the mood of crowd via topic-assisted joint non-negative matrix factorization", |
|
"authors": [ |
|
{ |
|
"first": "Kaisong", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ling", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shi", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daling", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chengqi", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of SIGIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "773--776", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kaisong Song, Wei Gao, Ling Chen, Shi Feng, Daling Wang, and Chengqi Zhang. 2016. Build emotion lexicon from the mood of crowd via topic-assisted joint non-negative matrix factorization. In Proceed- ings of SIGIR, pages 773-776.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Recommendation vs sentiment analysis: A text-driven latent factor model for rating prediction with cold-start awareness", |
|
"authors": [ |
|
{ |
|
"first": "Kaisong", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shi", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daling", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kam-Fai", |
|
"middle": [], |
|
"last": "Wong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chengqi", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of IJCAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2744--2750", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kaisong Song, Wei Gao, Shi Feng, Daling Wang, Kam- Fai Wong, and Chengqi Zhang. 2017. Recommen- dation vs sentiment analysis: A text-driven latent factor model for rating prediction with cold-start awareness. In Proceedings of IJCAI, pages 2744- 2750.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Cold-start aware deep memory network for multi-entity aspectbased sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Kaisong", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lujun", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Changlong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaozhong", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of IJCAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5197--5203", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kaisong Song, Wei Gao, Lujun Zhao, Jun Lin, Chang- long Sun, and Xiaozhong Liu. 2019. Cold-start aware deep memory network for multi-entity aspect- based sentiment analysis. In Proceedings of IJCAI, pages 5197-5203.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Aspect sentiment classification with both word-level and clause-level attention networks", |
|
"authors": [ |
|
{ |
|
"first": "Jingjing", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jie", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shoushan", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yangyang", |
|
"middle": [], |
|
"last": "Kang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luo", |
|
"middle": [], |
|
"last": "Si", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guodong", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of IJCAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4439--4445", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jingjing Wang, Jie Li, Shoushan Li, Yangyang Kang, Min Zhang, Luo Si, and Guodong Zhou. 2018a. As- pect sentiment classification with both word-level and clause-level attention networks. In Proceedings of IJCAI, pages 4439-4445.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Sentiment analysis of peer review texts for scholarly papers", |
|
"authors": [ |
|
{ |
|
"first": "Ke", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaojun", |
|
"middle": [], |
|
"last": "Wan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of SIGIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "175--184", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ke Wang and Xiaojun Wan. 2018. Sentiment analysis of peer review texts for scholarly papers. In Pro- ceedings of SIGIR, pages 175-184.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Personalized microblog sentiment classification via adversarial cross-lingual multi-task learning", |
|
"authors": [ |
|
{ |
|
"first": "Weichao", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shi", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daling", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yifei", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "338--348", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Weichao Wang, Shi Feng, Wei Gao, Daling Wang, and Yifei Zhang. 2018b. Personalized microblog sentiment classification via adversarial cross-lingual multi-task learning. In Proceedings of EMNLP, pages 338-348.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Personalized microblog sentiment classification via multitask learning", |
|
"authors": [ |
|
{ |
|
"first": "Fangzhao", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yongfeng", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3059--3065", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fangzhao Wu and Yongfeng Huang. 2016. Person- alized microblog sentiment classification via multi- task learning. In Proceedings of AAAI, pages 3059- 3065.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Multi-entity aspect-based sentiment analysis with context, entity and aspect memory", |
|
"authors": [ |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Runqi", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chongjun", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junyuan", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6029--6036", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jun Yang, Runqi Yang, Chongjun Wang, and Junyuan Xie. 2018a. Multi-entity aspect-based sentiment analysis with context, entity and aspect memory. In Proceedings of AAAI, pages 6029-6036.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Multi-entity aspect-based sentiment analysis with context, entity and aspect memory", |
|
"authors": [ |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Runqi", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chongjun", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junyuan", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jun Yang, Runqi Yang, Chongjun Wang, and Junyuan Xie. 2018b. Multi-entity aspect-based sentiment analysis with context, entity and aspect memory. In Proceedings of AAAI.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Hierarchical attention networks for document classification", |
|
"authors": [ |
|
{ |
|
"first": "Zichao", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diyi", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Smola", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1480--1489", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zichao Yang, Diyi Yang, Chris Dyer, Xiaodong He, Alexander J. Smola, and Eduard H. Hovy. 2016. Hi- erarchical attention networks for document classi- fication. In Proceedings of NAACL, pages 1480- 1489.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Multi-instance learning by treating instances as noni.i.d. samples", |
|
"authors": [ |
|
{ |
|
"first": "Zhi-Hua", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu-Yin", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu-Feng", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of ICML", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1249--1256", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhi-Hua Zhou, Yu-Yin Sun, and Yu-Feng Li. 2009. Multi-instance learning by treating instances as non- i.i.d. samples. In Proceedings of ICML, pages 1249- 1256.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "illustrates our prediction results with an example dialogue which is translated from Chi-C1: I have been waiting too long time! Negative [N G;N E] S1 : Dear, I am the customer service agent. May I help you? C2: I haven't received the goods yet! Why? Negative [N G;N G] S2 : Dear, I'm reading your messages. C3: As a rule, it should be received today. Neutral [N E;N E] S3 : Dear, I feel very sorry about it. S4 : I will help you push the workers. C4: You deserve bad ratings! Negative [N G;N G] C5: It is supposed to be shipped here in 3 days. Many days have passed! Negative [N G;N E] S5 : Dear, I have already pushed them. C6: Tell me when the goods will arrive? Negative [N G;N E] C7: I will apply for refund if I don't get it tomorrow. Neutral [N E;N E] S6 : I have urged them. (A few hours passed.) C8: I don't want it. I will apply for refund, immediately! Negative [N G;N G] S7 : I'm sorry for the inconvenience. S8 : Please wait for a few more days. S9 : I have pushed them twice! C9: However, I know this is useless. Negative [N G;N G] Satisfaction: Unsatisfied [U S;M T ] Figure 3: An example dialogue with predictions. C i (S j ) are customer (server) utterances. True labels are underlined. The predictions by our model and MIL-NET are colored in red and blue in the brackets, respectively.", |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"text": "The visualization of attention rates \u03b1 ct (Left in red) and \u03b2 s t (Right in blue) for the given example.", |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"content": "<table><tr><td/><td/><td/><td/><td/><td/><td colspan=\"2\">Basic Multiple</td></tr><tr><td/><td/><td/><td/><td/><td/><td colspan=\"2\">Instance Learning</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td>Segment</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td>Encoder</td></tr><tr><td>LSTM</td><td>LSTM</td><td>LSTM</td><td>LSTM</td><td>LSTM</td><td>LSTM</td><td>LSTM</td><td>LSTM</td></tr><tr><td/><td/><td/><td/><td>(</td><td>(</td><td>(</td><td/></tr></table>", |
|
"text": "", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"content": "<table/>", |
|
"text": "Statistics of the datasets we collected.", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF5": { |
|
"content": "<table><tr><td>Methods</td><td>WS F1</td><td>MT F1</td><td>Clothes US F1</td><td>MacroF1</td><td>Acc.</td></tr><tr><td>Server</td><td colspan=\"3\">0.346 0.785 0.589</td><td>0.573</td><td>0.689</td></tr><tr><td>Customer</td><td colspan=\"3\">0.553 0.824 0.663</td><td>0.681</td><td>0.759</td></tr><tr><td>NoPos</td><td colspan=\"3\">0.554 0.838 0.673</td><td>0.688</td><td>0.771</td></tr><tr><td>Average</td><td colspan=\"3\">0.070 0.789 0.347</td><td>0.402</td><td>0.671</td></tr><tr><td>Voting</td><td colspan=\"3\">0.059 0.776 0.046</td><td>0.293</td><td>0.633</td></tr><tr><td colspan=\"4\">CAMIL f ull 0.554 0.844 0.715</td><td>0.704</td><td>0.783</td></tr><tr><td>Methods</td><td>WS F1</td><td>MT F1</td><td>Makeup US F1</td><td>MacroF1</td><td>Acc.</td></tr><tr><td>Server</td><td colspan=\"3\">0.597 0.630 0.745</td><td>0.657</td><td>0.655</td></tr><tr><td>Customer</td><td colspan=\"3\">0.735 0.687 0.790</td><td>0.737</td><td>0.734</td></tr><tr><td>NoPos</td><td colspan=\"3\">0.731 0.742 0.864</td><td>0.779</td><td>0.779</td></tr><tr><td>Average</td><td colspan=\"3\">0.578 0.708 0.834</td><td>0.706</td><td>0.714</td></tr><tr><td>Voting</td><td colspan=\"3\">0.231 0.016 0.553</td><td>0.267</td><td>0.387</td></tr><tr><td colspan=\"4\">CAMIL f ull 0.738 0.745 0.874</td><td>0.786</td><td>0.785</td></tr></table>", |
|
"text": "Results of different satisfaction classification methods. The best results are highlighted.", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF6": { |
|
"content": "<table/>", |
|
"text": "Results of different model configurations.", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF8": { |
|
"content": "<table><tr><td>Methods</td><td>PO F1</td><td>NE F1</td><td>Clothes NG F1</td><td>MacroF1</td><td>Acc.</td></tr><tr><td>MILNET</td><td colspan=\"3\">0.441 0.814 0.404</td><td>0.553</td><td>0.713</td></tr><tr><td>CAMILs</td><td colspan=\"3\">0.545 0.867 0.506</td><td>0.639</td><td>0.787</td></tr><tr><td>CAMILr</td><td colspan=\"3\">0.470 0.870 0.529</td><td>0.623</td><td>0.792</td></tr><tr><td colspan=\"4\">CAMIL f ull 0.484 0.893 0.555</td><td>0.644</td><td>0.824</td></tr><tr><td>Methods</td><td>PO F1</td><td>NE F1</td><td>Makeup NG F1</td><td>MacroF1</td><td>Acc.</td></tr><tr><td>MILNET</td><td colspan=\"3\">0.447 0.387 0.416</td><td>0.417</td><td>0.410</td></tr><tr><td>CAMILs</td><td colspan=\"3\">0.566 0.672 0.501</td><td>0.580</td><td>0.609</td></tr><tr><td>CAMILr</td><td colspan=\"3\">0.556 0.600 0.488</td><td>0.548</td><td>0.561</td></tr><tr><td colspan=\"4\">CAMIL f ull 0.544 0.725 0.516</td><td>0.595</td><td>0.647</td></tr></table>", |
|
"text": "Sentiment distribution in satisfaction classes.", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF9": { |
|
"content": "<table/>", |
|
"text": "Results of sentiment classification by different models. The best results are highlighted.", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |