|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:33:30.483733Z" |
|
}, |
|
"title": "Improving BERT Performance for Aspect-Based Sentiment Analysis", |
|
"authors": [ |
|
{ |
|
"first": "Akbar", |
|
"middle": [], |
|
"last": "Karimi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Parma", |
|
"location": {} |
|
}, |
|
"email": "akbar.karimi@unipr.it" |
|
}, |
|
{ |
|
"first": "Leonardo", |
|
"middle": [], |
|
"last": "Rossi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Parma", |
|
"location": {} |
|
}, |
|
"email": "leonardo.rossi@unipr.it" |
|
}, |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Prati", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Parma", |
|
"location": {} |
|
}, |
|
"email": "andrea.prati@unipr.it" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Aspect-Based Sentiment Analysis (ABSA) addresses the problem of extracting sentiments and their targets from opinionated data such as consumer product reviews. Analyzing the language used in a review is a difficult task that requires a deep understanding of the language. In recent years, deep language models, such as BERT, have shown great progress in this regard. In this work, we propose two simple modules called Parallel Aggregation and Hierarchical Aggregation to be utilized on top of BERT for two main ABSA tasks namely Aspect Extraction (AE) and Aspect Sentiment Classification (ASC). With the proposed modules, we show that the intermediate layers of the BERT architecture can be utilized for the enhancement of the model performance 1 .", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Aspect-Based Sentiment Analysis (ABSA) addresses the problem of extracting sentiments and their targets from opinionated data such as consumer product reviews. Analyzing the language used in a review is a difficult task that requires a deep understanding of the language. In recent years, deep language models, such as BERT, have shown great progress in this regard. In this work, we propose two simple modules called Parallel Aggregation and Hierarchical Aggregation to be utilized on top of BERT for two main ABSA tasks namely Aspect Extraction (AE) and Aspect Sentiment Classification (ASC). With the proposed modules, we show that the intermediate layers of the BERT architecture can be utilized for the enhancement of the model performance 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "In an industry setting, it is extremely important to have a valid conception of how consumers perceive the products. Nowadays, they communicate their perception through their comments on the products, using mostly social networks. They might have positive opinions which can lead to the success of a business or negative ones possibly leading to its demise. Due to the abundance of these views in many areas, their analysis is a time-consuming and labor-intensive task which is why a variety of machine learning techniques such as Support Vector Machines (SVM) (Cortes and Vapnik, 1995; Kiritchenko et al., 2014; Basari et al., 2013) , Maximum Entropy (Jaynes, 1957; Nigam et al., 1999) , Naive Bayes (Duda et al., 1973; Gamallo and Garcia, 2014; Dinu and Iuga, 2012) , and Decision Trees (Quinlan, 1986; Wakade et al., 2012) have been proposed to perform opinion mining.", |
|
"cite_spans": [ |
|
{ |
|
"start": 561, |
|
"end": 586, |
|
"text": "(Cortes and Vapnik, 1995;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 587, |
|
"end": 612, |
|
"text": "Kiritchenko et al., 2014;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 613, |
|
"end": 633, |
|
"text": "Basari et al., 2013)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 636, |
|
"end": 666, |
|
"text": "Maximum Entropy (Jaynes, 1957;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 667, |
|
"end": 686, |
|
"text": "Nigam et al., 1999)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 701, |
|
"end": 720, |
|
"text": "(Duda et al., 1973;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 721, |
|
"end": 746, |
|
"text": "Gamallo and Garcia, 2014;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 747, |
|
"end": 767, |
|
"text": "Dinu and Iuga, 2012)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 789, |
|
"end": 804, |
|
"text": "(Quinlan, 1986;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 805, |
|
"end": 825, |
|
"text": "Wakade et al., 2012)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In recent years, Deep Learning (DL) techniques have been widely utilized due to the increase in computational power and the huge amount of freely available data on the Web (Zhang et al., 2015; Liu et al., 2015; Wang et al., 2016) . One of the areas on which these techniques have had a great impact is Natural Language Processing (NLP) where modeling (i.e. understanding) the language plays a crucial role. BERT (Devlin et al., 2019 ) is a stateof-the-art model of this kind which has become widely utilized in many NLP tasks (Kantor et al., 2019; Davison et al., 2019) as well as in other fields (Peng et al., 2019; Alsentzer et al., 2019) . It has been trained on a large corpus of Wikipedia documents and books in order to learn the language syntax and semantics from the context. The main component of its architecture is called the transformer (Vaswani et al., 2017) block consisting of attention heads. These heads have been designed to pay particular attention to parts of the input sentences that correspond to a particular given task (Vig and Belinkov, 2019) . In this work, we utilize BERT for Aspect-Based Sentiment Analysis (ABSA) tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 172, |
|
"end": 192, |
|
"text": "(Zhang et al., 2015;", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 193, |
|
"end": 210, |
|
"text": "Liu et al., 2015;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 211, |
|
"end": 229, |
|
"text": "Wang et al., 2016)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 412, |
|
"end": 432, |
|
"text": "(Devlin et al., 2019", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 526, |
|
"end": 547, |
|
"text": "(Kantor et al., 2019;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 548, |
|
"end": 569, |
|
"text": "Davison et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 597, |
|
"end": 616, |
|
"text": "(Peng et al., 2019;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 617, |
|
"end": 640, |
|
"text": "Alsentzer et al., 2019)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 849, |
|
"end": 871, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 1043, |
|
"end": 1067, |
|
"text": "(Vig and Belinkov, 2019)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our main contribution is the proposal of two simple modules that can help improve the performance of the BERT model. In our models we opt for Conditional Random Fields (CRFs) for the sequence labeling task which yield better results. In addition, our experiments show that training BERT for more number of epochs does not cause the model to overfit. However, after a certain number of training epochs, the learning seems to stop.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Recently, there has been a large body of work which utilizes the BERT model for various tasks in NLP in general such as text classification (Sun et al., 2019b) , question answering (Yang et al., 2019) , summarization (Liu, 2019) and, in particular, ABSA tasks (Hoang et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 140, |
|
"end": 159, |
|
"text": "(Sun et al., 2019b)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 181, |
|
"end": 200, |
|
"text": "(Yang et al., 2019)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 217, |
|
"end": 228, |
|
"text": "(Liu, 2019)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 260, |
|
"end": 280, |
|
"text": "(Hoang et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Using Graph Convolutional Networks (GCNs), Zhao et al. (2020) take into account sentiment dependencies in a sequence. In other words, they show that when there are multiple aspects in a sequence, the sentiment of one of them can affect that of the other one. Making use of this information can increase the performance of the model. Some studies convert the Aspect Extraction (AE) task into a sentence-pair classification task. For instance, Sun et al. (2019a) construct auxiliary sentences using the aspect terms of a sequence. Then, utilizing both sequences, they fine-tune BERT on this specific task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 43, |
|
"end": 61, |
|
"text": "Zhao et al. (2020)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Word and sentence level representations of a model can also be enriched using domain-specific data. Xu et al. (2019) show this by post-training the BERT model, which they call BERT-PT, on additional restaurant and laptop data. In our experiments, we use their pre-trained model for the initialization of our models. Due to the particular architecture of the BERT model, extra modules can be attached on top of it. add different layers such as an RNN and a CRF layer to perform ABSA in an end-to-end fashion. In our work, we use the same layer modules from the BERT architecture and employ the hidden layers for prediction as well.", |
|
"cite_spans": [ |
|
{ |
|
"start": 100, |
|
"end": 116, |
|
"text": "Xu et al. (2019)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Two of the main tasks in ABSA are Aspect Extraction (AE) and Aspect Sentiment Classification (ASC). While the latter deals with the semantics of a sentence as a whole, the former is concerned with finding which word that sentiment refers to. We briefly describe them in this section.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aspect-Based Sentiment Analysis Tasks", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In AE, the goal is to extract a specific aspect of a product towards which some type of sentiment is expressed in a review. For instance, in the sentence, \"The laptop has a good battery.\", the word battery is the aspect which is extracted. Sometimes, the aspect words can be multiple in which case all of them need to be labeled accordingly. This task can be seen as a sequence labeling task, where the words are assigned a label from the set of three letters namely {B, I, O}. Each word in the sequence can be the beginning word of aspect terms (B), among the aspect terms (I), or not an aspect term (O). The classification of each word into one of these three classes, is accomplished using a fully connected layer on top of the BERT architecture and applying the Softmax function.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aspect Extraction", |
|
"sec_num": "3.1" |
|
}, |
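
To make the aspect extraction head concrete, the following minimal sketch (our illustration, not the authors' code) shows a BERT encoder with a fully connected layer that assigns each token one of the {B, I, O} labels via Softmax. It assumes the Hugging Face transformers library and PyTorch; the class name and model name are ours.

```python
# Illustrative sketch (not the authors' code): BERT token classification for
# BIO-style aspect extraction, assuming the Hugging Face `transformers` library.
import torch
import torch.nn as nn
from transformers import BertModel, BertTokenizerFast


class AspectExtractionHead(nn.Module):
    def __init__(self, model_name="bert-base-uncased", num_labels=3):
        super().__init__()
        self.bert = BertModel.from_pretrained(model_name)
        # Fully connected layer mapping each token representation to {B, I, O}
        self.classifier = nn.Linear(self.bert.config.hidden_size, num_labels)

    def forward(self, input_ids, attention_mask):
        hidden = self.bert(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        logits = self.classifier(hidden)           # (batch, seq_len, 3)
        return torch.softmax(logits, dim=-1)       # per-token B/I/O probabilities


tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
batch = tokenizer(["The laptop has a good battery."], return_tensors="pt")
probs = AspectExtractionHead()(batch["input_ids"], batch["attention_mask"])
```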
|
{ |
|
"text": "In this task, the goal is to extract the sentiment expressed in a review by the consumer. Given a sequence, one of the three classes of Positive, Negative, and Neutral is extracted as the class of that sequence. The representation for this element is embodied in the architecture of the BERT model. For each sequence as input, there are two extra tokens that are used by the BERT model:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aspect Sentiment Classification", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "[CLS], w 1 , w 2 , ..., w n , [SEP ]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aspect Sentiment Classification", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where w i are the sequence words and [CLS] and [SEP ] tokens are concatenated to the sentence in the input stage. While the [CLS] token is there to store the sentiment representation of the sentence, the [SEP ] token is used to separate input sequences in case there are more than one (e.g. in a question answering task). In the final layer of the architecture, a Softmax function is applied to the [CLS] embedding and the class probability is computed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aspect Sentiment Classification", |
|
"sec_num": "3.2" |
|
}, |
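
As an illustration of how the [CLS] representation is used for ASC, the following minimal sketch (ours, not the paper's code; it assumes the Hugging Face transformers library) applies a fully connected layer and Softmax to the [CLS] embedding of the last layer.

```python
# Illustrative sketch (not the authors' code): sentence-level sentiment
# classification from the [CLS] embedding, assuming Hugging Face `transformers`.
import torch
import torch.nn as nn
from transformers import BertModel, BertTokenizerFast


class AspectSentimentClassifier(nn.Module):
    def __init__(self, model_name="bert-base-uncased", num_classes=3):
        super().__init__()
        self.bert = BertModel.from_pretrained(model_name)
        self.classifier = nn.Linear(self.bert.config.hidden_size, num_classes)

    def forward(self, input_ids, attention_mask):
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        cls_embedding = outputs.last_hidden_state[:, 0]      # the [CLS] token
        return torch.softmax(self.classifier(cls_embedding), dim=-1)


tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
batch = tokenizer("The laptop has a good battery.", return_tensors="pt")
probs = AspectSentimentClassifier()(batch["input_ids"], batch["attention_mask"])
print(probs)  # probabilities over {Positive, Negative, Neutral}
```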
|
{ |
|
"text": "Deep models can capture deeper knowledge of the language as they grow. As shown by Jawahar et al. (2019) , the initial to middle layers of BERT can extract syntactic information, whereas the language semantics are represented in higher layers. Since extracting the sentence sentiment is semantically demanding, we expect to see this in higher layers of the network. This is the intuition behind our models where we exploit the final layers of the BERT model. The two models that we introduce here are similar in principle, but slightly differ in implementation. Also, for the two tasks, the losses are computed differently. While for the ASC task we utilize cross-entropy loss, for the AE task, we make use of CRFs. The reason for this choice is that the AE task can be treated as sequence labeling. Therefore, taking into account the previous labels in the sequence is of high importance, which is exactly what the CRF layer does. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 83, |
|
"end": 104, |
|
"text": "Jawahar et al. (2019)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "CRFs (Lafferty et al., 2001 ) are a type of graphical models and have been used both in computer vision (e.g. for pixel-level labeling (Zheng et al., 2015) ) and in NLP for sequence labeling.", |
|
"cite_spans": [ |
|
{ |
|
"start": 5, |
|
"end": 27, |
|
"text": "(Lafferty et al., 2001", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 135, |
|
"end": 155, |
|
"text": "(Zheng et al., 2015)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conditional Random Fields", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Since AE can be considered a sequence labeling task, we opt for using a CRF layer in the last part of our models. The justification for the use of a CRF module for AE is that doing so helps the network to take into account the joint distribution of the labels. This can be significant since the labels of sequence words are dependent on the words that appear before them. For instance, as is seen in Figure 1 , the occurrence of the adjective good can give the model a clue that the next word is probably not another adjective. The equation with which the joint probability of the labels is computed is as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 400, |
|
"end": 408, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conditional Random Fields", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(y|x) = 1 Z(x) T t=1 exp K k=1 \u03b8 k f k (yt, yt\u22121, xt)", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Conditional Random Fields", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In Formula 1, x is the observed sequence, y is the sequence of labels, and k and t are the indices for feature functions and time steps in the sequence, respectively. The relations between sequence words are represented by using feature functions {f k }. These relations can be strong or weak, or nonexistent at all. They are controlled by their weights {\u03b8 k } which are computed during the training phase. Finally, Z(x) is a normalization factor.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conditional Random Fields", |
|
"sec_num": "4.1" |
|
}, |
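
In practice, the computation in Formula 1 is usually provided by an off-the-shelf CRF layer. The sketch below (our illustration, assuming the pytorch-crf package rather than the authors' exact implementation) turns per-token emission scores into a sequence-level negative log-likelihood and decodes the most likely {B, I, O} path.

```python
# Illustrative sketch (not the authors' code): a CRF layer over per-token
# emission scores, assuming the `pytorch-crf` package (torchcrf).
import torch
from torchcrf import CRF

num_tags = 3                                     # B, I, O
crf = CRF(num_tags, batch_first=True)

# Emission scores would normally come from BERT followed by a linear layer.
emissions = torch.randn(2, 6, num_tags)          # (batch, seq_len, num_tags)
tags = torch.randint(0, num_tags, (2, 6))        # gold BIO labels
mask = torch.ones(2, 6, dtype=torch.bool)        # marks real (non-padding) tokens

loss = -crf(emissions, tags, mask=mask, reduction="mean")  # negative log-likelihood of the label sequence
best_paths = crf.decode(emissions, mask=mask)              # most likely BIO sequence per example
```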
|
{ |
|
"text": "Rossi et al. (2020) showed that the hidden layers of deep models can be exploited more to extract region specific information. Inspired by their work, we propose a model called P-SUM applying BERT layer modules on each one of the best performing BERT layers. Figure 2 shows the details of this model. We exploit the last four layers of the BERT model by adding one more BERT layer plus a fully connected layer and calculating the loss of that branch on the input data, using a Softmax function and a conditional random fields layer. The reason is that all deeper layers contain most of the related information regarding the task. Therefore, extracting this information from each one of them and combining them can produce richer representations of the semantics. In order to calculate the total loss, the loss values of all branches are summed up which is indicated with \u03a3 notation in the diagram. This is done so, in order to take all the losses into account when optimizing the parameters. However, to compute the network's output logits, we average over the output logits of the four branches.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 259, |
|
"end": 267, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Parallel Aggregation", |
|
"sec_num": "4.2" |
|
}, |
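
The following sketch reflects our reading of Figure 2, not the released implementation, and shows the P-SUM idea for the ASC setting with a cross-entropy loss: each of the last four hidden layers passes through its own extra BERT layer and classification head, branch losses are summed, and branch logits are averaged. Class names and hyperparameters are illustrative; the AE variant described in the paper would use a CRF loss instead.

```python
# Illustrative sketch of the P-SUM idea (our reading of Figure 2, not the
# released implementation), shown for ASC with a cross-entropy loss.
# Assumes Hugging Face `transformers` (v4-style import path for BertLayer).
import torch
import torch.nn as nn
from transformers import BertModel
from transformers.models.bert.modeling_bert import BertLayer


class PSum(nn.Module):
    def __init__(self, model_name="bert-base-uncased", num_classes=3, num_branches=4):
        super().__init__()
        self.bert = BertModel.from_pretrained(model_name, output_hidden_states=True)
        cfg = self.bert.config
        # One extra BERT layer and one classification head per exploited hidden layer
        self.branches = nn.ModuleList([BertLayer(cfg) for _ in range(num_branches)])
        self.heads = nn.ModuleList([nn.Linear(cfg.hidden_size, num_classes)
                                    for _ in range(num_branches)])
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, input_ids, attention_mask, labels=None):
        hidden_states = self.bert(input_ids, attention_mask=attention_mask).hidden_states
        selected = hidden_states[-4:]                         # last four encoder layers
        ext_mask = self.bert.get_extended_attention_mask(attention_mask, input_ids.shape)
        logits, total_loss = [], 0.0
        for h, layer, head in zip(selected, self.branches, self.heads):
            h = layer(h, attention_mask=ext_mask)[0]          # extra BERT layer for this branch
            branch_logits = head(h[:, 0])                     # classify from [CLS]
            logits.append(branch_logits)
            if labels is not None:
                total_loss = total_loss + self.loss_fn(branch_logits, labels)  # branch losses are summed
        return torch.stack(logits).mean(dim=0), total_loss    # averaged logits, summed loss
```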
|
{ |
|
"text": "Our hierarchical aggregation (H-SUM) model is inspired by the use of Feature Pyramid Networks (FPNs) (Lin et al., 2017) . The goal is to extract more semantics from the hidden layers of the BERT model. The architecture of the H-SUM model can be seen in Figure 3 . Here, after applying a BERT layer on each one of the hidden layers, the output is aggregated (element-wise) with the previous layer. At the same time, similar to the P-SUM, each branch produces a loss value which contributes to the total loss equally since the total loss is the summation of all of them.", |
|
"cite_spans": [ |
|
{ |
|
"start": 101, |
|
"end": 119, |
|
"text": "(Lin et al., 2017)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 253, |
|
"end": 261, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Hierarchical Aggregation", |
|
"sec_num": "4.3" |
|
}, |
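
Continuing the P-SUM sketch above, a hierarchical (H-SUM) variant can be obtained by aggregating each processed layer element-wise with the next hidden layer in a top-down, FPN-like fashion. This is our interpretation of Figure 3, not the released code; the exact ordering of the element-wise addition and the extra BERT layer may differ, and the snippet reuses the PSum class and imports from the previous sketch.

```python
# Illustrative sketch of the H-SUM idea (our interpretation of Figure 3),
# reusing the PSum class, imports, and `torch` from the previous sketch.
class HSum(PSum):
    def forward(self, input_ids, attention_mask, labels=None):
        hidden_states = self.bert(input_ids, attention_mask=attention_mask).hidden_states
        selected = list(hidden_states[-4:])[::-1]          # deepest hidden layer first
        ext_mask = self.bert.get_extended_attention_mask(attention_mask, input_ids.shape)
        logits, total_loss, carry = [], 0.0, None
        for h, layer, head in zip(selected, self.branches, self.heads):
            if carry is not None:
                h = h + carry                              # element-wise aggregation with the previous branch
            carry = layer(h, attention_mask=ext_mask)[0]   # extra BERT layer for this branch
            branch_logits = head(carry[:, 0])
            logits.append(branch_logits)
            if labels is not None:
                total_loss = total_loss + self.loss_fn(branch_logits, labels)  # every branch contributes equally
        return torch.stack(logits).mean(dim=0), total_loss
```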
|
{ |
|
"text": "In order to carry out our experiments, we use the same codebase as Xu et al. (2019) . We ran the experiments on a GPU (GeForce RTX 2070) with 8 GB of memory using batches of 16 for both our models and the BERT-PT model as the baseline. For training, Adam optimizer was used and the learning rate was set to 3e\u22125. From the distributed training data, we used 150 examples as the validation. To evaluate the models, the official scripts were used for the AE tasks and the script from the same codebase was used for the ASC task. Results are reported in F1 for AE and in Accuracy and MF1 for ASC. While F1 score is the harmonic mean of precision and recall, MF1 score is the average of F1 score for each class.", |
|
"cite_spans": [ |
|
{ |
|
"start": 67, |
|
"end": 83, |
|
"text": "Xu et al. (2019)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
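
For reference, the reported metrics can be reproduced with scikit-learn (the paper itself relies on the official SemEval scripts and the evaluation script from the BERT-PT codebase; this snippet only illustrates what Accuracy and MF1 measure). MF1 here is the macro-averaged F1, i.e. the unweighted mean of the per-class F1 scores.

```python
# Illustrative sketch (our addition): computing Accuracy and macro F1 (MF1)
# with scikit-learn on toy ASC predictions.
from sklearn.metrics import accuracy_score, f1_score

y_true = ["positive", "negative", "neutral", "positive", "negative"]
y_pred = ["positive", "negative", "positive", "positive", "neutral"]

accuracy = accuracy_score(y_true, y_pred)            # ASC accuracy
mf1 = f1_score(y_true, y_pred, average="macro")      # ASC MF1: mean of per-class F1
print(f"Acc={accuracy:.2f}, MF1={mf1:.2f}")
```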
|
{ |
|
"text": "In our experiments, we utilized laptop and restaurant datasets from SemEval 2014 (Pontiki et al., 2014) Subtask 2 and 2016 (Pontiki et al., 2016) Subtask 1. The collections consist of user reviews which have been annotated manually. Tables 1 and 2 show the statistics of these datasets. In choosing the datasets, we opted for the ones utilized in previous works Xu et al., 2019 ) so that we can draw a reliable comparison between the performance of our models and those ones. Each model is the BERT model using the specified number of layers. 1L means using the first layers, 2L means using the first 2 layers, etc. Accuracy values are percentages.", |
|
"cite_spans": [ |
|
{ |
|
"start": 81, |
|
"end": 103, |
|
"text": "(Pontiki et al., 2014)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 123, |
|
"end": 145, |
|
"text": "(Pontiki et al., 2016)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 363, |
|
"end": 378, |
|
"text": "Xu et al., 2019", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 233, |
|
"end": 248, |
|
"text": "Tables 1 and 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Depending on the depth of the network, it can perform differently. Therefore, we carried out experiments to find out how each layer of the BERT model performs. The results are shown in Figure 4 . As can be seen, better performance is achieved in the deeper layers, especially the last four. Therefore, our modules operate on these four layers to achieve an improved model.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 185, |
|
"end": 194, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Performance of BERT Layers", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "More training can lead to a better performance of the network. However, one risks the peril of overfitting especially when the number of training examples are not considered to be large compared to the number of parameters contained in the model. However, in the case of BERT, as was also observed by , it seems that with more training the model does not overfit although the number of the training data points is relatively small. The reason behind this could be the fact that we are using an already pre-trained model which has seen an enormous amount of data (Wikipedia and Books Corpus). Therefore, we can expect that by performing more training, the model will still be able to generalize.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Increasing Training Epochs", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "The same observation can be made by looking at the validation losses in Figure 5 . In case of an overfit, we would expect the losses to go up and the performance to go down. However, we see that with the increase in loss after the second epoch, the performance still improves for a couple of epochs and then fluctuates in the subsequent ones ( Figure 4 ). This suggests that with more training, the network weights continue to change until they remain almost stable in later epochs, indicating that there is no more learning. From Figure 4 , we see that with 4 or 5 training epochs we get near the maximum performance. Although some later epochs such as 12 yield better results for the 12-layer version, it can be considered negligible.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 72, |
|
"end": 80, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 344, |
|
"end": 353, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 532, |
|
"end": 540, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Increasing Training Epochs", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Our experimental results show that with the increase of the training epochs the BERT model also improves. These results can be seen in Table 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 135, |
|
"end": 142, |
|
"text": "Table 3", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "To compare our proposed models with Xu et al. (2019) , we perform the same model selection for both of them. Unlike Xu et al. (2019) and who select their best models based on the lowest validation loss, we choose the mod-els trained with four epochs after observing that accuracy goes up on the validation sets ( Figure 4 ). Therefore, in Table 3 , we report the original BERT-PT scores as well as the ones for our model selection. From Table 3 , it can also be seen that the proposed models outperform the newly selected BERT-PT model in both datasets and tasks with improvements in MF1 score as high as +1.78 and +2 for ASC on laptop and restaurant, respectively. It is also worth noting that, in terms of accuracy, the H-SUM module performs better than the P-SUM module in most cases. This could be attributed to the hierarchical structure of the module and the fact that each branch of this module benefits from the information processed in the preceding branch.", |
|
"cite_spans": [ |
|
{ |
|
"start": 36, |
|
"end": 52, |
|
"text": "Xu et al. (2019)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 116, |
|
"end": 132, |
|
"text": "Xu et al. (2019)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 313, |
|
"end": 322, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 340, |
|
"end": 347, |
|
"text": "Table 3", |
|
"ref_id": "TABREF6" |
|
}, |
|
{ |
|
"start": 438, |
|
"end": 445, |
|
"text": "Table 3", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We proposed two simple modules utilizing the hidden layers of the BERT language model to produce (Xu et al., 2018) 81.59 74.37 ----BERT-PT (Xu et al., 2019) 84.26 77.97 78.07 75.08 84.95 76.96 BAT 85 deeper semantic representations of input sequences. The layers are once aggregated in a parallel fashion and once hierarchically. For each branch of the architecture built on top of the selected hidden layers, we compute the loss separately. These losses are then aggregated to produce the final loss of the model. We address aspect extraction using conditional random fields which helps to take into account the joint distribution of the sequence labels to achieve more accurate predictions. Our experiments show that the proposed approaches outperform the post-trained vanilla BERT model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 97, |
|
"end": 114, |
|
"text": "(Xu et al., 2018)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 139, |
|
"end": 156, |
|
"text": "(Xu et al., 2019)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "https://github.com/IMPLabUniPr/ BERT-for-ABSA", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Publicly available clinical BERT embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Alsentzer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Murphy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Boag", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Hung", |
|
"middle": [], |
|
"last": "Weng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Jindi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tristan", |
|
"middle": [], |
|
"last": "Naumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Mcdermott", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2nd Clinical Natural Language Processing Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "72--78", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily Alsentzer, John Murphy, William Boag, Wei- Hung Weng, Di Jindi, Tristan Naumann, and Matthew McDermott. 2019. Publicly available clini- cal BERT embeddings. In Proceedings of the 2nd Clinical Natural Language Processing Workshop, pages 72-78.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Opinion mining of movie review using hybrid method of support vector machine and particle swarm optimization", |
|
"authors": [ |
|
{ |
|
"first": "Abd", |
|
"middle": [], |
|
"last": "Samad Hasan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Burairah", |
|
"middle": [], |
|
"last": "Basari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Hussin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junta", |
|
"middle": [], |
|
"last": "Gede Pramudya Ananta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Zeniarja", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Procedia Engineering", |
|
"volume": "53", |
|
"issue": "", |
|
"pages": "453--462", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abd Samad Hasan Basari, Burairah Hussin, I Gede Pra- mudya Ananta, and Junta Zeniarja. 2013. Opinion mining of movie review using hybrid method of sup- port vector machine and particle swarm optimiza- tion. Procedia Engineering, 53:453-462.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Support vector machine", |
|
"authors": [ |
|
{ |
|
"first": "Corinna", |
|
"middle": [], |
|
"last": "Cortes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vladimir", |
|
"middle": [], |
|
"last": "Vapnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Machine learning", |
|
"volume": "20", |
|
"issue": "3", |
|
"pages": "273--297", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Corinna Cortes and Vladimir Vapnik. 1995. Support vector machine. Machine learning, 20(3):273-297.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Commonsense knowledge mining from pretrained models", |
|
"authors": [ |
|
{ |
|
"first": "Joe", |
|
"middle": [], |
|
"last": "Davison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joshua", |
|
"middle": [], |
|
"last": "Feldman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander M", |
|
"middle": [], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1173--1178", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joe Davison, Joshua Feldman, and Alexander M Rush. 2019. Commonsense knowledge mining from pre- trained models. In Proceedings of the 2019 Con- ference on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing (EMNLP- IJCNLP), pages 1173-1178.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In NAACL-HLT (1).", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "The naive bayes classifier in opinion mining: in search of the best feature set", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Liviu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iulia", |
|
"middle": [], |
|
"last": "Dinu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Iuga", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "International Conference on Intelligent Text Processing and Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "556--567", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Liviu P Dinu and Iulia Iuga. 2012. The naive bayes classifier in opinion mining: in search of the best feature set. In International Conference on Intelli- gent Text Processing and Computational Linguistics, pages 556-567. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Pattern classification and scene analysis", |
|
"authors": [ |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Richard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Duda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Hart", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1973, |
|
"venue": "", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Richard O Duda, Peter E Hart, et al. 1973. Pattern classification and scene analysis, volume 3. Wiley New York.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Citius: A naive-bayes strategy for sentiment analysis on english tweets", |
|
"authors": [ |
|
{ |
|
"first": "Pablo", |
|
"middle": [], |
|
"last": "Gamallo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcos", |
|
"middle": [], |
|
"last": "Garcia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 8th international Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "171--175", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pablo Gamallo and Marcos Garcia. 2014. Citius: A naive-bayes strategy for sentiment analysis on en- glish tweets. In Proceedings of the 8th international Workshop on Semantic Evaluation (SemEval 2014), pages 171-175.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Aspect-based sentiment analysis using bert", |
|
"authors": [ |
|
{ |
|
"first": "Mickel", |
|
"middle": [], |
|
"last": "Hoang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alija", |
|
"middle": [], |
|
"last": "Oskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacobo", |
|
"middle": [], |
|
"last": "Bihorac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Rouces", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 22nd Nordic Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "187--196", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mickel Hoang, Oskar Alija Bihorac, and Jacobo Rouces. 2019. Aspect-based sentiment analysis us- ing bert. In Proceedings of the 22nd Nordic Confer- ence on Computational Linguistics, pages 187-196.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "What does BERT learn about the structure of language", |
|
"authors": [ |
|
{ |
|
"first": "Ganesh", |
|
"middle": [], |
|
"last": "Jawahar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Beno\u00eet", |
|
"middle": [], |
|
"last": "Sagot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Djam\u00e9", |
|
"middle": [], |
|
"last": "Seddah", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3651--3657", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ganesh Jawahar, Beno\u00eet Sagot, and Djam\u00e9 Seddah. 2019. What does BERT learn about the structure of language? In Proceedings of the 57th Annual Meet- ing of the Association for Computational Linguistics, pages 3651-3657.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Information theory and statistical mechanics", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Edwin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Jaynes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1957, |
|
"venue": "Physical review", |
|
"volume": "106", |
|
"issue": "4", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Edwin T Jaynes. 1957. Information theory and statisti- cal mechanics. Physical review, 106(4):620.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Learning to combine grammatical error corrections", |
|
"authors": [ |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Kantor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Katz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leshem", |
|
"middle": [], |
|
"last": "Choshen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edo", |
|
"middle": [], |
|
"last": "Cohen-Karlik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naftali", |
|
"middle": [], |
|
"last": "Liberman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Assaf", |
|
"middle": [], |
|
"last": "Toledo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amir", |
|
"middle": [], |
|
"last": "Menczel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Slonim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Fourteenth Workshop on Innovative Use of NLP for Building Educational Applications", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "139--148", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoav Kantor, Yoav Katz, Leshem Choshen, Edo Cohen- Karlik, Naftali Liberman, Assaf Toledo, Amir Menczel, and Noam Slonim. 2019. Learning to com- bine grammatical error corrections. In Proceedings of the Fourteenth Workshop on Innovative Use of NLP for Building Educational Applications, pages 139-148.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Adversarial training for aspect-based sentiment analysis with BERT", |
|
"authors": [ |
|
{ |
|
"first": "Akbar", |
|
"middle": [], |
|
"last": "Karimi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leonardo", |
|
"middle": [], |
|
"last": "Rossi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Prati", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katharina", |
|
"middle": [], |
|
"last": "Full", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2001.11316" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Akbar Karimi, Leonardo Rossi, Andrea Prati, and Katharina Full. 2020. Adversarial training for aspect-based sentiment analysis with BERT. arXiv preprint arXiv:2001.11316.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Nrc-canada-2014: Detecting aspects and sentiment in customer reviews", |
|
"authors": [ |
|
{ |
|
"first": "Svetlana", |
|
"middle": [], |
|
"last": "Kiritchenko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodan", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Cherry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saif", |
|
"middle": [], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 8th International Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "437--442", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Svetlana Kiritchenko, Xiaodan Zhu, Colin Cherry, and Saif Mohammad. 2014. Nrc-canada-2014: Detect- ing aspects and sentiment in customer reviews. In Proceedings of the 8th International Workshop on Semantic Evaluation (SemEval 2014), pages 437- 442.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Conditional random fields: Probabilistic models for segmenting and labeling sequence data", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Lafferty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando Cn", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Lafferty, Andrew McCallum, and Fernando CN Pereira. 2001. Conditional random fields: Prob- abilistic models for segmenting and labeling se- quence data.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Exploiting BERT for end-to-end aspect-based sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lidong", |
|
"middle": [], |
|
"last": "Bing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenxuan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wai", |
|
"middle": [], |
|
"last": "Lam", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 5th Workshop on Noisy User-generated Text (W-NUT 2019)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "34--41", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xin Li, Lidong Bing, Wenxuan Zhang, and Wai Lam. 2019. Exploiting BERT for end-to-end aspect-based sentiment analysis. In Proceedings of the 5th Work- shop on Noisy User-generated Text (W-NUT 2019), pages 34-41.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Kaiming He, Bharath Hariharan, and Serge Belongie", |
|
"authors": [ |
|
{ |
|
"first": "Tsung-Yi", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Doll\u00e1r", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ross", |
|
"middle": [], |
|
"last": "Girshick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the IEEE conference on computer vision and pattern recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2117--2125", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Belongie. 2017. Feature pyramid networks for object detection. In Proceedings of the IEEE conference on computer vi- sion and pattern recognition, pages 2117-2125.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Finegrained opinion mining with recurrent neural networks and word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Pengfei", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shafiq", |
|
"middle": [], |
|
"last": "Joty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Helen", |
|
"middle": [], |
|
"last": "Meng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 conference on empirical methods in natural language processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1433--1443", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pengfei Liu, Shafiq Joty, and Helen Meng. 2015. Fine- grained opinion mining with recurrent neural net- works and word embeddings. In Proceedings of the 2015 conference on empirical methods in natural language processing, pages 1433-1443.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Fine-tune BERT for extractive summarization", |
|
"authors": [ |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1903.10318" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang Liu. 2019. Fine-tune BERT for extractive sum- marization. arXiv preprint arXiv:1903.10318.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Using maximum entropy for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Kamal", |
|
"middle": [], |
|
"last": "Nigam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Lafferty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "IJCAI-99 workshop on machine learning for information filtering", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "61--67", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kamal Nigam, John Lafferty, and Andrew McCallum. 1999. Using maximum entropy for text classifica- tion. In IJCAI-99 workshop on machine learning for information filtering, volume 1, pages 61-67.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Transfer learning in biomedical natural language processing: An evaluation of BERT and ELMo on ten benchmarking datasets", |
|
"authors": [ |
|
{ |
|
"first": "Yifan", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shankai", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyong", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 18th BioNLP Workshop and Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "58--65", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yifan Peng, Shankai Yan, and Zhiyong Lu. 2019. Transfer learning in biomedical natural language processing: An evaluation of BERT and ELMo on ten benchmarking datasets. In Proceedings of the 18th BioNLP Workshop and Shared Task, pages 58- 65.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Semeval-2014 task 4: Aspect based sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Pontiki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dimitrios", |
|
"middle": [], |
|
"last": "Galanis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Pavlopoulos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Harris", |
|
"middle": [], |
|
"last": "Papageorgiou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 8th international workshop on semantic evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "27--35", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/S14-2004" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maria Pontiki, Dimitrios Galanis, John Pavlopoulos, Harris Papageorgiou, Ion Androutsopoulos, and Suresh Manandhar. 2014. Semeval-2014 task 4: As- pect based sentiment analysis. Proceedings of the 8th international workshop on semantic evaluation (SemEval 2014), pages 27-35.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Semeval-2016 task 5: Aspect based sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Pontiki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dimitris", |
|
"middle": [], |
|
"last": "Galanis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haris", |
|
"middle": [], |
|
"last": "Papageorgiou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ion", |
|
"middle": [], |
|
"last": "Androutsopoulos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Suresh", |
|
"middle": [], |
|
"last": "Manandhar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Al-Smadi", |
|
"middle": [], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mahmoud", |
|
"middle": [], |
|
"last": "Al-Ayyoub", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanyan", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Orph\u00e9e", |
|
"middle": [], |
|
"last": "De Clercq", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 10th international workshop on semantic evaluation (SemEval-2016)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "19--30", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maria Pontiki, Dimitris Galanis, Haris Papageor- giou, Ion Androutsopoulos, Suresh Manandhar, AL- Smadi Mohammad, Mahmoud Al-Ayyoub, Yanyan Zhao, Bing Qin, Orph\u00e9e De Clercq, et al. 2016. Semeval-2016 task 5: Aspect based sentiment anal- ysis. In Proceedings of the 10th international work- shop on semantic evaluation (SemEval-2016), pages 19-30.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Induction of decision trees. Machine learning", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ross", |
|
"middle": [], |
|
"last": "Quinlan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1986, |
|
"venue": "", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "81--106", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Ross Quinlan. 1986. Induction of decision trees. Ma- chine learning, 1(1):81-106.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "A novel region of interest extraction layer for instance segmentation", |
|
"authors": [ |
|
{ |
|
"first": "Leonardo", |
|
"middle": [], |
|
"last": "Rossi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Akbar", |
|
"middle": [], |
|
"last": "Karimi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Prati", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2004.13665" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leonardo Rossi, Akbar Karimi, and Andrea Prati. 2020. A novel region of interest extraction layer for instance segmentation. arXiv preprint arXiv:2004.13665.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Utilizing BERT for aspect-based sentiment analysis via constructing auxiliary sentence", |
|
"authors": [ |
|
{ |
|
"first": "Chi", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luyao", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xipeng", |
|
"middle": [], |
|
"last": "Qiu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "380--385", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chi Sun, Luyao Huang, and Xipeng Qiu. 2019a. Uti- lizing BERT for aspect-based sentiment analysis via constructing auxiliary sentence. In Proceedings of NAACL-HLT, pages 380-385.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "How to fine-tune BERT for text classification?", |
|
"authors": [ |
|
{ |
|
"first": "Chi", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xipeng", |
|
"middle": [], |
|
"last": "Qiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yige", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuanjing", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "China National Conference on", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chi Sun, Xipeng Qiu, Yige Xu, and Xuanjing Huang. 2019b. How to fine-tune BERT for text clas- sification? In China National Conference on", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Chinese Computational Linguistics", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "194--206", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chinese Computational Linguistics, pages 194-206. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information pro- cessing systems, pages 5998-6008.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Analyzing the structure of attention in a transformer language model", |
|
"authors": [ |
|
{ |
|
"first": "Jesse", |
|
"middle": [], |
|
"last": "Vig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Belinkov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "63--76", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jesse Vig and Yonatan Belinkov. 2019. Analyzing the structure of attention in a transformer language model. In Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 63-76.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Text mining for sentiment analysis of twitter data", |
|
"authors": [ |
|
{ |
|
"first": "Shruti", |
|
"middle": [], |
|
"last": "Wakade", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chandra", |
|
"middle": [], |
|
"last": "Shekar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kathy", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Liszka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chien-Chung", |
|
"middle": [], |
|
"last": "Chan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the International Conference on Information and Knowledge Engineering (IKE), page 1. The Steering Committee of The World Congress in Computer Science", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shruti Wakade, Chandra Shekar, Kathy J Liszka, and Chien-Chung Chan. 2012. Text mining for senti- ment analysis of twitter data. In Proceedings of the International Conference on Information and Knowledge Engineering (IKE), page 1. The Steering Committee of The World Congress in Computer Sci- ence, Computer . . . .", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Attention-based lstm for aspectlevel sentiment classification", |
|
"authors": [ |
|
{ |
|
"first": "Yequan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minlie", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoyan", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 conference on empirical methods in natural language processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "606--615", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yequan Wang, Minlie Huang, Xiaoyan Zhu, and Li Zhao. 2016. Attention-based lstm for aspect- level sentiment classification. In Proceedings of the 2016 conference on empirical methods in natural language processing, pages 606-615.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Double embeddings and cnn-based sequence labeling for aspect extraction", |
|
"authors": [ |
|
{ |
|
"first": "Hu", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Shu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S Yu", |
|
"middle": [], |
|
"last": "Philip", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "592--598", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hu Xu, Bing Liu, Lei Shu, and S Yu Philip. 2018. Dou- ble embeddings and cnn-based sequence labeling for aspect extraction. In Proceedings of the 56th Annual Meeting of the Association for Computational Lin- guistics (Volume 2: Short Papers), pages 592-598.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "BERT post-training for review reading comprehension and aspect-based sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Hu", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Shu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S Yu", |
|
"middle": [], |
|
"last": "Philip", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2324--2335", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hu Xu, Bing Liu, Lei Shu, and S Yu Philip. 2019. BERT post-training for review reading comprehen- sion and aspect-based sentiment analysis. In Pro- ceedings of the 2019 Conference of the North Amer- ican Chapter of the Association for Computational Linguistics: Human Language Technologies, Vol- ume 1 (Long and Short Papers), pages 2324-2335.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Data augmentation for BERT fine-tuning in open-domain question answering", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuqing", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luchen", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kun", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1904.06652" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Yang, Yuqing Xie, Luchen Tan, Kun Xiong, Ming Li, and Jimmy Lin. 2019. Data augmentation for BERT fine-tuning in open-domain question answer- ing. arXiv preprint arXiv:1904.06652.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Character-level convolutional networks for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junbo", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yann", |
|
"middle": [], |
|
"last": "Lecun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 28th International Conference on Neural Information Processing Systems", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "649--657", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiang Zhang, Junbo Zhao, and Yann LeCun. 2015. Character-level convolutional networks for text clas- sification. In Proceedings of the 28th Interna- tional Conference on Neural Information Processing Systems-Volume 1, pages 649-657.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Modeling sentiment dependencies with graph convolutional networks for aspect-level sentiment classification. Knowledge-Based Systems", |
|
"authors": [ |
|
{ |
|
"first": "Pinlong", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Linlin", |
|
"middle": [], |
|
"last": "Hou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ou", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "193", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pinlong Zhao, Linlin Hou, and Ou Wu. 2020. Mod- eling sentiment dependencies with graph convolu- tional networks for aspect-level sentiment classifica- tion. Knowledge-Based Systems, 193:105443.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Conditional random fields as recurrent neural networks. In Proceedings of the IEEE international conference on computer vision", |
|
"authors": [ |
|
{ |
|
"first": "Shuai", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sadeep", |
|
"middle": [], |
|
"last": "Jayasumana", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernardino", |
|
"middle": [], |
|
"last": "Romera-Paredes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vibhav", |
|
"middle": [], |
|
"last": "Vineet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhizhong", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dalong", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip Hs", |
|
"middle": [], |
|
"last": "Torr", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1529--1537", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shuai Zheng, Sadeep Jayasumana, Bernardino Romera- Paredes, Vibhav Vineet, Zhizhong Su, Dalong Du, Chang Huang, and Philip HS Torr. 2015. Condi- tional random fields as recurrent neural networks. In Proceedings of the IEEE international conference on computer vision, pages 1529-1537.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "An example of representing a sentence with its word labels using CRFs." |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Performance of BERT layers initialized by BERT-PT weights for ASC on RST14 validation data." |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Training and validation losses of the 12-layer BERT model initialized with BERT-PT weights for AE (laptop (a) and restaurant (b)) and ASC (laptop (c) and restaurant (d)). In each figure, the upper lines are validation losses and the bottom lines are training losses, each line corresponding to a seed number." |
|
}, |
|
"TABREF2": { |
|
"html": null, |
|
"text": "", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td colspan=\"2\">: Laptop (LPT14) and restaurant (RST16)</td></tr><tr><td colspan=\"2\">datasets from SemEval 2014 and 2016, respectively, for</td></tr><tr><td colspan=\"2\">AE. S: Number of sentences; A: Number of aspects.</td></tr><tr><td>Train</td><td>Test</td></tr><tr><td colspan=\"2\">Dataset S Pos Neg Neu S Pos Neg Neu</td></tr><tr><td colspan=\"2\">LPT14 2313 987 866 460 638 341 128 169</td></tr><tr><td colspan=\"2\">RST14 3102 2164 805 633 1120 728 196 196</td></tr></table>" |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"text": "", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>: Laptop (LPT14) and restaurant (RST14)</td></tr><tr><td>datasets from SemEval 2014 for ASC. S: Number of</td></tr><tr><td>all sentences; Pos, Neg, Neu: Number of positive, neg-</td></tr><tr><td>ative, and neutral sentiments, respectively.</td></tr></table>" |
|
}, |
|
"TABREF6": { |
|
"html": null, |
|
"text": "Comparison of the results for Aspect Extraction (AE) and Aspect Sentiment Classification (ASC). BERT-PT* is the original BERT-PT model using our model selection. The boldfaced numbers show the outperforming models using the same settings. Each score in the table is the average of 9 runs. Results for the cited papers are reported from the corresponding paper. The other models are run for 4 epochs. LPT: Laptop, RST: Restaurant, Acc: Accuracy , MF1: Macro-F1. Values are percentages.", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |