|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:33:18.663076Z" |
|
}, |
|
"title": "End-to-End Annotator Bias Approximation on Crowdsourced Single-Label Sentiment Analysis", |
|
"authors": [ |
|
{ |
|
"first": "Gerhard", |
|
"middle": [], |
|
"last": "Hagerer", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Technical University of Munich", |
|
"location": { |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "ghagerer@mytum.de" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Szabo", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Technical University of Munich", |
|
"location": { |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Koch", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Technical University of Munich", |
|
"location": { |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{

"first": "Maria Luisa",

"middle": [],

"last": "Ripoll Dominguez",

"suffix": "",

"affiliation": {

"laboratory": "",

"institution": "Technical University of Munich",

"location": {

"country": "Germany"

}

},

"email": ""

},
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Widmer", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Technical University of Munich", |
|
"location": { |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Maximilian", |
|
"middle": [], |
|
"last": "Wich", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Technical University of Munich", |
|
"location": { |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Hannah", |
|
"middle": [], |
|
"last": "Danner", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Technical University of Munich", |
|
"location": { |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Georg", |
|
"middle": [], |
|
"last": "Groh", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Technical University of Munich", |
|
"location": { |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "grohg@mytum.de" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Sentiment analysis is often a crowdsourcing task prone to subjective labels given by many annotators. It is not yet fully understood how the annotation bias of each annotator can be modeled correctly with state-of-the-art methods. However, resolving annotator bias precisely and reliably is the key to understand annotators' labeling behavior and to successfully resolve corresponding individual misconceptions and wrongdoings regarding the annotation task. Our contribution is an explanation and improvement for precise neural endto-end bias modeling and ground truth estimation, which reduces an undesired mismatch in that regard of the existing state-of-the-art. Classification experiments show that it has potential to improve accuracy in cases where each sample is annotated only by one single annotator. We provide the whole source code publicly 1 and release an own domain-specific sentiment dataset containing 10,000 sentences discussing organic food products 2. These are crawled from social media and are singly labeled by 10 non-expert annotators.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Sentiment analysis is often a crowdsourcing task prone to subjective labels given by many annotators. It is not yet fully understood how the annotation bias of each annotator can be modeled correctly with state-of-the-art methods. However, resolving annotator bias precisely and reliably is the key to understand annotators' labeling behavior and to successfully resolve corresponding individual misconceptions and wrongdoings regarding the annotation task. Our contribution is an explanation and improvement for precise neural endto-end bias modeling and ground truth estimation, which reduces an undesired mismatch in that regard of the existing state-of-the-art. Classification experiments show that it has potential to improve accuracy in cases where each sample is annotated only by one single annotator. We provide the whole source code publicly 1 and release an own domain-specific sentiment dataset containing 10,000 sentences discussing organic food products 2. These are crawled from social media and are singly labeled by 10 non-expert annotators.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Modeling annotator bias in conditions where each data point is annotated by multiple annotators, below referred to as multi-labeled crowdsourcing, has been investigated thoroughly. However, bias modeling when every data point is annotated by only one person, hereafter called singly labeled crowdsourcing, poses a rather specific and difficult challenge. It is in particular relevant for sentiment analysis, where singly labeled crowdsourced datasets are prevalent. This is due to data from the social web which is annotated by the data creators themselves, e.g., rating reviewers or categorizing image uploaders. This might further include multi-media contents such as audio, video, images, and other forms of texts. While the outlook for such forms of data is promising, end-to-end approaches have not yet been fully explored on these types of crowdsourcing applications.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "With these benefits in mind, we propose a neural network model tailored for such data with singly labeled crowdsourced annotations. It computes a latent truth for each sample and the correct bias of every annotator while also considering input feature distribution during training. We modify the loss function such that the annotator bias converges towards the actual confusion matrix of the regarding annotator and thus models the annotator biases correctly. This is novel, as previous methods either require a multi-labeled crowdsourcing setting (Dawid and Skene, 1979; Hovy et al., 2013) or do not produce a correct annotator bias during training which would equal the confusion matrix, see Zeng et al. (2018, figure 5) and Rodrigues and Pereira (2018, figure 3) . A correct annotator-or annotator-group bias, however, is necessary to derive correct conclusions about the respective annotator behavior. This is especially important for highly unreliable annotators who label a high number of samples randomly -a setting, in which our proposed approach maintains its correctness, too.", |
|
"cite_spans": [ |
|
{ |
|
"start": 548, |
|
"end": 571, |
|
"text": "(Dawid and Skene, 1979;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 572, |
|
"end": 590, |
|
"text": "Hovy et al., 2013)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 694, |
|
"end": 722, |
|
"text": "Zeng et al. (2018, figure 5)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 727, |
|
"end": 765, |
|
"text": "Rodrigues and Pereira (2018, figure 3)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our contributions are as follows. We describe the corresponding state-of-the-art for crowdsourcing algorithms and tasks in section 2. Our neural network model method for end-to-end crowdsourcing modeling is explained in section 3, which includes a mathematical explanation that our linear bias modeling approach yields the actual confusion matrices. The experiments in section 4 underline our proof, show that the model handles annotator bias correctly as opposed to previous models, and demonstrate how the approach impacts classification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Problem definition. The need for data in the growing research areas of machine learning has given rise to the generalized use of crowdsourcing. This method of data collection increases the amount of data, saves time and money but comes at the potential cost of data quality. One of the key metrics of data quality is annotator reliability, which can be affected by various factors. For instance, the lack of rater accountability can entail spamming. Spammers are annotators that assign labels randomly and significantly reduce the quality of the data. Raykar and Yu (2012) and Hovy et al. (2013) addressed this issue by detecting spammers based on rater trustworthiness and the SpEM algorithm. However, spammers are not the only source of label inconsistencies. The varied personal backgrounds of crowd workers often lead to annotator biases that affect the overall accuracy of the models. Several works have previously ranked crowd workers (Hovy et al., 2013; Whitehill et al., 2009; Yan et al., 2010) , clustered annotators (Peldszus and Stede, 2013) , captured sources of bias (Wauthier and Jordan, 2011) or modeled the varying difficulty of the annotation tasks (Carpenter, 2008; Whitehill et al., 2009; Welinder et al., 2010) allowing for the elimination of unreliable labels and the improvement of the model predictions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 577, |
|
"end": 595, |
|
"text": "Hovy et al. (2013)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 941, |
|
"end": 960, |
|
"text": "(Hovy et al., 2013;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 961, |
|
"end": 984, |
|
"text": "Whitehill et al., 2009;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 985, |
|
"end": 1002, |
|
"text": "Yan et al., 2010)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 1026, |
|
"end": 1052, |
|
"text": "(Peldszus and Stede, 2013)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 1166, |
|
"end": 1183, |
|
"text": "(Carpenter, 2008;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 1184, |
|
"end": 1207, |
|
"text": "Whitehill et al., 2009;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 1208, |
|
"end": 1230, |
|
"text": "Welinder et al., 2010)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Crowdsourcing Algorithms", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Ground truth estimation. One common challenge in crowdsourced datasets is the ground truth estimation. When an instance has been annotated multiple times, a simple yet effective technique is to implement majority voting or an extension thereof (TIAN and Zhu, 2015; Yan et al., 2010) . More sophisticated methods focus on modeling label uncertainty (Spiegelhalter and Stovin, 1983) or implementing bias correction (Snow et al., 2008; Camilleri and Williams, 2020) . These techniques are commonly used for NLP applications or computer vision tasks (Smyth et al., 1995; Camilleri and Williams, 2020) . Most of these methods for inferring the ground truth labels use variations of the EM algorithm by Dawid and Skene (1979) , which estimates annotator biases and latent labels in turns. We use its recent extension called the Fast Dawid-Skene algorithm (Sinha et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 244, |
|
"end": 264, |
|
"text": "(TIAN and Zhu, 2015;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 265, |
|
"end": 282, |
|
"text": "Yan et al., 2010)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 348, |
|
"end": 380, |
|
"text": "(Spiegelhalter and Stovin, 1983)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 413, |
|
"end": 432, |
|
"text": "(Snow et al., 2008;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 433, |
|
"end": 462, |
|
"text": "Camilleri and Williams, 2020)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 546, |
|
"end": 566, |
|
"text": "(Smyth et al., 1995;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 567, |
|
"end": 596, |
|
"text": "Camilleri and Williams, 2020)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 697, |
|
"end": 719, |
|
"text": "Dawid and Skene (1979)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 827, |
|
"end": 869, |
|
"text": "Dawid-Skene algorithm (Sinha et al., 2018)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Crowdsourcing Algorithms", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "End-to-end approaches. The Dawid-Skene algorithm models the raters' abilities as respective bias matrices. Similar examples include GLAD (Whitehill et al., 2009) or MACE (Hovy et al., 2013) , which infer true labels as well as labeler expertise and sample difficulty. These approaches infer the ground truth only from the labels and do not consider the input features. End-to-end approaches learn a latent truth, annotator information, and feature distribution jointly during actual model training (Zeng et al., 2018; Khetan et al., 2017; Rodrigues and Pereira, 2018) . Some works use the EM algorithm (Raykar et al., 2009) , e.g., to learn sample difficulties, annotator representations and ground truth estimates (Platanios et al., 2020) . However, the EM algorithm has drawbacks, namely that it can be unstable and more expensive to train (Chu et al., 2020) . LTNet models imperfect annotations derived from various image datasets using a single latent truth neural network and datasetspecific bias matrices (Zeng et al., 2018) . A similar approach is used for crowdsourcing, representing annotator bias by confusion matrix estimates (Rodrigues and Pereira, 2018). Both approaches show a mismatch between the bias and how it is modeled, see Zeng et al. (2018, figure 5) and Rodrigues and Pereira (2018, figure 3) . We adapt the LTNet architecture (see section 3), as it can be used to model crowd annotators on singly labeled sentiment analysis, which, to our knowledge, is not done yet in the context of annotator bias modeling. Recent works about noisy labeling in sentiment analysis do not consider annotator bias (Wang et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 137, |
|
"end": 161, |
|
"text": "(Whitehill et al., 2009)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 170, |
|
"end": 189, |
|
"text": "(Hovy et al., 2013)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 498, |
|
"end": 517, |
|
"text": "(Zeng et al., 2018;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 518, |
|
"end": 538, |
|
"text": "Khetan et al., 2017;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 539, |
|
"end": 567, |
|
"text": "Rodrigues and Pereira, 2018)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 602, |
|
"end": 623, |
|
"text": "(Raykar et al., 2009)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 715, |
|
"end": 739, |
|
"text": "(Platanios et al., 2020)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 842, |
|
"end": 860, |
|
"text": "(Chu et al., 2020)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1011, |
|
"end": 1030, |
|
"text": "(Zeng et al., 2018)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 1244, |
|
"end": 1272, |
|
"text": "Zeng et al. (2018, figure 5)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1277, |
|
"end": 1315, |
|
"text": "Rodrigues and Pereira (2018, figure 3)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1620, |
|
"end": 1639, |
|
"text": "(Wang et al., 2019)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Crowdsourcing Algorithms", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Sentiment and Emotion. Many works use the terms sentiment and emotion interchangeably (Demszky et al., 2020; Kossaifi et al., 2021) , whereas sentiment is directed towards an entity (Munezero et al., 2014) but emotion not necessarily. Both can be mapped to valence, which is the affective quality of goodness (high) or badness (low). Since emotion recognition often lacks annotated data, crowdsourced sentiment annotations can be beneficial (Snow et al., 2008) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 86, |
|
"end": 108, |
|
"text": "(Demszky et al., 2020;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 109, |
|
"end": 131, |
|
"text": "Kossaifi et al., 2021)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 182, |
|
"end": 205, |
|
"text": "(Munezero et al., 2014)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 441, |
|
"end": 460, |
|
"text": "(Snow et al., 2008)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Crowdsourced Sentiment Datasets", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Multi-Labeled Crowdsourced Datasets. Crowdsourced datasets, such as, Google GoEmotion (Demszky et al., 2020) and the SEWA database (Kossaifi et al., 2021) , usually contain multiple labels per sample and require their aggregation using ground truth estimation. Multi-labeled datasets are preferable to singly labeled ones on limited data. Snow et al. (2008) proved that many non-expert annotators give a better performance than a few expert annotators and are cheaper in comparison.", |
|
"cite_spans": [ |
|
{ |
|
"start": 86, |
|
"end": 108, |
|
"text": "(Demszky et al., 2020)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 131, |
|
"end": 154, |
|
"text": "(Kossaifi et al., 2021)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 339, |
|
"end": 357, |
|
"text": "Snow et al. (2008)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Crowdsourced Sentiment Datasets", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Singly Labeled Crowdsourced Datasets. Singly labeled datasets are an option given a fixed budget and unlimited data. Khetan et al. (2017) showed that it is possible to model worker quality with single labels even when the annotations are made by non-experts. Thus, multiple annotations can not only be redundant but come at the expense of fewer labeled samples. For singly labeled data, it can be distinguished between reviewer annotators and external annotators. Reviewer annotators rate samples they created themselves. It is common in forums for product and opinion reviews where a review is accompanied by a rating. As an example of this, we utilized the TripAdvisor dataset (Thelwall, 2018) . Further candidates are the Amazon review dataset (Ni et al., 2019) , the Large Movie Review Dataset (Maas et al., 2011) , and many more comprising sentiment. External annotators annotate samples they have not created. Experts are needed for complex annotation tasks requiring domain knowledge. These are not crowdsourced, since the number of annotators is small and fixed. More common are external non-experts. Snow et al. (2008) showed that multi-labeled datasets annotated by non-expert improve performance. Khetan et al. (2017) showed that it also performs well in the singly labeled case. Thus, datasets made of singly labeled non-expert annotations can be cheaper, faster, and obtain performances comparable to those comprised of different types of annotations. Our organic dataset is annotated accordingly, see section 4.3.", |
|
"cite_spans": [ |
|
{ |
|
"start": 117, |
|
"end": 137, |
|
"text": "Khetan et al. (2017)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 679, |
|
"end": 695, |
|
"text": "(Thelwall, 2018)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 747, |
|
"end": 764, |
|
"text": "(Ni et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 798, |
|
"end": 817, |
|
"text": "(Maas et al., 2011)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 1109, |
|
"end": 1127, |
|
"text": "Snow et al. (2008)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 1208, |
|
"end": 1228, |
|
"text": "Khetan et al. (2017)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Crowdsourced Sentiment Datasets", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "The model choice is determined by the fact that some of our datasets are small. Thus, the model should have only few trainable parameters to avoid overfitting. We utilize a simple attention mechanism, as it is comon for NLP applications. The input words w j are mapped to their word embeddings e w j \u2208 R D with j = 1, ..., S, and S being the input sequence length and D the dimensionality of the input word vectors. These are GloVe embeddings of 50 dimensions pre-trained on 6B English tokens of the \"Wikipedia 2014 + Gigaword 5\" dataset (Pennington et al., 2014) . Then, it computes the attention a i of each word using the trainable attention vector e \u2208 R D via a j = e \u2022 e w j . It takes the accordingly weighted average z n = S i=1 a i \u2022 e w i of the word vectors with n denoting the n-th sample or input text.", |
|
"cite_spans": [ |
|
{ |
|
"start": 538, |
|
"end": 563, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Basic Modeling Architecture", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Finally, the classification head is the sigmoid of a simple linear layer p n = softmax(W \u2022 z n + b), with W \u2208 R L\u00d7D and b \u2208 R as the weights of the model. We refer to this last layer and to p n as latent truth layer or latent truth.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Basic Modeling Architecture", |
|
"sec_num": "3.1" |
|
}, |
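To make the base architecture concrete, the following is a minimal sketch rather than the authors' published implementation: a frozen GloVe embedding layer, one trainable attention vector, and a softmax head producing the latent truth. The class name and the softmax normalization of the raw attention scores are our assumptions.

```python
import torch
import torch.nn as nn

class AttentionBaseModel(nn.Module):
    """Attention-weighted average of word embeddings followed by a softmax head (latent truth)."""

    def __init__(self, embedding_matrix, num_classes):
        super().__init__()
        vocab_size, dim = embedding_matrix.shape
        # frozen pre-trained GloVe vectors (50-dimensional in the paper)
        self.embedding = nn.Embedding.from_pretrained(embedding_matrix, freeze=True)
        # trainable attention vector e in R^D
        self.attention = nn.Parameter(torch.randn(dim))
        # latent truth layer with weights W in R^{L x D} and bias b
        self.head = nn.Linear(dim, num_classes)

    def forward(self, token_ids):
        e_w = self.embedding(token_ids)             # (batch, seq_len, dim)
        scores = e_w @ self.attention               # raw scores a_j = e . e_{w_j}
        a = torch.softmax(scores, dim=1)            # normalization assumed here, as in ABAE
        z = (a.unsqueeze(-1) * e_w).sum(dim=1)      # weighted average z_n
        return torch.softmax(self.head(z), dim=-1)  # latent truth p_n

# usage sketch: model = AttentionBaseModel(torch.randn(10_000, 50), num_classes=3)
```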
|
{ |
|
"text": "On top of the basic modeling architecture, the biases of the annotators are modeled as seen in figure 1. The theory is explained by Zeng et al. (2018) as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 132, |
|
"end": 150, |
|
"text": "Zeng et al. (2018)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "End-to-End Crowdsourcing Model", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\"The labeling preference bias of different annotators cause inconsistent annotations. Each annotator has a coder-specific bias in assigning the samples to some categories. Mathematically speaking, let", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "End-to-End Crowdsourcing Model", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "X = {x 1 , . . . , x N } denote the data, y c = [y c 1 , . . . , y c N ]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "End-to-End Crowdsourcing Model", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "the regarding annotations by coder c. Inconsistent annotations assume that P (y c n |x n ) = P (y\u0109 n |x n ), \u2200x n \u2208 X , c =\u0109, where P (y i n |x n ) denotes the probability distribution that coder c annotates sample x n .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "End-to-End Crowdsourcing Model", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "LTNet assumes that each sample x n has a latent truth y n . Without the loss of generality, let us suppose that LTNet classifies x n into the category i with probability P (y n = i|x n ; \u0398), where \u0398 denotes the network parameters. If x n has a ground truth of i, coder c has an opportunity of \u03c4 c ij = P (y c n = j|y n = i) to annotate x n as j, where y c n is the annotation of sample x n by coder c. Then, the sample x n is annotated as label j by coder c with a probability of P (y c n = j|x n ; \u0398) = L i=1 P (y c n = j|y n = i)P (y n = i|x n ; \u0398), where L is the number of categories and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "End-to-End Crowdsourcing Model", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "L j=1 P (y c n = j|y n = i) = L j=1 \u03c4 c ij = 1. T c = [\u03c4 c", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "End-to-End Crowdsourcing Model", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "ij ] L\u00d7L denotes the transition matrix (also referred to as annotator bias) with rows summed to 1 while [p n ] i = P (y n = i|x n ; \u0398) is modeled by the base network (Zeng et al., 2018) . We define [p c n ] j = P (y c n = j|x n ; \u0398). Given the annotations from C different coders on the data, LTNet aims to maximize the log-likelihood of the observed annotations. Therefore, parameters in LTNet are learned by minimizing the cross entropy loss of the predicted and observed annotations for each coder c.\"", |
|
"cite_spans": [ |
|
{ |
|
"start": 166, |
|
"end": 185, |
|
"text": "(Zeng et al., 2018)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "End-to-End Crowdsourcing Model", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We represent the annotations and predictions as vectors of dimensionality L such that y c n is one-hot encoded and p c n contains the probabilities for all class predictions of sample n. The (Zeng et al., 2018 ). The base model is a simple attention model with a single trainable attention vector e and linear layer with parameters W and b. The transition matrices T c are the bias matrices from the annotators c. \"Each row of the transition matrix T is constrained to be summed to 1\" (Zeng et al., 2018 ). The base model is inspired by ABAE (He et al., 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 191, |
|
"end": 209, |
|
"text": "(Zeng et al., 2018", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 485, |
|
"end": 503, |
|
"text": "(Zeng et al., 2018", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 542, |
|
"end": 559, |
|
"text": "(He et al., 2017)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "End-to-End Crowdsourcing Model", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "cross entropy loss function is then defined as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "End-to-End Crowdsourcing Model", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2212 C n=1 N n=1 log(p c n \u2022 y c n ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "End-to-End Crowdsourcing Model", |
|
"sec_num": "3.2" |
|
}, |
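A hedged sketch of how the transition matrices and the loss defined above could be implemented on top of the base model's latent truth. Class and function names are ours; the use_log flag switches between the standard cross entropy and the log-free variant discussed in section 3.3.

```python
import torch
import torch.nn as nn

class LTNetCrowdLayer(nn.Module):
    """Per-annotator transition matrices T^c mapping the latent truth p_n to p^c_n."""

    def __init__(self, num_annotators, num_classes, noise=0.1):
        super().__init__()
        # identity plus uniform noise, row-normalized, one trainable matrix per annotator
        init = torch.eye(num_classes).repeat(num_annotators, 1, 1)
        init = init + noise * torch.rand(num_annotators, num_classes, num_classes)
        self.transitions = nn.Parameter(init / init.sum(dim=-1, keepdim=True))

    def forward(self, latent_truth, annotator_ids):
        # latent_truth: (batch, L) latent truth p_n; annotator_ids: (batch,) long tensor
        T = self.transitions[annotator_ids]                        # (batch, L, L)
        return torch.bmm(latent_truth.unsqueeze(1), T).squeeze(1)  # [p^c_n]_j = sum_i [p_n]_i tau^c_ij

def crowd_loss(p_c, y_onehot, use_log=False):
    """Cross entropy between annotator prediction p^c_n and one-hot annotation y^c_n.
    With use_log=False the logarithm is dropped, as proposed in section 3.3."""
    agreement = (p_c * y_onehot).sum(dim=-1)                       # p^c_n . y^c_n
    loss = -torch.log(agreement + 1e-12) if use_log else -agreement
    return loss.mean()
```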
|
{ |
|
"text": "The logarithm in the cross entropy formula leads to an exponential increase in the loss for false negative predictions, i.e., when the predicted probability [p c n ] i for a ground truth class i is close to 0 and [y c n ] i is 1. This increase can be helpful in conditions with numerical underflow, but at the same time this introduces a disproportionate high loss of the other class due to constantly misclassified items. This happens in crowdsourcing, for example, when one annotator is a spammer assigning a high degree of random annotations, which in turn leads to a disproportionally higher loss caused by that annotator's many indistinguishable false negative annotations. Consequentially, the bias matrix of that annotator would be biased towards the false classes. Moreover, this annotator would cause overall more loss than other annotators, which can harm the model training for layers which are shared among all annotators, e.g., the latent truth layer when it is actually trained.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Effect of Logarithm Removal on Cross Entropy", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "By omitting the log function, these effects are removed and all annotators and datapoints contribute with the same weight to the overall gradient and to the trainable annotator bias matrices, independent of the annotator and his respective annotation behavior. As a consequence, the annotator matrices are capable of modeling the real annotator bias, which is the mismatch between an annotation y c n of coder c and the latent truth prediction p n . If p n is one-hot encoded, this results to the according classification ratios of samples and is equal to the confusion matrix, without an algorithmically encoded bias towards a certain group of items. This is shown mathematically in the following, where it is assumed that the base network is fixed, i.e., backpropagation is performed through the bias matrices and stops at the latent truth layer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Effect of Logarithm Removal on Cross Entropy", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Effect of Logarithm Removal on Cross Entropy", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "define N = L k=1 N k", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Effect of Logarithm Removal on Cross Entropy", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "O = \u2212 N n=1 p c n \u2022 y c n = \u2212 L k=1 N k m=1 p km \u2022 T c \u2022 y c km = \u2212 L k=1 N k m=1 p km \u2022 \uf8eb \uf8ec \uf8ed \u03c4 c 1k . . . \u03c4 c Lk \uf8f6 \uf8f7 \uf8f8 = L k=1 N k m=1 L h=1 \u2212 [p km ] h \u2022 \u03c4 c hk", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Effect of Logarithm Removal on Cross Entropy", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Apparently, the derivation step between the second and third line would not work if there would be the logarithm from the standard cross entropy. Now, let the learning rate be \u03b1, the number of epochs E and the starting values of the initialized bias matrix Figure 2: Male and female bias (top) and confusion (bottom) matrices which are trained using cross entropy loss with and without logarithm in two different settings. The left side has only the original annotations, whereas the right side has 80% random male labels.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Effect of Logarithm Removal on Cross Entropy", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "(\u03c4 c hk ) E = (\u03c4 c hk ) 0 + E i=1 \u03b1 \u2202O \u2202\u03c4 c hk i = (\u03c4 c hk ) 0 + E i=1 \u03b1 N k m=1 \u2212 [p km ] h i = (\u03c4 c hk ) 0 \u2212 \u03b1E N k m=1 [p km ] h =:Z hk", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Effect of Logarithm Removal on Cross Entropy", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "For sufficiently large E the starting values (\u03c4 c hk ) 0 become infinitesimally small in comparison to the second additive term and thus negligible. As we are normalizing the rows of (T c ) E after training so that the bias fulfills our probability constraint defined in section 3.2, the linear factor \u2212\u03b1E is canceled out, too. Thus, the bias matrix T c results in the row normalized version of [Z hk ] L\u00d7L . Z hk is the sum of the latent truth probabilities for class h on all samples of a ground truth class k. If we assume that the latent truth is one hot encoded, [Z hk ] L\u00d7L equals to the confusion matrix, of which the k-th column sums up to the number of samples in class k:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Effect of Logarithm Removal on Cross Entropy", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "L h=1 Z hk = L h=1 N k m=1 [p km ] h = N k m=1 1 = N k .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Effect of Logarithm Removal on Cross Entropy", |
|
"sec_num": "3.3" |
|
}, |
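A small numerical sanity check of this derivation under its assumptions (frozen base network, one-hot latent truth, rows of T^c normalized after training): accumulating Z_{hk} and row-normalizing reproduces the confusion matrix between latent truth and annotations. The simulated annotator and all names below are illustrative.

```python
import numpy as np

rng = np.random.default_rng(0)
L, N = 3, 10_000

latent = rng.integers(0, L, size=N)  # one-hot latent truth classes from the frozen base network
# simulate a noisy annotator: keep the latent class with probability 0.7, otherwise pick randomly
labels = np.where(rng.random(N) < 0.7, latent, rng.integers(0, L, size=N))

# Z[h, k] = sum over samples annotated as k of the latent-truth probability for class h
Z = np.zeros((L, L))
for h_cls, k_cls in zip(latent, labels):
    Z[h_cls, k_cls] += 1.0               # one-hot latent truth contributes exactly 1

bias = Z / Z.sum(axis=1, keepdims=True)  # row-normalized "trained" bias matrix

# confusion matrix: P(annotation = k | latent truth = h)
conf = np.zeros((L, L))
for h in range(L):
    mask = latent == h
    for k in range(L):
        conf[h, k] = np.mean(labels[mask] == k)

print(np.allclose(bias, conf))           # True: the bias equals the confusion matrix
```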
|
{ |
|
"text": "The following experiment compares how training with and without the logarithm in the cross entropy loss affects the LTNet bias matrices empirically. The mathematical explanations in section 3.3 suggest that the logarithm removal from cross entropy leads to an annotator bias matrix identical to the confusion matrix, which would not be the case for the normal cross entropy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias Convergence", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Experiment Description. For the data, we use the TripAdvisor dataset from Thelwall et al. consisting of 11, 900 English consumer reviews about hotels from male and female reviewers plus their self-assigned sentiment ratings (Thelwall, 2018) . We use the gender information to split the data into two annotator groups, male and female, from which we model each one with a corresponding bias matrix. We exclude neutral ratings and binarize the rest to be either positive or negative. As the dataset is by default completely balanced regarding gender and sentiment at each rating level, it is a natural candidate for correct bias approximation. Throughout our experiments, we use 70% of the obtained data as training, 20% as validation and the 10% remaining as test sets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 224, |
|
"end": 240, |
|
"text": "(Thelwall, 2018)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias Convergence", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Similar to the explanation in 3.3, the base model with its latent truth predictions is pre-trained on all samples and then frozen when the bias matrices are trained. The stochastic gradient descent method is used to optimize the parameters, as other widespread optimizers, such as Adam and AdaGrad (the latter introduced that feature first), introduce anin our case undesired -bias towards certain directions in the gradient space, namely by using the previous learning steps to increase or decrease the weights along dimensions with larger or smaller gradients (Kingma and Ba, 2014). For all four sub-experiments, we train the base models with varying hyperparameters and pick the best based on accuracy. We train the transition matrices 50 times with different learning rates from the interval [1e\u22126, 1e\u22123]. The batch size is 64. In addition to a normal training setting, we add random annotations to 80% of the instances annotated by male subjects, such that 40% from them are wrongly annotated. This results in four models: with and without logarithm in the cross entropy, with and without random male annotations, each time respectively with two annotator group matrices, male and female -see figure 2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias Convergence", |
|
"sec_num": "4.1" |
|
}, |
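For clarity, a sketch of the label corruption used in the noisy sub-experiments: randomizing 80% of the male-annotated binary labels leaves roughly 40% of them wrong. The function and its arguments are our own illustration, not the released code.

```python
import numpy as np

def corrupt_male_labels(labels, is_male, frac=0.8, num_classes=2, seed=0):
    """Replace a fraction of the male-annotated labels with uniformly random classes."""
    rng = np.random.default_rng(seed)
    labels = labels.copy()
    male_idx = np.flatnonzero(is_male)
    picked = rng.choice(male_idx, size=int(frac * len(male_idx)), replace=False)
    # with two balanced classes, a random label is wrong ~50% of the time, i.e. ~40% of all male labels
    labels[picked] = rng.integers(0, num_classes, size=len(picked))
    return labels
```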
|
{ |
|
"text": "Results. The bias matrices of the models with the best accuracy are picked and presented in figure 2 in the top row. The corresponding confusion matrices depict the mismatch between latent truth predictions and annotator-group labels in the bottom row. The bias matrices trained without logarithm in the cross entropy are almost identical to the confusion matrices in all cases, which never holds for the normal cross entropy. This confirms our mathematically justified hypothesis given in section 3.3 that the logarithm removal from cross entropy leads to a correctly end-to-end-trained bias. In this context, it is relevant that the related work shows the same mismatch between bias and confusion matrix when applying cross entropy loss without explaining nor tackling this difference, see Zeng et al. (2018, figure 5) and Rodrigues and Pereira (2018, figure 3) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 792, |
|
"end": 820, |
|
"text": "Zeng et al. (2018, figure 5)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 825, |
|
"end": 853, |
|
"text": "Rodrigues and Pereira (2018,", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 854, |
|
"end": 863, |
|
"text": "figure 3)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Bias Convergence", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "It is worth mentioning for the 80% random male annotations that these are correctly modeled without cross entropy, too, as opposed to normal cross entropy. If the goal is to model the annotator bias correctly in an end-to-end manner, this might be considered as particularly useful to analyze annotator behavior, e.g., spammer detection, later on.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias Convergence", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Finally, we report how much variation the bias matrices show during training for cross entropy with and without logarithm. As mentioned in the experiment description, we trained each model 50 times. The elements of the resulting bias matrices with standard cross entropy have on average 7.7% standard deviation compared to 2.8% without logarithm. It can be concluded that the bias produced by standard cross entropy is less stable during training, which raises questions about the overall reliability of its outcome.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias Convergence", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In summary, the observations confirm our assumptions that cross entropy without logarithm captures annotator bias correctly in contrast to standard cross entropy. This carries the potential to detect spammer annotators and leads to an overall more stable training.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias Convergence", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In the following paragraphs, we demonstrate how to estimate the ground truth based on the latent truth from LTNet. This is then compared to two other kinds of ground truth estimates. All of them can be applied in a single label crowdsourcing setting.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ground Truth Estimation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The Dawid-Skene algorithm (Sinha et al., 2018 ) is a common approach to calculate a ground truth in crowdsourcing settings where there are multiple annotations given on each sample. This method is, for instance, comparable to majority voting, which tends to give similar results for ground truth estimation. However, in single label crowdsourcing settings, these approaches are not feasible. Under single label conditions, the Dawid-Skene ground truth estimates equal to the single label annotations. This is given by Sinha et al. (2018, formula 1) in the expectation step, where the probability for a class k \u2208 1, 2, ..., L given the annotations is defined as", |
|
"cite_spans": [ |
|
{ |
|
"start": 4, |
|
"end": 45, |
|
"text": "Dawid-Skene algorithm (Sinha et al., 2018", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ground Truth Estimation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "P (Y n = k|k n 1 , k n 2 , ..., k n L ) = C c=1 P (k nc |Y n = k) \u2022 P (Y n = k) L k=1 C c=1 P (k nc |Y n = k) \u2022 P (Y n = k) .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ground Truth Estimation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Here, n is the sample to be estimated, C the number of annotators for that sample, n 1 , n 2 , ..., n C the set of annotators who labeled this sample, k n 1 , k n 2 , ..., k n C the set of annotation choices chosen by these C participants for sample n, and Y n the correct (or aggregated) label to be estimated for the sample n (Sinha et al., 2018).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ground Truth Estimation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In the single label case C equals to 1, which reduces the formula to P (Y n = k|k n 1 , k n 2 , ..., k n C ) = P (Y n = k|k n 1 ). This in turn equals to 1 if k is the assigned class label to sample n by annotator n 1 , or 0 otherwise. In other words, if there is only one annotation per sample, this annotation defines the ground truth. Since different annotators do not assign labels on the same samples, there is also no way to model mutual dependencies of each other.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ground Truth Estimation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "LTNet, however, provides estimates for all variables from this formula. P (Y n = k) is the prior and is approximated by the latent truth probability for class k of sample n. P (k nc |Y n = k) is the probability that, assuming k would be the given class, sample n is labeled as k nc by annotator n c . This equals to \u03c4 c kn c ,k , i.e., the entries of the LTNet bias matrix T c of annotator c.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ground Truth Estimation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Eventually, the LTNet ground truth can be derived by choosing k such that the probability P (Y n = k|k n 1 , ...) is maximized:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ground Truth Estimation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "k ground truth = arg max k P (Y n = k|k n 1 , ...).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ground Truth Estimation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We will leverage this formula to derive and evaluate the ground truth generated by LTNet.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ground Truth Estimation", |
|
"sec_num": "4.2" |
|
}, |
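A sketch of this estimation rule for the single-label case, assuming the indexing convention τ^c_{ij} = P(y^c = j | y = i) from section 3.2; the function and variable names are ours.

```python
import numpy as np

def ltnet_ground_truth(latent_truth, bias, annotation):
    """latent_truth: (L,) prior P(Y_n = k) from the base network;
    bias: (L, L) matrix of annotator c with bias[i, j] = P(y^c = j | y = i);
    annotation: the single observed label k_{n_1}."""
    posterior = latent_truth * bias[:, annotation]  # P(k_{n_1} | Y_n = k) * P(Y_n = k) for every k
    return int(np.argmax(posterior))                # the normalizing denominator does not change the argmax

# illustrative example: the prior leans towards class 0,
# but the annotator almost never labels a true class 1 as 0
p = np.array([0.55, 0.45])
T = np.array([[0.70, 0.30],
              [0.05, 0.95]])
print(ltnet_ground_truth(p, T, annotation=1))       # -> 1
```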
|
{ |
|
"text": "Experiment We calculate the LTNet ground truth according to the previous formula on the organic dataset, a singly labeled crowdsourcing dataset, which is described in Section 4.3. To demonstrate the feasibility and the soundness of the approach, we compare it with two other ways of deriving a ground truth. Firstly, we apply the fast Dawid-Skene algorithm on the annotator-wise class predictions from the LTNet model. Secondly, we train a base network on all annotations while ignoring which annotator annotated which samples. Eventually, we compare the ground truth estimates of all three methods by calculating Cohen's kappa coefficient (Cohen, 1960) , which is a commonly used standard to analyze correspondence of annotations between two annotators or pseudo annotators. The training procedures and the dataset are identical to the ones from the classification experiments in Section 4.3.", |
|
"cite_spans": [ |
|
{ |
|
"start": 640, |
|
"end": 653, |
|
"text": "(Cohen, 1960)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ground Truth Estimation", |
|
"sec_num": "4.2" |
|
}, |
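The pairwise agreement reported in Table 1 can be computed, for example, with scikit-learn's implementation of Cohen's kappa; the label arrays below are placeholders for the outputs of the three estimators.

```python
from itertools import combinations
from sklearn.metrics import cohen_kappa_score

# illustrative per-sample class ids; in practice these are the three estimators' outputs
estimates = {
    "Dawid-Skene": [0, 1, 2, 1, 0, 2, 1, 0],
    "LTNet":       [0, 1, 2, 1, 0, 2, 1, 1],
    "Base Model":  [0, 1, 2, 0, 0, 2, 1, 1],
}

for (name_a, a), (name_b, b) in combinations(estimates.items(), 2):
    print(f"{name_a} vs {name_b}: {cohen_kappa_score(a, b):.4f}")
```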
|
{ |
|
"text": "Results As can be seen on Table 1 , the three ground truth estimators are all highly correlated to each other, since the minimal Cohen's kappa score is 0.98. Apparently, there are only minor differences in the ground truth estimates, if any at all. Thus, it appears that the ground truths generated by the utilized methods are mostly identical. Especially, the LTNet and Dawid-Skene ground truths are highly correlated with a kappa of 99%. The base model, which is completely unaware of which annotator labeled which sample, is slightly more distant with kappas between 98% -99%. So with respect to the ground truth itself, we do not see a specific benefit of any method, since they are almost identical.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 26, |
|
"end": 33, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ground Truth Estimation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "However, it must be noted that LTNet additionally produces correct bias matrices of every annotator during model training, which is not the case for the base model. Correct biases have the potential to help improving model performance by analyzing which annotators tend to be more problematic and weighting them accordingly.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ground Truth Estimation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We conduct classification comparing LTNet in different configurations on three datasets with crowdsourced sentiment annotations to discuss the poten-", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Classification", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Basic Ground truths Skene LTNet Model Dawid Skene 1.0000 0.9905 0.9832 LTNet 0.9905 1.0000 0.9918 Base Model 0.9832 0.9918 1.0000 tial related benefits and drawbacks of our proposed loss modification. Emotion Dataset. The emotion dataset consists of 100 headlines and their ratings for valence by multiple paid Amazon Mechanical Turk annotators (Snow et al., 2008) . Each headline is annotated by 10 annotators, and each annotated several but not all headlines. We split the interval-based valence annotations to positive, neutral, or negative. Throughout our experiments, we used 70% of the obtained data as training, 20% as validation and 10% as test sets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 345, |
|
"end": 364, |
|
"text": "(Snow et al., 2008)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dawid", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Organic Food Dataset. With this paper, we publish our dataset containing social media texts discussing organic food related topics. Source. The dataset was crawled in late 2017 from Quora, a social question-and-answer website. To retrieve relevant articles from the platform, the search terms \"organic\", \"organic food\", \"organic agriculture\", and \"organic farming\" are used. The texts are deemed relevant by a domain expert if articles and comments deal with organic food or agriculture and discuss the characteristics, advantages, and disadvantages of organic food production and consumption. From the filtered data, 1,373 comments are chosen and 10,439 sentences annotated. Annotation Scheme. Each sentence has sentiment (positive, negative, neutral) and entity, the sentiment target, annotated. We isolate sentiments expressed about organic against non-organic entities, whereas for classification only singly labeled samples annotated as organic entity are considered. Consumers discuss organic or non-organic products, farming practices, and companies. Annotation Procedure. The data is annotated by each of the 10 coders separately; it is divided into 10 batches of 1, 000 sentences for each annotator and none of these batches shared any sentences between each other. 4616 sentences contain organic entities with 39% neutral, 32% positive, and 29% negative sentiments. After annotation, the data splits are 80% training, 10% validation, and 10% test set. The data distribution over sentiments, entities, and attributes remains similar on all splits. Experiment Description. The experiment is conducted on the TripAdvisor, organic, and emotion datasets introduced in section 4.3. We compare the classification of the base network with three different LTNet configurations. Two of them are trained using cross entropy with and without logarithm. For the emotion dataset, we compute the bias matrices and the ground truth for the base model using the fast Dawid-Skene algorithm (Sinha et al., 2018) . This is possible for the emotion dataset, since each sample is annotated by several annotators.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1960, |
|
"end": 2002, |
|
"text": "Dawid-Skene algorithm (Sinha et al., 2018)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dawid", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We apply pre-training for each dataset by training several base models with different hyperparameters and pick the best based on accuracy. Eventually, we train the LTNet model on the crowdsourcing annotation targets by fine-tuning the best base model together with the bias matrices for the respective annotators. The bias matrices are initialized as row normalized identity matrices plus uniform noise around 0.1. The models are trained 50 times with varying learning rates sampled from between [1e\u22126, 1e\u22123]. A batch size of 64 is used.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dawid", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Results. The classification results of the models are presented in table 2 with their macro F1 score and accuracy as derived via predictions on the test sets. LTNet generally shows a significant classification advantage over the base model. On all three databases, LTNet approaches performed better on the test datasets. The LTNet improvement has a big delta of 11% + / \u2212 1% when there is a low annotation reliability (organic and emotion datasets) and a small delta < 1% with high reliability (TripAdvisor) 3 . Apparently, model each 3 Unreliable means that the provided annotations have a low annotator separately gives significant advantages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dawid", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Regarding the comparison between cross entropy (CE) loss with and without logarithm on LT-Net, the removed logarithm shows better classification results on organic (+3%) and TripAdvisor data (+0.3%) and worse on the emotion dataset (\u22123%). This means that on both of the singly labeled crowdsourcing datasets, the removal of the logarithm from the loss function leads to better predictions than the standard CE loss. On the multi-labeled emotion dataset, however, this does not appear to be beneficial. As this data has only a very small test set of 100 samples, it is not clear if this result is an artifact or not. Concluding, the log removal appears to be beneficial on large datasets, where the bias is correctly represented in the training and test data splits, such that it can be modeled correctly by the denoted approach. It shall be noted, that it is not clear if that observation would hold generally. We advice to run the same experiments multiple times on many more datasets to substantiate this finding.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dawid", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We showed the efficacy of LTNet for modeling crowdsourced data and the inherent bias accurately and robustly. The bias matrices produced by our modified LTNet improve such that they are more similar to the actual bias between the latent truth and ground truth. Moreover, the produced bias shows high robustness under very noisy conditions making the approach potentially usable outside of lab conditions. The latent truth, which is a hidden layer below all annotator biases, can be used for ground truth estimation in our single label crowdsourcing scenario, providing almost identical ground truth estimates as pseudo labeling. Classification on three crowdsourced datasets show that LTNet approaches outperfom naive approaches not considering each annotator separately. The proposed log removal from the loss function showed better results on singly labeled crowdsourced datasets, but this observation needs further experiments to be substantiated. Furthermore, there might be many use cases to explore the approach on other tasks than sentiment analysis.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Cohen's kappa inter-rater reliability on the organic 51.09% and emotion (27.47%) dataset. On the organic dataset we prepared a separate data partition of 300 sentences annotated by all annotators for that purpose. For the TripAdvisor dataset, it is apparent that the correspondence of annotations between the two annotator groups (male and female) is high as can be seen in figure 2 for cross entropy without logarithm.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "https://github.com/theonlyandreas/ end-to-end-crowdsourcing 2 https://github.com/ghagerer/organic-dataset", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "The extended dawid-skene model", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Camilleri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"I" |
|
], |
|
"last": "Christopher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Machine Learning and Knowledge Discovery in Databases", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "121--136", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael P. J. Camilleri and Christopher K. I. Williams. 2020. The extended dawid-skene model. In Machine Learning and Knowledge Discovery in Databases, pages 121-136, Cham. Springer Interna- tional Publishing.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Multilevel bayesian models of categorical data annotation", |
|
"authors": [ |
|
{ |
|
"first": "Bob", |
|
"middle": [], |
|
"last": "Carpenter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bob Carpenter. 2008. Multilevel bayesian models of categorical data annotation.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Learning from crowds by modeling common confusions", |
|
"authors": [ |
|
{ |
|
"first": "Zhendong", |
|
"middle": [], |
|
"last": "Chu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongning", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhendong Chu, Jing Ma, and Hongning Wang. 2020. Learning from crowds by modeling common confu- sions.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "A coefficient of agreement for nominal scales. Educational and psychological measurement", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Cohen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1960, |
|
"venue": "", |
|
"volume": "20", |
|
"issue": "", |
|
"pages": "37--46", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Cohen. 1960. A coefficient of agreement for nominal scales. Educational and psychological mea- surement, 20(1):37-46.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Maximum likelihood estimation of observer error-rates using the em algorithm", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Dawid", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Skene", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1979, |
|
"venue": "Applied Statistics", |
|
"volume": "28", |
|
"issue": "1", |
|
"pages": "20--28", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. P. Dawid and A. M. Skene. 1979. Maximum likeli- hood estimation of observer error-rates using the em algorithm. Applied Statistics, 28(1):20-28.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Goemotions: A dataset of fine-grained emotions", |
|
"authors": [ |
|
{ |
|
"first": "Dorottya", |
|
"middle": [], |
|
"last": "Demszky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dana", |
|
"middle": [], |
|
"last": "Movshovitz-Attias", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeongwoo", |
|
"middle": [], |
|
"last": "Ko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Cowen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gaurav", |
|
"middle": [], |
|
"last": "Nemade", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sujith", |
|
"middle": [], |
|
"last": "Ravi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dorottya Demszky, Dana Movshovitz-Attias, Jeong- woo Ko, Alan Cowen, Gaurav Nemade, and Sujith Ravi. 2020. Goemotions: A dataset of fine-grained emotions.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "An unsupervised neural attention model for aspect extraction", |
|
"authors": [ |
|
{ |
|
"first": "Ruidan", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hwee Tou", |
|
"middle": [], |
|
"last": "Wee Sun Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Dahlmeier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "388--397", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P17-1036" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ruidan He, Wee Sun Lee, Hwee Tou Ng, and Daniel Dahlmeier. 2017. An unsupervised neural attention model for aspect extraction. In Proceedings of the 55th Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers), pages 388-397, Vancouver, Canada. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Learning whom to trust with MACE", |
|
"authors": [ |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Taylor", |
|
"middle": [], |
|
"last": "Berg-Kirkpatrick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dirk Hovy, Taylor Berg-Kirkpatrick, Ashish Vaswani, and Eduard Hovy. 2013. Learning whom to trust with MACE. In Proceedings of the 2013 Confer- ence of the North American Chapter of the Associ- ation for Computational Linguistics: Human Lan- guage Technologies, Atlanta, Georgia. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Learning from noisy singly-labeled data", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Khetan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zachary", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Lipton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anima", |
|
"middle": [], |
|
"last": "Anandkumar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Khetan, Zachary C. Lipton, and Anima Anand- kumar. 2017. Learning from noisy singly-labeled data.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Diederik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P. Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. CoRR, abs/1412.6980.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Sewa db: A rich database for audiovisual emotion and sentiment research in the wild", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Kossaifi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Walecki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Panagakis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Schmitt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Ringeval", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Pandit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Toisoul", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Schuller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Star", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Hajiyev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Pantic", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", |
|
"volume": "43", |
|
"issue": "3", |
|
"pages": "1022--1040", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/TPAMI.2019.2944808" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Kossaifi, R. Walecki, Y. Panagakis, J. Shen, M. Schmitt, F. Ringeval, J. Han, V. Pandit, A. Toisoul, B. Schuller, K. Star, E. Hajiyev, and M. Pantic. 2021. Sewa db: A rich database for audio- visual emotion and sentiment research in the wild. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43(3):1022-1040.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Learning word vectors for sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Maas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raymond", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Daly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Potts", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew L. Maas, Raymond E. Daly, Peter T. Pham, Dan Huang, Andrew Y. Ng, and Christopher Potts. 2011. Learning word vectors for sentiment analysis. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies, Portland, Oregon, USA. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Are they different? affect, feeling, emotion, sentiment, and opinion detection in text", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Munezero", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Montero", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Sutinen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Pajunen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "IEEE Transactions on Affective Computing", |
|
"volume": "5", |
|
"issue": "2", |
|
"pages": "101--111", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/TAFFC.2014.2317187" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Munezero, C. S. Montero, E. Sutinen, and J. Pa- junen. 2014. Are they different? affect, feel- ing, emotion, sentiment, and opinion detection in text. IEEE Transactions on Affective Computing, 5(2):101-111.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Justifying recommendations using distantly-labeled reviews and fine-grained aspects", |
|
"authors": [ |
|
{ |
|
"first": "Jianmo", |
|
"middle": [], |
|
"last": "Ni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiacheng", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Mcauley", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "188--197", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1018" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jianmo Ni, Jiacheng Li, and Julian McAuley. 2019. Justifying recommendations using distantly-labeled reviews and fine-grained aspects. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th Interna- tional Joint Conference on Natural Language Pro- cessing (EMNLP-IJCNLP), pages 188-197, Hong Kong, China. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Ranking the annotators: An agreement study on argumentation structure", |
|
"authors": [ |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Peldszus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manfred", |
|
"middle": [], |
|
"last": "Stede", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 7th Linguistic Annotation Workshop and Interoperability with Discourse", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "196--204", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andreas Peldszus and Manfred Stede. 2013. Ranking the annotators: An agreement study on argumenta- tion structure. In Proceedings of the 7th Linguistic Annotation Workshop and Interoperability with Dis- course, pages 196-204, Sofia, Bulgaria. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "GloVe: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/D14-1162" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. GloVe: Global vectors for word representation. In Proceedings of the 2014 Confer- ence on Empirical Methods in Natural Language Processing (EMNLP), pages 1532-1543, Doha, Qatar. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Learning from imperfect annotations", |
|
"authors": [ |
|
{ |
|
"first": "Maruan", |
|
"middle": [], |
|
"last": "Emmanouil Antonios Platanios", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Al-Shedivat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Xing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emmanouil Antonios Platanios, Maruan Al-Shedivat, Eric Xing, and Tom Mitchell. 2020. Learning from imperfect annotations.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Eliminating spammers and ranking annotators for crowdsourced labeling tasks", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Vikas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shipeng", |
|
"middle": [], |
|
"last": "Raykar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "J. Mach. Learn. Res", |
|
"volume": "13", |
|
"issue": "", |
|
"pages": "491--518", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vikas C. Raykar and Shipeng Yu. 2012. Eliminating spammers and ranking annotators for crowdsourced labeling tasks. J. Mach. Learn. Res., 13:491-518.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Supervised learning from multiple experts: whom to trust when everyone lies a bit", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Vikas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shipeng", |
|
"middle": [], |
|
"last": "Raykar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Linda", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Charles", |
|
"middle": [], |
|
"last": "Jerebko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerardo", |
|
"middle": [ |
|
"Hermosillo" |
|
], |
|
"last": "Florin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luca", |
|
"middle": [], |
|
"last": "Valadez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Linda", |
|
"middle": [], |
|
"last": "Bogoni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Moy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "ICML", |
|
"volume": "382", |
|
"issue": "", |
|
"pages": "889--896", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/1553374.1553488" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vikas C. Raykar, Shipeng Yu, Linda H. Zhao, Anna K. Jerebko, Charles Florin, Gerardo Her- mosillo Valadez, Luca Bogoni, and Linda Moy. 2009. Supervised learning from multiple experts: whom to trust when everyone lies a bit. In ICML, volume 382 of ACM International Conference Pro- ceeding Series, pages 889-896. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Deep learning from crowds", |
|
"authors": [ |
|
{ |
|
"first": "Filipe", |
|
"middle": [], |
|
"last": "Rodrigues", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Pereira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "1611--1618", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Filipe Rodrigues and Francisco C. Pereira. 2018. Deep learning from crowds. Proceedings of the AAAI Con- ference on Artificial Intelligence, 32(1):1611-1618.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Fast dawid-skene: A fast vote aggregation scheme for sentiment classification", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Vaibhav", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sukrut", |
|
"middle": [], |
|
"last": "Sinha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Vineeth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Balasubramanian", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vaibhav B Sinha, Sukrut Rao, and Vineeth N Balasub- ramanian. 2018. Fast dawid-skene: A fast vote ag- gregation scheme for sentiment classification.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Inferring ground truth from subjective labelling of venus images", |
|
"authors": [ |
|
{ |
|
"first": "Padhraic", |
|
"middle": [], |
|
"last": "Smyth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Usama", |
|
"middle": [], |
|
"last": "Fayyad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Burl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pietro", |
|
"middle": [], |
|
"last": "Perona", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierre", |
|
"middle": [], |
|
"last": "Baldi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "1085--1092", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Padhraic Smyth, Usama Fayyad, Michael Burl, Pietro Perona, and Pierre Baldi. 1995. Inferring ground truth from subjective labelling of venus images. In Advances in Neural Information Processing Systems, volume 7, pages 1085-1092, San Diego, CA. MIT Press.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Cheap and fast-but is it good?: evaluating non-expert annotations for natural language tasks", |
|
"authors": [ |
|
{ |
|
"first": "Rion", |
|
"middle": [], |
|
"last": "Snow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O'", |
|
"middle": [], |
|
"last": "Brendan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Connor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "EMNLP '08: Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rion Snow, Brendan O'Connor, Daniel Jurafsky, and Andrew Y. Ng. 2008. Cheap and fast-but is it good?: evaluating non-expert annotations for natural language tasks. In EMNLP '08: Proceedings of the Conference on Empirical Methods in Natural Lan- guage Processing, Morristown, NJ, USA. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "An analysis of repeated biopsies following cardiac transplantation", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Spiegelhalter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Stovin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1983, |
|
"venue": "Statistics in medicine", |
|
"volume": "2", |
|
"issue": "1", |
|
"pages": "33--40", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "DJ Spiegelhalter and PGI Stovin. 1983. An analysis of repeated biopsies following cardiac transplantation. Statistics in medicine, 2(1):33-40.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Gender bias in sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Thelwall", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Online Information Review", |
|
"volume": "42", |
|
"issue": "3", |
|
"pages": "343--354", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mike Thelwall. 2018. Gender bias in sentiment analy- sis. Online Information Review, 42(3):343-354.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Max-margin majority voting for learning from crowds", |
|
"authors": [ |
|
{ |
|
"first": "Tian", |
|
"middle": [], |
|
"last": "Tian", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "28", |
|
"issue": "", |
|
"pages": "1621--1629", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "TIAN TIAN and Jun Zhu. 2015. Max-margin major- ity voting for learning from crowds. In Advances in Neural Information Processing Systems, volume 28, pages 1621-1629, San Diego, CA. Curran Asso- ciates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Learning with noisy labels for sentence-level sentiment classification", |
|
"authors": [ |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chaozhuo", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yan", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianrui", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hao Wang, Bing Liu, Chaozhuo Li, Yan Yang, and Tianrui Li. 2019. Learning with noisy labels for sentence-level sentiment classification. CoRR, abs/1909.00124.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Bayesian bias mitigation for crowdsourcing", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Fabian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"I" |
|
], |
|
"last": "Wauthier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Jordan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "NIPS", |
|
"volume": "24", |
|
"issue": "", |
|
"pages": "1800--1808", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fabian L. Wauthier and Michael I. Jordan. 2011. Bayesian bias mitigation for crowdsourcing. In NIPS, volume 24, pages 1800-1808, San Diego, CA. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "The multidimensional wisdom of crowds", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Welinder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steve", |
|
"middle": [], |
|
"last": "Branson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Serge", |
|
"middle": [], |
|
"last": "Belongie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pietro", |
|
"middle": [], |
|
"last": "Perona", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 23rd International Conference on Neural Information Processing Systems", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter Welinder, Steve Branson, Serge Belongie, and Pietro Perona. 2010. The multidimensional wisdom of crowds. In Proceedings of the 23rd International Conference on Neural Information Processing Sys- tems -Volume 2, Red Hook, NY, USA. Curran Asso- ciates Inc.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Whose vote should count more: Optimal integration of labels from labelers of unknown expertise", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Whitehill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Ruvolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tingfan", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Bergsma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Javier", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Movellan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2035--2043", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Whitehill, Paul Ruvolo, Tingfan Wu, Jacob Bergsma, and Javier R. Movellan. 2009. Whose vote should count more: Optimal integration of la- bels from labelers of unknown expertise. In NIPS, pages 2035-2043, San Diego, CA. Curran Asso- ciates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Modeling annotator expertise: Learning when everybody knows a bit of something", |
|
"authors": [ |
|
{ |
|
"first": "Yan", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rmer", |
|
"middle": [], |
|
"last": "Rosales", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Glenn", |
|
"middle": [], |
|
"last": "Fung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Schmidt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerardo", |
|
"middle": [ |
|
"Hermosillo" |
|
], |
|
"last": "Valadez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luca", |
|
"middle": [], |
|
"last": "Bogoni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Linda", |
|
"middle": [], |
|
"last": "Moy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jennifer", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Dy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "AISTATS", |
|
"volume": "9", |
|
"issue": "", |
|
"pages": "932--939", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yan Yan, Rmer Rosales, Glenn Fung, Mark W. Schmidt, Gerardo Hermosillo Valadez, Luca Bo- goni, Linda Moy, and Jennifer G. Dy. 2010. Mod- eling annotator expertise: Learning when everybody knows a bit of something. In AISTATS, volume 9 of JMLR Proceedings, pages 932-939, Chia Laguna Resort, Sardinia, Italy. JMLR.org.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Facial expression recognition with inconsistently annotated datasets", |
|
"authors": [ |
|
{ |
|
"first": "Jiabei", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shiguang", |
|
"middle": [], |
|
"last": "Shan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xilin", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ECCV (13)", |
|
"volume": "11217", |
|
"issue": "", |
|
"pages": "227--243", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiabei Zeng, Shiguang Shan, and Xilin Chen. 2018. Fa- cial expression recognition with inconsistently an- notated datasets. In ECCV (13), volume 11217 of Lecture Notes in Computer Science, pages 227-243, Red Hook, NY, USA. Springer.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Architecture of the end-to-end trainable LTNet" |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "as the number of all samples and N k of class k = 1, ..., L. L is the number of classes, T c = [\u03c4 c ij ] L\u00d7L the bias matrix of coder c, p n the latent truth vector of sample n = 1, ..., N , and p c n the annotator prediction. p km is the latent truth of the m-th sample of class k with m = 1, ..., N k , same for x km and y c km . The loss without logarithm is" |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "(\u03c4 c lh ) 0 . The bias parameters \u03c4 c lh of the bias matrix T c are updated according to" |
|
}, |
|
"TABREF0": { |
|
"type_str": "table", |
|
"html": null, |
|
"text": "", |
|
"content": "<table><tr><td>: Cohen's kappa scores between three different</td></tr><tr><td>ground truth estimation methods applied on the singly</td></tr><tr><td>labeled crowdsourced organic dataset.</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF2": { |
|
"type_str": "table", |
|
"html": null, |
|
"text": "Macro F1 scores and accuracy measured in the classification experiment.", |
|
"content": "<table/>", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |