|
{ |
|
"paper_id": "2022", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T16:43:33.972204Z" |
|
}, |
|
"title": "Overconfidence in the Face of Ambiguity with Adversarial Data", |
|
"authors": [ |
|
{ |
|
"first": "Margaret", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Washington", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Washington", |
|
"location": {} |
|
}, |
|
"email": "julianjm@cs.washington.edu" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Adversarial data collection has shown promise as a method for building models which are more robust to the spurious correlations that generally appear in naturalistic data. However, adversarially-collected data may itself be subject to biases, particularly with regard to ambiguous or arguable labeling judgments. Searching for examples where an annotator disagrees with a model might over-sample ambiguous inputs, and filtering the results for high inter-annotator agreement may under-sample them. In either case, training a model on such data may produce predictable and unwanted biases. In this work, we investigate whether models trained on adversarially-collected data are miscalibrated with respect to the ambiguity of their inputs. Using Natural Language Inference models as a testbed, we find no clear difference in accuracy between naturalistically and adversarially trained models, but our model trained only on adversarially-sourced data is considerably more overconfident of its predictions and demonstrates worse calibration, especially on ambiguous inputs. This effect is mitigated, however, when naturalistic and adversarial training data are combined.", |
|
"pdf_parse": { |
|
"paper_id": "2022", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Adversarial data collection has shown promise as a method for building models which are more robust to the spurious correlations that generally appear in naturalistic data. However, adversarially-collected data may itself be subject to biases, particularly with regard to ambiguous or arguable labeling judgments. Searching for examples where an annotator disagrees with a model might over-sample ambiguous inputs, and filtering the results for high inter-annotator agreement may under-sample them. In either case, training a model on such data may produce predictable and unwanted biases. In this work, we investigate whether models trained on adversarially-collected data are miscalibrated with respect to the ambiguity of their inputs. Using Natural Language Inference models as a testbed, we find no clear difference in accuracy between naturalistically and adversarially trained models, but our model trained only on adversarially-sourced data is considerably more overconfident of its predictions and demonstrates worse calibration, especially on ambiguous inputs. This effect is mitigated, however, when naturalistic and adversarial training data are combined.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "End-to-end neural network models have had widespread success on standard benchmarks in NLP (Wang et al., 2018 (Wang et al., , 2019 Lee et al., 2017; Dozat and Manning, 2017) . However, models trained with maximum-likelihood objectives under the standard Empirical Risk Minimization paradigm are liable to succeed in these settings by fitting to features or correlations in the data which are ultimately not representative of the underlying task and fail to generalize out of distribution, e.g., under domain shift or adversarial perturbation (Gururangan et al., 2018; Ilyas et al., 2019) . One promising method to overcome this difficulty is to * Equal contribution. move past the ERM paradigm and learn or evaluate causal features which are invariant across domains or distributions of data. While methods to do this often require the use of explicitly specified domains of data (Peters et al., 2016; Arjovsky et al., 2020) , a more lightweight approach is adversarial evaluation and training (Nie et al., 2020a; , in which annotators deliberately search for examples on which a model fails. Adversarial data annotation has been applied for a variety of tasks, including question answering (Bartolo et al., 2020) , natural language inference (Nie et al., 2020a) , hate speech detection , and sentiment analysis (Potts et al., 2021) . Adversarial data can help reduce spurious correlations in existing data (Bartolo et al., 2020) , expose a model's shortcomings in evaluation, and aid in training more robust models (Wallace et al., 2022) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 109, |
|
"text": "(Wang et al., 2018", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 110, |
|
"end": 130, |
|
"text": "(Wang et al., , 2019", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 131, |
|
"end": 148, |
|
"text": "Lee et al., 2017;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 149, |
|
"end": 173, |
|
"text": "Dozat and Manning, 2017)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 542, |
|
"end": 567, |
|
"text": "(Gururangan et al., 2018;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 568, |
|
"end": 587, |
|
"text": "Ilyas et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 880, |
|
"end": 901, |
|
"text": "(Peters et al., 2016;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 902, |
|
"end": 924, |
|
"text": "Arjovsky et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 994, |
|
"end": 1013, |
|
"text": "(Nie et al., 2020a;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1191, |
|
"end": 1213, |
|
"text": "(Bartolo et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1243, |
|
"end": 1262, |
|
"text": "(Nie et al., 2020a)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1312, |
|
"end": 1332, |
|
"text": "(Potts et al., 2021)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 1407, |
|
"end": 1429, |
|
"text": "(Bartolo et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1516, |
|
"end": 1538, |
|
"text": "(Wallace et al., 2022)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "However, the process of developing adversarial data is imperfect, and adversarial data may itself not resemble naturalistic distributions. For example, Phang et al. (2021) find that the AFLITE adversarial filtering algorithm , designed to find challenging examples in existing datasets, disproportionately favors contentious examples with annotator disagreement. This is suggestive that adversarially collected datasets, where humans actively try to fool a model, may be subject to these same biases Indeed, Phang et al. also show that adversarially-collected datasets may disproportionately penalize models that are similar to the one used during data collection. The qualitative properties of adversariallycollected data also vary depending on the adversary used during data collection, as shown by for the Adversarial NLI dataset (Nie et al., 2020a) . For these reasons, it is not clear what a model's performance under adversarial evaluation implies about its performance characteristics on naturalistic distributions, nor is it clear how training on adversarial data aids a model's perfor-mance in natural settings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 152, |
|
"end": 171, |
|
"text": "Phang et al. (2021)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 833, |
|
"end": 852, |
|
"text": "(Nie et al., 2020a)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this work, we focus on the interplay of adversarial learning and evaluation with ambiguity, or annotator disagreement. Just as adversarial filtering may over-sample ambiguous inputs (Phang et al., 2021) , adversarial annotators may produce strange, ambiguous, or disputable inputs as they employ tricks to fool a model in the adversarial setting. To preempt this issue and ensure data quality, adversarial data collection methods filter out examples with low human agreement (Nie et al., 2020a ), but it's possible that this approach could over-correct for the issue and under-sample such inputs in comparison to naturalistic data. For this reason, it is plausible that models trained on adversarially-collected data may be miscalibrated against the ambiguity of their inputs, forming a predictable blind spot.", |
|
"cite_spans": [ |
|
{ |
|
"start": 185, |
|
"end": 205, |
|
"text": "(Phang et al., 2021)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 478, |
|
"end": 496, |
|
"text": "(Nie et al., 2020a", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We investigate this issue by training models on naturalistically and adversarially collected datasets, then comparing their performance with respect to gold annotator distributions. As a testbed, we use Natural Language Inference, an NLP benchmark task with already-available adversarial data (Nie et al., 2020a) and full annotator distributions (Nie et al., 2020b) . We find no clear difference in accuracy between naturalistically and adversarially trained models, but our model trained only on adversarially-sourced data is considerably more overconfident of its predictions and demonstrates worse calibration, especially on ambiguous inputs. On the other hand, including both naturalistic data in training as well -as is standard practice (Nie et al., 2020a ) -mitigates these issues. While our results do not raise alarms about standard practices with adversarial data, they suggest that we should keep in mind the importance of including naturalistic data in training regimes moving forward. 1 2 Background: Robustness and Adversarial Data", |
|
"cite_spans": [ |
|
{ |
|
"start": 293, |
|
"end": 312, |
|
"text": "(Nie et al., 2020a)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 346, |
|
"end": 365, |
|
"text": "(Nie et al., 2020b)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 743, |
|
"end": 761, |
|
"text": "(Nie et al., 2020a", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Suppose we are interested in learning a conditional probability distribution p(y | x). The classical machine learning approach of Empirical Risk Minimization does so with the use of input data drawn from a distribution D:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "argmin \u03b8 E x\u223cD,y\u223cp(\u2022|x) \u2212 log p(y|x, \u03b8),", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "where \u03b8 are the model parameters. However, this method can do a poor job of approximating p(", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "y | x)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "when x is drawn from very different distributions than D. One approach which has been used to address this is robust optimization, which minimizes the worst-case loss subject to some constraints (Madry et al., 2018; Ghaoui and Lebret, 1997; Wald, 1945) . We can view robust optimization as solving a minimax problem:", |
|
"cite_spans": [ |
|
{ |
|
"start": 195, |
|
"end": 215, |
|
"text": "(Madry et al., 2018;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 216, |
|
"end": 240, |
|
"text": "Ghaoui and Lebret, 1997;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 241, |
|
"end": 252, |
|
"text": "Wald, 1945)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "argmin \u03b8 max D\u2208D E x\u223cD,y\u223cp(\u2022|x) \u2212 log p(y|x, \u03b8),", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "where D is a space of possible input distributions, and D is adversarially chosen among them. This formulation invites the question: what if D includes all possible distributions? Then we are free to find any x which the model gets wrong, and optimizing the loss effectively should produce a model which is robust to a wide range of distributions and hard to exploit. This suggests a practical approach to improving robustness which involves actively searching for examples on which a model fails, and using those examples to train new, more robust models. This general approach has been applied in a variety of settings in NLP, such as the Build-It Break-It shared task (Ettinger et al., 2017) , adversarial filtering of large datasets (Zellers et al., 2018; , and adversarial benchmarking and leaderboards (Nie et al., 2020a; .", |
|
"cite_spans": [ |
|
{ |
|
"start": 671, |
|
"end": 694, |
|
"text": "(Ettinger et al., 2017)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 737, |
|
"end": 759, |
|
"text": "(Zellers et al., 2018;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 808, |
|
"end": 827, |
|
"text": "(Nie et al., 2020a;", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "One complication that arises when sourcing adversarial data is with ambiguous or arguable examples. Suppose\u03b8 perfectly models p(y | x). Plugging this into Formula 2 yields max D\u2208D H(", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Y | x),", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "where D is concentrated on the inputs x which maximize the entropy of Y .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this context, high entropy in the conditional distribution of Y corresponds to high annotator disagreement. 2 When a human searches for an adversarial example, they are looking for a disagreement between themselves and the model. In this setting, there may be competition for inclusion in these adversarial tasks between ambiguous examples on which the model is close to the gold (annotator) distribution and less ambiguous examples where the model is further from gold. Thus an adversarial data generation process may be biased towards input examples which are ambiguous but unhelpful for training.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Formally, a simple way to think about counteracting this may be to explicitly subtract the gold entropy from the loss being minimized:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "argmin \u03b8 max D\u2208D E x\u223cD,y\u223cp(\u2022|x) \u2212 log p(y | x, \u03b8) + log p(y | x).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "( 3)Here, the objective focuses the distribution D on examples which maximize the model's KL-Divergence from p(y | x), no longer favoring ambiguous examples. Practical approaches to scaling adversarial data collection have applied a similar idea: in Adversarial NLI (Nie et al., 2020a) and Dynabench , annotators are asked to find examples where they disagree with the model, and then these examples are only kept if multiple validators agree on the correct label. However, it is not clear how well-calibrated this process is: it might, for example, systematically omit genuinely ambiguous examples which the model gets wrong with high confidence. Whether training on data produced by this process results in pathological model behavior is what we test in this work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 266, |
|
"end": 285, |
|
"text": "(Nie et al., 2020a)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
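
{

"text": "To make the contrast between Formula 2 and Formula 3 concrete, the following is a small numerical sketch. It is our own illustration rather than anything from the paper, and the toy distributions are invented. For a model that exactly matches the annotator distribution, the inner expectation of Formula 2 equals the annotator entropy H(Y | x), so an adversary optimizing it gravitates toward ambiguous inputs; the entropy-corrected objective of Formula 3 instead reduces to the KL-Divergence, which is zero for such a model and large only where the model is genuinely wrong.\n\nimport numpy as np\n\ndef expected_nll(gold, model):\n    # E_{y ~ gold}[ -log model(y) ]: the inner loss of Formula 2\n    return -np.sum(gold * np.log(model))\n\ndef expected_kl(gold, model):\n    # E_{y ~ gold}[ -log model(y) + log gold(y) ]: the inner loss of Formula 3\n    return np.sum(gold * (np.log(gold) - np.log(model)))\n\nambiguous = np.array([0.4, 0.35, 0.25])    # high annotator disagreement\nclear = np.array([0.9, 0.05, 0.05])        # low annotator disagreement\n\nperfect_on_ambiguous = ambiguous            # model matches annotators exactly\nwrong_on_clear = np.array([0.2, 0.7, 0.1])  # model confidently wrong\n\n# Under Formula 2 the ambiguous example still carries loss of about 1.08 nats even though\n# the model is perfect on it, versus about 1.58 for the confidently wrong prediction.\nprint(expected_nll(ambiguous, perfect_on_ambiguous), expected_nll(clear, wrong_on_clear))\n\n# Under Formula 3 the perfect prediction scores 0 and the wrong one keeps about 1.19,\n# so the adversary is pointed at real model errors rather than ambiguity.\nprint(expected_kl(ambiguous, perfect_on_ambiguous), expected_kl(clear, wrong_on_clear))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Introduction",

"sec_num": "1"

},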
|
{ |
|
"text": "Task Setting We use Natural Language Inference (Dagan et al., 2005; Bowman et al., 2015) as our underlying task, as there exist adversarial annotations for this task (Nie et al., 2020a; and annotator disagreement has been well studied (Pavlick and Kwiatkowski, 2019; Nie et al., 2020b; Zhang and de Marneffe, 2021 \u2022 ADVERSARIAL: These models are trained on data elicited from annotators under the requirement that they must fool the model. For this we will use the adversarial annotations of Nie et al. (2020a). 4", |
|
"cite_spans": [ |
|
{ |
|
"start": 47, |
|
"end": 67, |
|
"text": "(Dagan et al., 2005;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 68, |
|
"end": 88, |
|
"text": "Bowman et al., 2015)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 166, |
|
"end": 185, |
|
"text": "(Nie et al., 2020a;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 235, |
|
"end": 266, |
|
"text": "(Pavlick and Kwiatkowski, 2019;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 267, |
|
"end": 285, |
|
"text": "Nie et al., 2020b;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 286, |
|
"end": 313, |
|
"text": "Zhang and de Marneffe, 2021", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 ALL: These models are trained on the concatenation of all of the above data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Evaluation Data We test the performance of our models in the setting where we have comprehensive distributions of annotator behavior. For this, we will use the ChaosNLI evaluation sets (Nie et al., 2020b) which have 100 independent annotations for each example (where the task is 3-way multiclass classification). ChaosNLI includes evaluation sets for SNLI (Bowman et al., 2015), MultiNLI (Williams et al., 2018) , and \u03b1NLI (Bhagavatula et al., 2020, Abductive NLI) . Of these, we use the SNLI and MultiNLI sets, since \u03b1NLI has a different task format than other NLI datasets. Dataset statistics are shown in Table 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 185, |
|
"end": 204, |
|
"text": "(Nie et al., 2020b)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 389, |
|
"end": 412, |
|
"text": "(Williams et al., 2018)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 424, |
|
"end": 465, |
|
"text": "(Bhagavatula et al., 2020, Abductive NLI)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 609, |
|
"end": 616, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Metrics Using densely-annotated evaluation data, we compute several evaluation metrics. Each metric is stratified across non-overlapping ranges of annotator agreement in order to analyze the dependence of model performance (or model differences) on the ambiguity of its input examples. Let p(y) be the empirical distribution of annotator labels for an input example, and\u0177 be the model's prediction. Then, our metrics are:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Accuracy in Expectation: The expectation of the accuracy of the model against a randomly sampled annotator in ChaosNLI (i.e., p(\u0177)). We stratify this by the human accuracy in expectation, the accuracy of a randomlysampled human against the plurality vote of all annotators (max y (p(y))). We use discrete bins to allow for precise comparison of model performance within and between different regimes of ambiguity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Accuracy against Plurality: The accuracy of the model against the plurality vote of the 100 annotators (\u0177 = max(p(y))). We also stratify this by human accuracy in expectation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Model perplexity: The exponentiated entropy of the model's predicted distribution; higher corresponds to more uncertainty. (This is independent of the gold labels.) We stratify this by the perplexity of the human annotator distribution.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 KL-Divergence: The KL-Divergence of the model's predicted label distribution against the empirical distribution of annotated labels. This gives a measure of how well-calibrated the model is with respect to the true annotator distribution. We stratify this measure by the entropy of the human annotator distribution.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Accuracy in expectation emulates the typical accuracy computation in an IID empirical risk minimization setting, while accuracy against plurality allows us to measure accuracy scores above human performance (assuming the plurality among 100 annotators can be treated as the ground truth). 5 We also include the annotator distribution as a human reference point (for KL-Divergence, this is 0 by construction).", |
|
"cite_spans": [ |
|
{ |
|
"start": 289, |
|
"end": 290, |
|
"text": "5", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "3" |
|
}, |
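
{

"text": "The sketch below makes the four metric definitions above concrete. It is our own illustration, not code from the paper, and the function and variable names are hypothetical; it computes each quantity for a single example from the model's predicted label distribution and the roughly 100 ChaosNLI annotations. The KL-Divergence here is taken from the annotator distribution to the model distribution, one natural reading of the definition above.\n\nimport numpy as np\n\ndef example_metrics(model_probs, annotator_labels, num_classes=3):\n    # model_probs: predicted distribution over the 3 NLI labels for one example\n    # annotator_labels: the ChaosNLI label indices (about 100) for the same example\n    counts = np.bincount(np.asarray(annotator_labels), minlength=num_classes)\n    gold = counts / counts.sum()          # empirical annotator distribution p(y)\n    pred = int(np.argmax(model_probs))    # model prediction y_hat\n    eps = 1e-12                           # guards log(0) for labels no annotator chose\n\n    acc_in_expectation = float(gold[pred])              # P(random annotator agrees with y_hat)\n    acc_against_plurality = float(pred == int(np.argmax(gold)))\n    model_perplexity = float(np.exp(-np.sum(model_probs * np.log(model_probs + eps))))\n    human_perplexity = float(np.exp(-np.sum(gold * np.log(gold + eps))))  # used for stratification\n    kl_to_gold = float(np.sum(gold * (np.log(gold + eps) - np.log(model_probs + eps))))\n\n    return {\n        'acc_in_expectation': acc_in_expectation,\n        'acc_against_plurality': acc_against_plurality,\n        'model_perplexity': model_perplexity,\n        'human_perplexity': human_perplexity,\n        'kl_to_gold': kl_to_gold,\n    }",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experimental Setup",

"sec_num": "3"

},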
|
{ |
|
"text": "In all of our experiments, we begin with RoBERTa-Large (Liu et al., 2019) , a masked language model pretrained on a large text corpus comprised of internet and book corpora. We then attach a classifier head and fine-tune each model according to the dataset combinations listed in Section 3. The model was implemented using the AllenNLP library and trained using the AdamW optimizer to maximize accuracy on the combined development sets of the model variant's respective corpora. 5 The accuracy metrics provided for NLI datasets in practice are somewhere between the two, as the development and test sets of SNLI and MultiNLI were labeled by 5 annotators each and the majority label was chosen for the purposes of evaluation (Bowman et al., 2015; Williams et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 55, |
|
"end": 73, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 479, |
|
"end": 480, |
|
"text": "5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 724, |
|
"end": 745, |
|
"text": "(Bowman et al., 2015;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 746, |
|
"end": 768, |
|
"text": "Williams et al., 2018)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation Details", |
|
"sec_num": null |
|
}, |
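
{

"text": "For readers who want a starting point for a comparable setup, the following is a rough sketch of the fine-tuning configuration. It is not the authors' AllenNLP implementation (the released code linked in the footnotes is the reference for that); as an illustration it swaps in the Hugging Face transformers API, and the hyperparameter values shown are placeholders rather than the paper's.\n\nimport torch\nfrom transformers import AutoModelForSequenceClassification, AutoTokenizer\n\n# RoBERTa-large with a fresh 3-way classification head (entailment / neutral / contradiction)\ntokenizer = AutoTokenizer.from_pretrained('roberta-large')\nmodel = AutoModelForSequenceClassification.from_pretrained('roberta-large', num_labels=3)\n\n# Placeholder hyperparameters, not the values used in the paper\noptimizer = torch.optim.AdamW(model.parameters(), lr=1e-5, weight_decay=0.1)\n\ndef training_step(premises, hypotheses, labels):\n    # Standard maximum-likelihood fine-tuning on (premise, hypothesis) pairs\n    batch = tokenizer(premises, hypotheses, padding=True, truncation=True, return_tensors='pt')\n    out = model(**batch, labels=torch.tensor(labels))\n    out.loss.backward()\n    optimizer.step()\n    optimizer.zero_grad()\n    return out.loss.item()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Implementation Details",

"sec_num": null

},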
|
{ |
|
"text": "All results in this section are reported on the SNLI and MultiNLI development set portions of the ChaosNLI data. In all graphs, we provide smoothed kernel density estimates of the distributions over X and Y values in the margins where appropriate. Shaded areas around the lines represent 95% confidence intervals.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Accuracy in Expectation Model accuracy against randomly sampled annotators is shown in Figure 1 . All models exhibit the same overall trend, approaching or reaching human performance on the most ambiguous and least ambiguous examples, with a dip in the middle of the range. Even if adversarial data collection does under-sample ambiguous inputs, we find no noticeable (or significant) effect on model performance in the low-agreement regime. A potential reason for this is that the baseline performance is already so low in these cases -very close to chance level -that there is little room for decreasing performance further.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 87, |
|
"end": 95, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Accuracy against Plurality Model accuracy against the plurality vote among annotators is shown in Figure 2 . Once again, all models exhibit the same overall trend. While performance seems to level off or even increase for some models on extremely high-ambiguity examples (<50% human accuracy in expectation), there are too few such examples for us to draw any reliable conclusions in this regime.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 98, |
|
"end": 106, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Perplexity To understand the confidence levels of our models, we measure the perplexity of their output distributions and compare it to the perplexity of the human annotator distributions, shown in Figure 3 . Here, there is a clear difference between ADVERSARIAL and the other models: it has extremely low perplexity on many more examples, and high perplexity on very few. Furthermore, while model perplexity is positively correlated with annotator perplexity for all models, the ADVERSARIAL model is less sensitive to it, with its perplexity growing less with respect to annotator perplexity. This suggests the adversarial data collection process may, on aggregate, favor examples with less ambiguity, skewing the behavior of the model. The ALL model, which was exposed to naturalistic data as well, does not display the same effect. : Model accuracy stratified by human accuracy, relative to the human plurality vote. The early dip in the human baseline below 50% is from a few cases with tied plurality votes (where we break ties randomly). : Calibration curves for accuracy against the plurality vote among humans. As the confidence score, we use the probability assigned by the model to its prediction.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 198, |
|
"end": 206, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "To get a sense of how well the model fits the annotator distributions, we show the KL-Divergence of the models' predictions against the annotator distributions in Figure 4 . What we find is that ADVERSARIAL diverges greatly from the gold distributions in comparison to CLASSI-CAL and ALL: it has much higher KL-Divergence in aggregate, many more examples with high KL-Divergence, and its KL-Divergence scores grow more quickly as the entropy of the annotator distribution increases. The biases in adversarial data collection, then, have led more to overconfidence on ambiguous examples than wrong predictions on unambiguous examples. These results provide supporting evidence for the hypothesis that training a model on adversarially-collected data may underexpose it to ambiguous examples and that this could have undesirable effects on its performance. However, these effects seem to be mitigated with the additional inclusion of naturalistic data (in the ALL model).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 163, |
|
"end": 171, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "KL-Divergence", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Calibration Calibration curves are shown in Figure 5 . We find that the ADVERSARIAL model is highly confident more often than the other models, and at least in the very-high-confidence regime (>80% confidence), it has significantly worse calibration on SNLI (for MultiNLI, the results are borderline and only for the highest-confidence bin). We also plot calibration curves relative to the plurality vote among annotators ( Figure 6 ), which reflects the assumption that the model's maximum output probability reflects its epistemic uncertainty over the max-probability label. Here, the results are similar: the ADVERSARIAL model is worse calibrated in the very-high-confidence regime. Note, however, that when optimizing to maximize the likelihood of labels sampled from annotators, the output probabilities of a perfect model will not be well-calibrated against a plurality-based groundtruth. Optimizing for a model calibrated in this way is an alternative design choice which may require different training methods.", |
|
"cite_spans": [ |
|
{ |
|
"start": 44, |
|
"end": 52, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 424, |
|
"end": 432, |
|
"text": "Figure 6", |
|
"ref_id": "FIGREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "KL-Divergence", |
|
"sec_num": null |
|
}, |
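
{

"text": "As a brief note on how such curves can be produced, the sketch below is our own illustration, not the paper's plotting code; the helper name and the choice of ten equal-width bins are ours. Examples are bucketed by the model's confidence in its predicted label, and per-bucket mean confidence is compared with per-bucket accuracy against the human plurality vote; a well-calibrated model has mean confidence roughly equal to accuracy in every bucket.\n\nimport numpy as np\n\ndef reliability_curve(model_probs, plurality_labels, num_bins=10):\n    # model_probs: (N, 3) predicted distributions; plurality_labels: (N,) plurality votes\n    confidence = model_probs.max(axis=1)       # probability assigned to the predicted label\n    predictions = model_probs.argmax(axis=1)\n    correct = (predictions == plurality_labels).astype(float)\n\n    edges = np.linspace(0.0, 1.0, num_bins + 1)\n    bins = np.clip(np.digitize(confidence, edges) - 1, 0, num_bins - 1)\n\n    curve = []\n    for b in range(num_bins):\n        mask = bins == b\n        if mask.any():\n            # (mean confidence, accuracy against plurality, count) for this confidence bucket\n            curve.append((float(confidence[mask].mean()), float(correct[mask].mean()), int(mask.sum())))\n    return curve",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "KL-Divergence",

"sec_num": null

},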
|
{ |
|
"text": "In our experiments using SNLI, MultiNLI, and ANLI, we find that training only on adversariallycollected data produces similar accuracies across all regimes of ambiguity, but worse calibration at high confidence, and more overconfidence on ambiguous examples. This suggests that the adversarial data collection process may bias the model by favoring less ambiguous examples, but there are other potential interpretations of our results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In particular, the observed miscalibration of AD-VERSARIAL may the result of a more general domain shift between SNLI/MultiNLI and ANLI. This could explain why adding SNLI and MultiNLI to training, as in the ALL model, eliminates the effect. However, one might also expect to see a clear difference in accuracy as well if this were the issue. It's also worth noting that the SNLI and MNLI training sets are larger than ANLI's (see Table 1 ), which could explain why the ALL model behaves similarly to CLASSICAL. It remains an open question how little naturalistic (or, in-domain) data may be sufficient to mitigate the overconfidence issues we observe. Some notable trends hold for all models we test. First, they all perform worse on ambiguous examples ( Figure 1, Figure 2 ). This may be in part due to the relative scarcity of such examples in the training data or the relative difficulty of learning to model them. Second, they all demonstrate overconfidence, with model perplexity growing slower than human perplexity ( Figure 3 ) and relatively poor calibration at high confidence levels ( Figure 6 ). Even though augmenting training with adversarially-collected data has been shown to improve robustness in some settings (Bartolo et al., 2021a; , our results do not yet show any benefits to calibration on ambiguous examples in existing data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 564, |
|
"end": 579, |
|
"text": "(or, in-domain)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1228, |
|
"end": 1251, |
|
"text": "(Bartolo et al., 2021a;", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 431, |
|
"end": 438, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 756, |
|
"end": 774, |
|
"text": "Figure 1, Figure 2", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 1025, |
|
"end": 1033, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 1096, |
|
"end": 1104, |
|
"text": "Figure 6", |
|
"ref_id": "FIGREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Finally, while we hypothesize that the overconfidence issue with training on adversarial data arises from filtering for annotator agreement, it is also possible that for ANLI, the adversarial annotators found examples that were less ambiguous in the first place (as annotators might, for example, want to focus on sure-fire model mistakes). found that about 5% of examples in ANLI \"could reasonably be given multiple correct labels,\" suggesting a low level of ambiguity, but this was by the judgment of a single expert and may not correspond to the full variation in label assignment seen with crowdsourced annotators (which could potentially be investigated using the original unfiltered ANLI data). Measuring, controlling, managing, or representing ambiguity in adversarial annotation should be an interesting direction for future work, perhaps incorporating insights from recent work about construal (Trott et al., 2020; Pavlick and Kwiatkowski, 2019) , explicit disambiguation (Min et al., 2020) , model training dynam-ics Liu et al., 2022) , and other model-in-the-loop adversarial data collection efforts (Bartolo et al., 2020 (Bartolo et al., , 2021b Potts et al., 2021) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 903, |
|
"end": 923, |
|
"text": "(Trott et al., 2020;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 924, |
|
"end": 954, |
|
"text": "Pavlick and Kwiatkowski, 2019)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 981, |
|
"end": 999, |
|
"text": "(Min et al., 2020)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 1027, |
|
"end": 1044, |
|
"text": "Liu et al., 2022)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 1111, |
|
"end": 1132, |
|
"text": "(Bartolo et al., 2020", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1133, |
|
"end": 1157, |
|
"text": "(Bartolo et al., , 2021b", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1158, |
|
"end": 1177, |
|
"text": "Potts et al., 2021)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We have shown that training only on adversariallycollected data, at least in the case of the Adversarial NLI (ANLI) dataset, can produce undesirable performance characteristics in the resulting models. In particular, when tested on SNLI and MultiNLI data, these models produce output distributions that are much further from annotator distributions and fail to accurately convey annotator uncertainty, with highly confident predictions even on highly ambiguous examples. It is also possible that adversarial training in this setting could produce lower prediction accuracy in regimes of low human agreement, but baseline accuracy is already so low for our models and data, and there are so few examples in the extremely-ambiguous regime, that such an effect is hard to find.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In our results, if a large amount of naturalistic data is also included in training (as in the ALL model) -as is standard practice -the overconfidence problem is mitigated. This is encouraging, as any adversarially-collected data must start with some naturalistic data to construct the initial adversary. However, it remains an open question how little naturalistic data is sufficient; a large enough seed corpus may be beneficial for avoiding such issues in a setting of dynamic adversarial data collection (Wallace et al., 2022) . Future work can investigate this question, as well as how using full annotator distributions at training time or model calibration techniques may further help models deal with ambiguous inputs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 508, |
|
"end": 530, |
|
"text": "(Wallace et al., 2022)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Code to reproduce our experiments is available at https: //github.com/julianmichael/aeae.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this work, we assume all annotators implement the same probabilistic labeling function (which we are calling 'gold') and disagreement between annotators arises as an inherent feature of the task we are trying to model. We also assume that approximating annotator behavior on arguable or ambiguous examples is a desirable goal. These are simplifications: in some settings, e.g., the prescriptive paradigm ofR\u00f6ttger et al. (2022), we may wish to minimize annotator disagreement to learn a deterministic labeling function. In such settings, model behavior on arguable inputs may be uninteresting from the evaluation perspective, though searching for such examples could be useful for refining the task definition or annotation guidelines. We leave such issues out of scope for this work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Unfortunately, since the NLI task is somewhat artificial, there is no \"natural\" distribution of input texts. This is one of the issues that leads to annotation artifacts in the first place (Gururangan et al., 2018) since some of the input text must be annotator-generated. Regardless, spurious correlations exist in any naturalistic distribution so we will use these training sets as proxies for something naturalistic.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In order for this to properly count as adversarial data for our model, we use the same model family asNie et al. (2020a), which is BERT-large(Devlin et al., 2019) fine-tuned on SNLI and MultiNLI.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(a) Chaos-SNLI. (b) Chaos-MultiNLI.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank Ludwig Schmidt for early comments on this project, Ofir Press for providing entertainment, and the anonymous reviewers for their useful feedback.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Sebastian Riedel, and Pontus Stenetorp. 2020. Beat the AI: Investigating adversarial human annotation for reading comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Bartolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alastair", |
|
"middle": [], |
|
"last": "Roberts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johannes", |
|
"middle": [], |
|
"last": "Welbl", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "8", |
|
"issue": "", |
|
"pages": "662--678", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/tacl_a_00338" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Max Bartolo, Alastair Roberts, Johannes Welbl, Sebas- tian Riedel, and Pontus Stenetorp. 2020. Beat the AI: Investigating adversarial human annotation for read- ing comprehension. Transactions of the Association for Computational Linguistics, 8:662-678.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Improving question answering model robustness with synthetic adversarial data generation", |
|
"authors": [ |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Bartolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tristan", |
|
"middle": [], |
|
"last": "Thrush", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Riedel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pontus", |
|
"middle": [], |
|
"last": "Stenetorp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "8830--8848", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.emnlp-main.696" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Max Bartolo, Tristan Thrush, Robin Jia, Sebastian Riedel, Pontus Stenetorp, and Douwe Kiela. 2021a. Improving question answering model robustness with synthetic adversarial data generation. In Proceedings of the 2021 Conference on Empirical Methods in Nat- ural Language Processing, pages 8830-8848, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Pontus Stenetorp, Robin Jia, and Douwe Kiela. 2021b. Models in the loop: Aiding crowdworkers with generative annotation assistants", |
|
"authors": [ |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Bartolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tristan", |
|
"middle": [], |
|
"last": "Thrush", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Riedel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.48550/ARXIV.2112.09062" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Max Bartolo, Tristan Thrush, Sebastian Riedel, Pontus Stenetorp, Robin Jia, and Douwe Kiela. 2021b. Mod- els in the loop: Aiding crowdworkers with generative annotation assistants.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Abductive commonsense reasoning", |
|
"authors": [ |
|
{ |
|
"first": "Chandra", |
|
"middle": [], |
|
"last": "Bhagavatula", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chaitanya", |
|
"middle": [], |
|
"last": "Ronan Le Bras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Keisuke", |
|
"middle": [], |
|
"last": "Malaviya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ari", |
|
"middle": [], |
|
"last": "Sakaguchi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannah", |
|
"middle": [], |
|
"last": "Holtzman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Doug", |
|
"middle": [], |
|
"last": "Rashkin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wen-Tau", |
|
"middle": [], |
|
"last": "Downey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Yih", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "8th International Conference on Learning Representations", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chandra Bhagavatula, Ronan Le Bras, Chaitanya Malaviya, Keisuke Sakaguchi, Ari Holtzman, Han- nah Rashkin, Doug Downey, Wen-tau Yih, and Yejin Choi. 2020. Abductive commonsense reasoning. In 8th International Conference on Learning Represen- tations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "A large annotated corpus for learning natural language inference", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Samuel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gabor", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Angeli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Potts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Samuel R. Bowman, Gabor Angeli, Christopher Potts, and Christopher D. Manning. 2015. A large anno- tated corpus for learning natural language inference. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing (EMNLP). Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Adversarial filters of dataset biases", |
|
"authors": [ |
|
{ |
|
"first": "Swabha", |
|
"middle": [], |
|
"last": "Ronan Le Bras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chandra", |
|
"middle": [], |
|
"last": "Swayamdipta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rowan", |
|
"middle": [], |
|
"last": "Bhagavatula", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Zellers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Sabharwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 37th International Conference on Machine Learning", |
|
"volume": "119", |
|
"issue": "", |
|
"pages": "1078--1088", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ronan Le Bras, Swabha Swayamdipta, Chandra Bha- gavatula, Rowan Zellers, Matthew Peters, Ashish Sabharwal, and Yejin Choi. 2020. Adversarial filters of dataset biases. In Proceedings of the 37th Inter- national Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pages 1078-1088. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "The pascal recognising textual entailment challenge", |
|
"authors": [ |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Ido Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernardo", |
|
"middle": [], |
|
"last": "Glickman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Magnini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the First international conference on Machine Learning Challenges: evaluating Predictive Uncertainty Visual Object Classification, and Recognizing Textual Entailment", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "177--190", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ido Dagan, Oren Glickman, and Bernardo Magnini. 2005. The pascal recognising textual entailment chal- lenge. In Proceedings of the First international con- ference on Machine Learning Challenges: evaluating Predictive Uncertainty Visual Object Classification, and Recognizing Textual Entailment, pages 177-190.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Deep biaffine attention for neural dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Dozat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Christopher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "5th International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Timothy Dozat and Christopher D. Manning. 2017. Deep biaffine attention for neural dependency pars- ing. In 5th International Conference on Learning Representations, ICLR 2017, Toulon, France, April 24-26, 2017, Conference Track Proceedings. Open- Review.net.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Towards linguistically generalizable NLP systems: A workshop and shared task", |
|
"authors": [ |
|
{ |
|
"first": "Allyson", |
|
"middle": [], |
|
"last": "Ettinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sudha", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hal", |
|
"middle": [], |
|
"last": "Daum\u00e9", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iii", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emily", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Bender", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the First Workshop on Building Linguistically Generalizable NLP Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--10", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-5401" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Allyson Ettinger, Sudha Rao, Hal Daum\u00e9 III, and Emily M. Bender. 2017. Towards linguistically gen- eralizable NLP systems: A workshop and shared task. In Proceedings of the First Workshop on Building Lin- guistically Generalizable NLP Systems, pages 1-10, Copenhagen, Denmark. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Robust solutions to least-squares problems with uncertain data", |
|
"authors": [ |
|
{ |
|
"first": "Laurent", |
|
"middle": [ |
|
"El" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ghaoui", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Herv\u00e9", |
|
"middle": [], |
|
"last": "Lebret", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "SIAM J. Matrix Anal. Appl", |
|
"volume": "18", |
|
"issue": "4", |
|
"pages": "1035--1064", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1137/S0895479896298130" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Laurent El Ghaoui and Herv\u00e9 Lebret. 1997. Robust solutions to least-squares problems with uncertain data. SIAM J. Matrix Anal. Appl., 18(4):1035-1064.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Annotation artifacts in natural language inference data", |
|
"authors": [ |
|
{ |
|
"first": "Swabha", |
|
"middle": [], |
|
"last": "Suchin Gururangan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Swayamdipta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roy", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Bowman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "107--112", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-2017" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Suchin Gururangan, Swabha Swayamdipta, Omer Levy, Roy Schwartz, Samuel Bowman, and Noah A. Smith. 2018. Annotation artifacts in natural language infer- ence data. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 2 (Short Papers), volume 2, pages 107-112. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Adversarial examples are not bugs, they are features", |
|
"authors": [ |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Ilyas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shibani", |
|
"middle": [], |
|
"last": "Santurkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dimitris", |
|
"middle": [], |
|
"last": "Tsipras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Logan", |
|
"middle": [], |
|
"last": "Engstrom", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brandon", |
|
"middle": [], |
|
"last": "Tran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aleksander", |
|
"middle": [], |
|
"last": "Madry", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew Ilyas, Shibani Santurkar, Dimitris Tsipras, Lo- gan Engstrom, Brandon Tran, and Aleksander Madry. 2019. Adversarial examples are not bugs, they are features. In Advances in Neural Information Process- ing Systems, volume 32. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Dynabench: Rethinking benchmarking in NLP. CoRR", |
|
"authors": [ |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Bartolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yixin", |
|
"middle": [], |
|
"last": "Nie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Divyansh", |
|
"middle": [], |
|
"last": "Kaushik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Atticus", |
|
"middle": [], |
|
"last": "Geiger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhengxuan", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bertie", |
|
"middle": [], |
|
"last": "Vidgen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Grusha", |
|
"middle": [], |
|
"last": "Prasad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pratik", |
|
"middle": [], |
|
"last": "Ringshia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyi", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Douwe Kiela, Max Bartolo, Yixin Nie, Divyansh Kaushik, Atticus Geiger, Zhengxuan Wu, Bertie Vid- gen, Grusha Prasad, Amanpreet Singh, Pratik Ring- shia, Zhiyi Ma, Tristan Thrush, Sebastian Riedel, Zeerak Waseem, Pontus Stenetorp, Robin Jia, Mo- hit Bansal, Christopher Potts, and Adina Williams. 2021. Dynabench: Rethinking benchmarking in NLP. CoRR, abs/2104.14337.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "End-to-end neural coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luheng", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "188--197", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D17-1018" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kenton Lee, Luheng He, Mike Lewis, and Luke Zettle- moyer. 2017. End-to-end neural coreference reso- lution. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Process- ing, pages 188-197. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Wanli: Worker and ai collaboration for natural language inference dataset creation", |
|
"authors": [ |
|
{ |
|
"first": "Alisa", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Swabha", |
|
"middle": [], |
|
"last": "Swayamdipta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alisa Liu, Swabha Swayamdipta, Noah A. Smith, and Yejin Choi. 2022. Wanli: Worker and ai collaboration for natural language inference dataset creation.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Roberta: A robustly optimized bert pretraining approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.11692" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining ap- proach. arXiv preprint arXiv:1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Towards deep learning models resistant to adversarial attacks", |
|
"authors": [ |
|
{ |
|
"first": "Aleksander", |
|
"middle": [], |
|
"last": "Madry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aleksandar", |
|
"middle": [], |
|
"last": "Makelov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ludwig", |
|
"middle": [], |
|
"last": "Schmidt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dimitris", |
|
"middle": [], |
|
"last": "Tsipras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adrian", |
|
"middle": [], |
|
"last": "Vladu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "6th International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. 2018. Towards deep learning models resistant to adversarial attacks. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 -May 3, 2018, Conference Track Proceed- ings. OpenReview.net.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "AmbigQA: Answering ambiguous open-domain questions", |
|
"authors": [ |
|
{ |
|
"first": "Sewon", |
|
"middle": [], |
|
"last": "Min", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannaneh", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5783--5797", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-main.466" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sewon Min, Julian Michael, Hannaneh Hajishirzi, and Luke Zettlemoyer. 2020. AmbigQA: Answering am- biguous open-domain questions. In Proceedings of the 2020 Conference on Empirical Methods in Nat- ural Language Processing (EMNLP), pages 5783- 5797, Online. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Adversarial NLI: A new benchmark for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Yixin", |
|
"middle": [], |
|
"last": "Nie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Dinan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4885--4901", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.441" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yixin Nie, Adina Williams, Emily Dinan, Mohit Bansal, Jason Weston, and Douwe Kiela. 2020a. Adversarial NLI: A new benchmark for natural language under- standing. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, pages 4885-4901, Online. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "What can we learn from collective human opinions on natural language inference data?", |
|
"authors": [ |
|
{ |
|
"first": "Yixin", |
|
"middle": [], |
|
"last": "Nie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9131--9143", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-main.734" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yixin Nie, Xiang Zhou, and Mohit Bansal. 2020b. What can we learn from collective human opinions on nat- ural language inference data? In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 9131-9143, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Inherent disagreements in human textual inferences", |
|
"authors": [ |
|
{ |
|
"first": "Ellie", |
|
"middle": [], |
|
"last": "Pavlick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Kwiatkowski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "677--694", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/tacl_a_00293" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ellie Pavlick and Tom Kwiatkowski. 2019. Inherent disagreements in human textual inferences. Transac- tions of the Association for Computational Linguis- tics, 7:677-694.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Causal inference by using invariant prediction: identification and confidence intervals", |
|
"authors": [ |
|
{ |
|
"first": "Jonas", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "B\u00fchlmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicolai", |
|
"middle": [], |
|
"last": "Meinshausen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Journal of the Royal Statistical Society: Series B (Statistical Methodology)", |
|
"volume": "78", |
|
"issue": "5", |
|
"pages": "947--1012", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1111/rssb.12167" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jonas Peters, Peter B\u00fchlmann, and Nicolai Meinshausen. 2016. Causal inference by using invariant prediction: identification and confidence intervals. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 78(5):947-1012.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Adversarially constructed evaluation sets are more challenging", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Phang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angelica", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason Phang, Angelica Chen, William Huang, and Samuel R. Bowman. 2021. Adversarially constructed evaluation sets are more challenging, but may not be fair. CoRR, abs/2111.08181.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "DynaSent: A dynamic benchmark for sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Potts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhengxuan", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Atticus", |
|
"middle": [], |
|
"last": "Geiger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2388--2404", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.acl-long.186" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher Potts, Zhengxuan Wu, Atticus Geiger, and Douwe Kiela. 2021. DynaSent: A dynamic bench- mark for sentiment analysis. In Proceedings of the 59th Annual Meeting of the Association for Compu- tational Linguistics and the 11th International Joint Conference on Natural Language Processing (Vol- ume 1: Long Papers), pages 2388-2404, Online. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Two contrasting data annotation paradigms for subjective nlp tasks", |
|
"authors": [ |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "R\u00f6ttger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bertie", |
|
"middle": [], |
|
"last": "Vidgen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Janet", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Pierrehumbert", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paul R\u00f6ttger, Bertie Vidgen, Dirk Hovy, and Janet B. Pierrehumbert. 2022. Two contrasting data annota- tion paradigms for subjective nlp tasks. In Proceed- ings of the 2022 Conference of the North American Chapter of the Association for Computational Lin- guistics: Human Language Technologies, Seattle, WA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Winogrande: An adversarial winograd schema challenge at scale", |
|
"authors": [ |
|
{ |
|
"first": "Keisuke", |
|
"middle": [], |
|
"last": "Sakaguchi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ronan", |
|
"middle": [], |
|
"last": "Le Bras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chandra", |
|
"middle": [], |
|
"last": "Bhagavatula", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "34", |
|
"issue": "", |
|
"pages": "8732--8740", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1609/aaai.v34i05.6399" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Keisuke Sakaguchi, Ronan Le Bras, Chandra Bhagavat- ula, and Yejin Choi. 2020. Winogrande: An adversar- ial winograd schema challenge at scale. Proceedings of the AAAI Conference on Artificial Intelligence, 34(05):8732-8740.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Dataset cartography: Mapping and diagnosing datasets with training dynamics", |
|
"authors": [ |
|
{ |
|
"first": "Swabha", |
|
"middle": [], |
|
"last": "Swayamdipta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roy", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicholas", |
|
"middle": [], |
|
"last": "Lourie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yizhong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannaneh", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9275--9293", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-main.746" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Swabha Swayamdipta, Roy Schwartz, Nicholas Lourie, Yizhong Wang, Hannaneh Hajishirzi, Noah A. Smith, and Yejin Choi. 2020. Dataset cartography: Mapping and diagnosing datasets with training dynamics. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 9275-9293, Online. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "2020. (Re)construing meaning in NLP", |
|
"authors": [ |
|
{ |
|
"first": "Sean", |
|
"middle": [], |
|
"last": "Trott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nancy", |
|
"middle": [], |
|
"last": "Tiago Timponi Torrent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathan", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Schneider", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5170--5184", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.462" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sean Trott, Tiago Timponi Torrent, Nancy Chang, and Nathan Schneider. 2020. (Re)construing meaning in NLP. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 5170-5184, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Learning from the worst: Dynamically generated datasets to improve online hate detection", |
|
"authors": [ |
|
{ |
|
"first": "Bertie", |
|
"middle": [], |
|
"last": "Vidgen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tristan", |
|
"middle": [], |
|
"last": "Thrush", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zeerak", |
|
"middle": [], |
|
"last": "Waseem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1667--1682", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.acl-long.132" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bertie Vidgen, Tristan Thrush, Zeerak Waseem, and Douwe Kiela. 2021. Learning from the worst: Dy- namically generated datasets to improve online hate detection. In Proceedings of the 59th Annual Meet- ing of the Association for Computational Linguistics and the 11th International Joint Conference on Natu- ral Language Processing (Volume 1: Long Papers), pages 1667-1682, Online. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Statistical decision functions which minimize the maximum risk", |
|
"authors": [ |
|
{ |
|
"first": "Abraham", |
|
"middle": [], |
|
"last": "Wald", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1945, |
|
"venue": "Annals of Mathematics", |
|
"volume": "45", |
|
"issue": "2", |
|
"pages": "265--280", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.2307/1969022" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abraham Wald. 1945. Statistical decision functions which minimize the maximum risk. Annals of Math- ematics, 45(2):265-280.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Analyzing dynamic adversarial training data in the limit", |
|
"authors": [ |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Wallace", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "Findings of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eric Wallace, Adina Williams, Robin Jia, and Douwe Kiela. 2022. Analyzing dynamic adversarial training data in the limit. In Findings of the Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "SuperGLUE: A multi-task benchmark and analysis platform for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yada", |
|
"middle": [], |
|
"last": "Pruksachatkun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikita", |
|
"middle": [], |
|
"last": "Nangia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "3261--3275", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Wang, Yada Pruksachatkun, Nikita Nangia, Aman- preet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. 2019. SuperGLUE: A multi-task benchmark and analysis platform for natural language understanding. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alch\u00e9-Buc, E. Fox, and R. Garnett, editors, Advances in Neu- ral Information Processing Systems 32, pages 3261- 3275. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "GLUE: A multi-task benchmark and analysis platform for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "353--355", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-5446" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel Bowman. 2018. GLUE: A multi-task benchmark and analysis platform for nat- ural language understanding. In Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 353-355. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "A broad-coverage challenge corpus for sentence understanding through inference", |
|
"authors": [ |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikita", |
|
"middle": [], |
|
"last": "Nangia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1112--1122", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-1101" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adina Williams, Nikita Nangia, and Samuel Bowman. 2018. A broad-coverage challenge corpus for sen- tence understanding through inference. In Proceed- ings of the 2018 Conference of the North American Chapter of the Association for Computational Lin- guistics: Human Language Technologies, Volume 1 (Long Papers), pages 1112-1122, New Orleans, Louisiana. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "ANLIzing the adversarial natural language inference dataset", |
|
"authors": [ |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tristan", |
|
"middle": [], |
|
"last": "Thrush", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "Proceedings of the Society for Computation in Linguistics 2022", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "23--54", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adina Williams, Tristan Thrush, and Douwe Kiela. 2022. ANLIzing the adversarial natural language inference dataset. In Proceedings of the Society for Computation in Linguistics 2022, pages 23-54, on- line. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "SWAG: A large-scale adversarial dataset for grounded commonsense inference", |
|
"authors": [ |
|
{ |
|
"first": "Rowan", |
|
"middle": [], |
|
"last": "Zellers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Bisk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roy", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "93--104", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1009" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rowan Zellers, Yonatan Bisk, Roy Schwartz, and Yejin Choi. 2018. SWAG: A large-scale adversarial dataset for grounded commonsense inference. In Proceed- ings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 93-104, Brus- sels, Belgium. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Learning with different amounts of annotation: From zero to many labels", |
|
"authors": [ |
|
{ |
|
"first": "Shujian", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chengyue", |
|
"middle": [], |
|
"last": "Gong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eunsol", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7620--7632", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.emnlp-main.601" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shujian Zhang, Chengyue Gong, and Eunsol Choi. 2021. Learning with different amounts of annotation: From zero to many labels. In Proceedings of the 2021 Conference on Empirical Methods in Natural Lan- guage Processing, pages 7620-7632, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Identifying inherent disagreement in natural language inference", |
|
"authors": [ |
|
{ |
|
"first": "Xinliang", |
|
"middle": [ |
|
"Frederick" |
|
], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marie-Catherine", |
|
"middle": [], |
|
"last": "de Marneffe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4908--4915", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.naacl-main.390" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xinliang Frederick Zhang and Marie-Catherine de Marneffe. 2021. Identifying inherent disagree- ment in natural language inference. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 4908-4915, Online. Association for Computational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "(a) Chaos-SNLI. (b) Chaos-MultiNLI.", |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "Model accuracy stratified by human accuracy, relative to a randomly sampled human judgment. Chance accuracy is approximately 1 3 , and the human baseline (which uses the plurality vote as the prediction) is an upper bound.(a) Chaos-SNLI. (b) Chaos-MultiNLI.", |
|
"uris": null |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "Figure 2: Model accuracy stratified by human accuracy, relative to the human plurality vote. The early dip in the human baseline below 50% is from a few cases with tied plurality votes (where we break ties randomly).", |
|
"uris": null |
|
}, |
|
"FIGREF3": { |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "Model perplexity relative to annotator perplexity.(a) Chaos-SNLI. (b) Chaos-MultiNLI.", |
|
"uris": null |
|
}, |
|
"FIGREF4": { |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "KL-Divergence of model outputs from the annotator distribution, graphed relative to annotator entropy. Both axes are measured in nats. (a) Chaos-SNLI. (b) Chaos-MultiNLI.", |
|
"uris": null |
|
}, |
|
"FIGREF5": { |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "Calibration curves for accuracy against a randomly sampled human. As the confidence score, we use the probability assigned by the model to its prediction.(a) Chaos-SNLI. (b) Chaos-MultiNLI.", |
|
"uris": null |
|
}, |
|
"FIGREF6": { |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "Figure 6: Calibration curves for accuracy against the plurality vote among humans. As the confidence score, we use the probability assigned by the model to its prediction.", |
|
"uris": null |
|
}, |
|
"TABREF0": { |
|
"html": null, |
|
"text": ").", |
|
"content": "<table><tr><td>Dataset</td><td>Train</td><td>Dev</td></tr><tr><td>SNLI</td><td colspan=\"2\">550,152 10,000</td></tr><tr><td>MultiNLI</td><td colspan=\"2\">392,702 10,000</td></tr><tr><td colspan=\"3\">ANLI (all rounds) 162,865 3,200</td></tr><tr><td>Chaos-SNLI</td><td/><td>1,514</td></tr><tr><td>Chaos-MultiNLI</td><td/><td>1,599</td></tr><tr><td>Model Variants We train models under three conditions:</td><td/><td/></tr><tr><td>\u2022 CLASSICAL: These models are trained on</td><td/><td/></tr><tr><td>data elicited from annotators in a model-agnostic way, i.e., naturalistically. 3 For this</td><td/><td/></tr><tr><td>we use the SNLI (Bowman et al., 2015) and</td><td/><td/></tr><tr><td>MultiNLI (Williams et al., 2018) datasets.</td><td/><td/></tr></table>", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"text": "Number of examples in training and development sets we use. For training data (top), development sets are used for model selection, while our evaluations (bottom) are on the ChaosNLI-annotated subsets of the SNLI and MultiNLI development sets.", |
|
"content": "<table/>", |
|
"num": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |