|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T01:08:54.241324Z" |
|
}, |
|
"title": "To what extent do human explanations of model behavior align with actual model behavior?", |
|
"authors": [ |
|
{ |
|
"first": "Grusha", |
|
"middle": [], |
|
"last": "Prasad", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Johns Hopkins University", |
|
"location": {} |
|
}, |
|
"email": "grusha.prasad@jhu.edu" |
|
}, |
|
{ |
|
"first": "Yixin", |
|
"middle": [], |
|
"last": "Nie", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "UNC Chapel Hill", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "UNC Chapel Hill", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "adinawilliams@fb.com" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Given the increasingly prominent role NLP models (will) play in our lives, it is important for human expectations of model behavior to align with actual model behavior. Using Natural Language Inference (NLI) as a case study, we investigate the extent to which human-generated explanations of models' inference decisions align with how models actually make these decisions. More specifically, we define three alignment metrics that quantify how well natural language explanations align with model sensitivity to input words, as measured by integrated gradients. Then, we evaluate eight different models (the base and large versions of BERT, RoBERTa and ELECTRA, as well as an RNN and bag-of-words model), and find that the BERT-base model has the highest alignment with human-generated explanations, for all alignment metrics. Focusing in on transformers, we find that the base versions tend to have higher alignment with human-generated explanations than their larger counterparts, suggesting that increasing the number of model parameters leads, in some cases, to worse alignment with human explanations. Finally, we find that a model's alignment with human explanations is not predicted by the model's accuracy, suggesting that accuracy and alignment are complementary ways to evaluate models. * The work was conducted during an internship at Facebook AI Research.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Given the increasingly prominent role NLP models (will) play in our lives, it is important for human expectations of model behavior to align with actual model behavior. Using Natural Language Inference (NLI) as a case study, we investigate the extent to which human-generated explanations of models' inference decisions align with how models actually make these decisions. More specifically, we define three alignment metrics that quantify how well natural language explanations align with model sensitivity to input words, as measured by integrated gradients. Then, we evaluate eight different models (the base and large versions of BERT, RoBERTa and ELECTRA, as well as an RNN and bag-of-words model), and find that the BERT-base model has the highest alignment with human-generated explanations, for all alignment metrics. Focusing in on transformers, we find that the base versions tend to have higher alignment with human-generated explanations than their larger counterparts, suggesting that increasing the number of model parameters leads, in some cases, to worse alignment with human explanations. Finally, we find that a model's alignment with human explanations is not predicted by the model's accuracy, suggesting that accuracy and alignment are complementary ways to evaluate models. * The work was conducted during an internship at Facebook AI Research.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "NLP models often make classification decisions in ways humans don't expect them to. For example, Question Answering (QA) models often choose the correct answer for one example, but fail catastrophically on other very similar examples (Ribeiro et al., 2018; Selvaraju et al., 2020) , such as answering \"Is the rose red?\" with no, but then \"What color is the rose?\" Figure 1 : An example illustrating different token-level importance values. \"Model Importance\" is color coded by absolute value integrated gradients attribution for BERT-base. The other three rows show the oracle importance scores estimated by the hard, soft and expert oracles (darker values indicate more important).", |
|
"cite_spans": [ |
|
{ |
|
"start": 234, |
|
"end": 256, |
|
"text": "(Ribeiro et al., 2018;", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 257, |
|
"end": 280, |
|
"text": "Selvaraju et al., 2020)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 364, |
|
"end": 372, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "with \"red\" (Ribeiro et al., 2019) . VQA models often attend to different portions of images than humans do (Das et al., 2016) . NLI models often rely on shallow heuristics, their predictions are inappropriately affected by particular words, and they sometimes perform unexpectedly well from only looking at the hypothesis (Gururangan et al., 2018; Poliak et al., 2018; Tsuchiya, 2018) . Since people generally do not expect models to base decisions on spurious correlations in the data (cf. McCoy et al. 2019) , models that make decisions in alignment with human expectations are less likely to make the right decisions for the wrong reasons.", |
|
"cite_spans": [ |
|
{ |
|
"start": 11, |
|
"end": 33, |
|
"text": "(Ribeiro et al., 2019)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 107, |
|
"end": 125, |
|
"text": "(Das et al., 2016)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 322, |
|
"end": 347, |
|
"text": "(Gururangan et al., 2018;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 348, |
|
"end": 368, |
|
"text": "Poliak et al., 2018;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 369, |
|
"end": 384, |
|
"text": "Tsuchiya, 2018)", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 486, |
|
"end": 509, |
|
"text": "(cf. McCoy et al. 2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we measure how well model decisions are aligned with human expectations about those decisions. Building on work that aims to extract or generate interpretable or faithful descriptions of model behavior (Lipton, 2018; Rajani et al., 2019; Kalouli et al., 2020; Silva et al., 2020; Jacovi and Goldberg, 2020; Zhao and Vydiswaran, 2020) , we use human-generated natural language explanations to determine which portions of the input people expected to be important in influencing models' decisions. We then use Integrated Gradients (IG, Sundararajan et al. 2017) to determine which portions actually influenced the models' decisions. We term the alignment between them as Importance Alignment. We formulate three different methods of using human-generated natural language explanations to quantify human expectations of model behavior, resulting in three different methods for calculating importance alignment.", |
|
"cite_spans": [ |
|
{ |
|
"start": 217, |
|
"end": 231, |
|
"text": "(Lipton, 2018;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 232, |
|
"end": 252, |
|
"text": "Rajani et al., 2019;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 253, |
|
"end": 274, |
|
"text": "Kalouli et al., 2020;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 275, |
|
"end": 294, |
|
"text": "Silva et al., 2020;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 295, |
|
"end": 321, |
|
"text": "Jacovi and Goldberg, 2020;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 322, |
|
"end": 348, |
|
"text": "Zhao and Vydiswaran, 2020)", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 544, |
|
"end": 574, |
|
"text": "(IG, Sundararajan et al. 2017)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "As a case study, we applied our method to the Natural Language Inference task (Dagan et al., 2006; Bowman et al., 2015; Williams et al., 2018) in which models are tasked with classifying pairs of sentences according to whether the first sentence entails, contradicts, or is neutral with respect to the second. Concretely, we measured the extent to which the inference decisions of eight models (six state-of-the-art transformers, an LSTM model and a bag-of-words model) aligned with humangenerated explanations from the Adversarial NLI dataset (ANLI, Nie et al. 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 78, |
|
"end": 98, |
|
"text": "(Dagan et al., 2006;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 99, |
|
"end": 119, |
|
"text": "Bowman et al., 2015;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 120, |
|
"end": 142, |
|
"text": "Williams et al., 2018)", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 544, |
|
"end": 567, |
|
"text": "(ANLI, Nie et al. 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In all three methods for calculating Importance Alignment, BERT-base had the highest importance alignment score. We also found that the smaller, 'base' versions of transformers tended to have higher importance alignment scores than the corresponding large versions. However, being smaller doesn't always result in higher importance alignment, since both small non-transformer models had lower importance alignment scores than 'base' transformers. Finally, we demonstrate that more accurate models (both for classic test accuracy and on the diagnostic dataset HANS; McCoy et al. 2019) do not necessarily have higher importance alignment, suggesting that accuracy and alignment with human expectations are orthogonal dimensions along which models should be evaluated.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The term \"alignment\" has been used in several different contexts in AI: alignment of model behaviour with normative notions of human ethics (\"value alignment\"; Russell et al. 2015; Peng et al. 2020) , alignment between tokens from source to target in machine translation, alignment between images and text in image-caption alignment models, etc. In this paper, we propose a new type of alignment, Importance Alignment: we want models to not only generate accurate outputs, but also to generate these accurate outputs for reasons that align with human expectations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 160, |
|
"end": 180, |
|
"text": "Russell et al. 2015;", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 181, |
|
"end": 198, |
|
"text": "Peng et al. 2020)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "High importance alignment can be valuable because prior work has demonstrated that when people form correct mental models of AI decision boundaries, they make better AI-assisted decisions (Bansal et al., 2019a,b) . For example, when annotators are provided with additional information about model decisions, such as model accuracy (cf. Yin et al. 2019) or model-generated explanations (Bansal et al., 2020b) , it increases their level of trust and in some settings, can actually improve AI-assisted decision making . Therefore, optimizing models to have high importance alignment is a worthy goal, even if it can initially result in models with lower accuracy. In the words of Bansal et al. (2020a) , \"predictable performance is worth a slight sacrifice in AI accuracy,\" especially on tasks with potentially serious social implications.", |
|
"cite_spans": [ |
|
{ |
|
"start": 188, |
|
"end": 212, |
|
"text": "(Bansal et al., 2019a,b)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 336, |
|
"end": 352, |
|
"text": "Yin et al. 2019)", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 385, |
|
"end": 407, |
|
"text": "(Bansal et al., 2020b)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 677, |
|
"end": 698, |
|
"text": "Bansal et al. (2020a)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We assume that for some input example x = {x 1 , x 2 ...x n } with n tokens and a gold label y, there exists an annotator-generated explanation of why the gold label is correct and/or why a model might output an incorrect prediction. We convert this natural language explanation into an oracle importance score (I O m (x, y)) which quantifies the extent to which annotators expect each token in x to push the model's prediction towards or away from the gold label. 1 Then, to compute Importance Alignment we correlate the oracle importance score for x with the model importance (I m (x, y)), which quantifies the extent to which each token in x actually pushes the model's prediction towards or away from the gold label. A greater correlation between model and oracle importance scores indicates a greater alignment between how annotators expect models to make decisions and how these models actually make decisions. In the remainder of this section we describe our methods to calculate oracle and model importance scores as well as the importance alignment metric from correlating these two scores.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Measuring Importance Alignment", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We compute model importance scores using Integrated Gradients (IG; Sundararajan et al. 2017) . Concretely, we define model importance (I) for some model m and some example x (e.g., the concatenation of the premise and hypothesis for NLI) with the gold label y, as follows,", |
|
"cite_spans": [ |
|
{ |
|
"start": 67, |
|
"end": 92, |
|
"text": "Sundararajan et al. 2017)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Computing model importance scores", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "I m (x, y) = |IG m (x, y)|", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Computing model importance scores", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where IG m returns a vector of IG attribution scores with respect to the gold label for each token in x and | \u2022 | denotes component-wise absolute value. For some token in the input x i , a positive IG attribution score indicates that x i pushed the model's prediction towards the gold label, whereas a negative IG attribution score indicates that x i pushed the model's prediction away from the gold label. In a model with high importance alignment, we would expect positive attribution scores to be correlated with annotator expectations about why the gold label is correct, and negative attribution scores to be correlated with annotator expectations about why a model might output an incorrect prediction. In this paper, we are considering explanations which capture both of these annotator expectations without differentiating between them. Therefore, we use the absolute value of the IG attribution score.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Computing model importance scores", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Why Integrated Gradients? We use Integrated Gradients because they are axiomatically both interpretable and faithful (Sundararajan et al., 2017) , unlike attention based methods which have been argued are not faithful explanations of models' decision making processes (Jain and Wallace 2019, but see Wiegreffe and Pinter 2019 for a counterpoint). Other perturbation methods which are more faithful than attention such as LIME (Ribeiro et al., 2016) , SHAP (Lundberg and Lee, 2017), and their variants can be used, although these methods have been argued to be unreliable (Camburu et al., 2019; Slack et al., 2020; Camburu et al., 2021) . No current consensus exists on which methods should be employed (Hase and Bansal, 2020; Ghorbani et al., 2019) . Although we use IG, crucially, our method is not dependent on it; IG can be replaced with any method that can faithfully assign an importance score to each token in the input.", |
|
"cite_spans": [ |
|
{ |
|
"start": 117, |
|
"end": 144, |
|
"text": "(Sundararajan et al., 2017)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 300, |
|
"end": 313, |
|
"text": "Wiegreffe and", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 426, |
|
"end": 448, |
|
"text": "(Ribeiro et al., 2016)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 571, |
|
"end": 593, |
|
"text": "(Camburu et al., 2019;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 594, |
|
"end": 613, |
|
"text": "Slack et al., 2020;", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 614, |
|
"end": 635, |
|
"text": "Camburu et al., 2021)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 702, |
|
"end": 725, |
|
"text": "(Hase and Bansal, 2020;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 726, |
|
"end": 748, |
|
"text": "Ghorbani et al., 2019)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Computing model importance scores", |
|
"sec_num": "3.1" |
|
}, |
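
{

"text": "As a concrete illustration, below is a minimal sketch of how token-level model importance scores of the form |IG_m(x, y)| could be computed for one of our Transformer classifiers. It assumes a HuggingFace sequence-classification model and the Captum implementation of Integrated Gradients; the checkpoint name, the [PAD] baseline and the number of integration steps are illustrative assumptions rather than the exact configuration we used.\n\nimport torch\nfrom transformers import AutoTokenizer, AutoModelForSequenceClassification\nfrom captum.attr import LayerIntegratedGradients\n\ntokenizer = AutoTokenizer.from_pretrained(\"bert-base-uncased\")\nmodel = AutoModelForSequenceClassification.from_pretrained(\"bert-base-uncased\", num_labels=3)\nmodel.eval()\n\ndef forward_logits(input_ids, attention_mask):\n    return model(input_ids, attention_mask=attention_mask).logits\n\ndef model_importance(premise, hypothesis, gold_label):\n    enc = tokenizer(premise, hypothesis, return_tensors=\"pt\")\n    input_ids, attention_mask = enc[\"input_ids\"], enc[\"attention_mask\"]\n    # Baseline input: every token replaced by [PAD] (one common choice; others are possible).\n    baseline_ids = torch.full_like(input_ids, tokenizer.pad_token_id)\n    lig = LayerIntegratedGradients(forward_logits, model.bert.embeddings)\n    attributions = lig.attribute(inputs=input_ids, baselines=baseline_ids, additional_forward_args=(attention_mask,), target=gold_label, n_steps=50)\n    # Sum over the embedding dimension and take the absolute value, as in Equation (1).\n    return attributions.sum(dim=-1).abs().squeeze(0)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Computing model importance scores",

"sec_num": "3.1"

},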
|
{ |
|
"text": "We describe three methods of converting natural language explanations into oracle importance scores: hard oracle, soft oracle and expert oracle.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Computing oracle importance scores", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Hard oracle importance. Hard oracle importance is a binary measure of token overlap between the input and the explanation. This measure captures the intuition that if annotators thought that specific tokens in the input are important for pushing the model towards or away from the gold label, then they will use those words in their explanation. Formally, for some input x with gold label y and explanation e, we define hard oracle importance as follows, where the overlap function yields a binary vector in which the i-th component is valued 1 if x i \u2208 e and x i is not a stop word; 2 the i-th component of the vector is valued 0 otherwise.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Computing oracle importance scores", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "I H m (x, y, e) = overlap(x m , e)", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Computing oracle importance scores", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "This measure is model specific only to the extent that the models differ in how they tokenize the input x; we assume that the annotator generated explanations themselves do not describe expectations about specific models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Computing oracle importance scores", |
|
"sec_num": "3.2" |
|
}, |
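
{

"text": "A minimal sketch of the overlap function is given below. It assumes pre-tokenized input and explanation strings and the NLTK English stop word list (the list we report using); lowercasing tokens before matching is an additional assumption, since the matching details are not otherwise specified here.\n\nfrom nltk.corpus import stopwords  # assumes the NLTK stopwords corpus has been downloaded\n\nSTOP_WORDS = set(stopwords.words(\"english\"))\n\ndef hard_oracle_importance(input_tokens, explanation_tokens):\n    # Binary vector: 1 if the input token occurs in the explanation and is not a stop word, else 0.\n    explanation_vocab = {t.lower() for t in explanation_tokens}\n    return [1 if (t.lower() in explanation_vocab and t.lower() not in STOP_WORDS) else 0 for t in input_tokens]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Computing oracle importance scores",

"sec_num": "3.2"

},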
|
{ |
|
"text": "Soft oracle importance. The hard oracle is very simple and does not capture synonyms, entities referred to with pronouns, paraphrases, etc. To overcome these shortcomings, we also define soft oracle importance where we compute importance scores from IG of explanation-informed models. The input for these explanation-informed models is the original input x concatenated with some explanation e. The task of the model is to predict not only the gold label for the original task y, but also a binary output indicating whether e was an explanation about the current example or about some other example. This task requires the model to perform not only the target task (e.g., NLI) but also requires the model to establish a relationship between the provided explanation and the input, thereby incorporating information from the natural language explanation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Computing oracle importance scores", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Formally, for some input x with gold label y and explanation e, we define soft oracle importance as,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Computing oracle importance scores", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "I S m (x, y, e) = |IG m (x, e, y )| (3)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Computing oracle importance scores", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where m refers to the explanation-informed model and y refers to the target output of m which incorporates both y and a binary output indicating whether e is a matched explanation (e.g., for NLI, y would have six possible values). This measure is also model specific both because of tokenization and because the explanation-informed model has the same model architecture as the target model. Expert oracle importance. The hard and soft oracles are automatic ways of computing oracle importance scores. To validate these automatic measures, we also computed oracle importance scores from experts (three of the authors on this paper). Given the input, gold label, and annotator generated explanation for a given example, the expert annotators (N = 3) indicated which tokens of the input they believed that the original annotator (i.e., the one who generated the explanation) thought were important for the model's prediction. Since generating expert annotations was very time consuming, we computed this measure only for a random subset of 60 examples.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Computing oracle importance scores", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Formally, for some input x with gold label y and explanation e, we define expert oracle importance for any given token as the proportion of expert annotators who indicated that the token was important according to the annotator who generated the explanation. This is expressed as,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Computing oracle importance scores", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "I E m (x, y, e) = 1 N N k expert k (x m , e, y) (4)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Computing oracle importance scores", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where expert k returns a binary vector in which the i-th element is valued 1 if annotator k indicated that the i-th token was important, and 0 otherwise.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Computing oracle importance scores", |
|
"sec_num": "3.2" |
|
}, |
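
{

"text": "Equation (4) amounts to averaging the experts' binary judgments for each token. A minimal sketch, assuming the expert annotations have already been aligned to the model's tokenization of x:\n\nimport numpy as np\n\ndef expert_oracle_importance(expert_vectors):\n    # expert_vectors: one binary vector per expert annotator (here N = 3), each of length len(x).\n    # Returns, for each token, the proportion of experts who marked it as important.\n    return np.mean(np.array(expert_vectors, dtype=float), axis=0)\n\n# For example, if two of three experts marked the second token as important:\n# expert_oracle_importance([[0, 1, 0], [0, 1, 1], [0, 0, 0]]) -> [0.0, 0.67, 0.33] (approximately)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Computing oracle importance scores",

"sec_num": "3.2"

},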
|
{ |
|
"text": "Like with hard oracle importance, this measure is model specific only to the extent that the models differ in how they tokenize the input x.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Computing oracle importance scores", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We could not compute oracle importance scores from the original annotators and had to rely on expert annotators because this information was absent from the dataset of natural language explanations we used. Additionally, we argue below that collecting high-quality oracle importance annotations from na\u00efve annotators can be very tricky.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Computing oracle importance scores", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Why start from human-generated natural language explanations? We convert natural language explanations to oracle importance scores instead of collecting oracle importance scores directly from na\u00efve annotators for two reasons. First, there already exist data sets of natural language explanations, where annotators were required to reason about models' decision making in an adversarial setting (Nie et al., 2020) , and more such data sets are being generated (Kiela et al., 2021) . Second, we contend that for most nonexpert annotators, asking them to provide verbal descriptions is easier and more natural than asking them to answer a question like, \"For which words do you think the model's prediction would change the most if that word was blanked out?\". In fact, to pursue this angle assiduously, one would ideally recruit annotators who know what IG is and ask them to predict IG scores-after all, since we are using IG scores to quantify how the models make decisions, the best way to quantify \"how humans think models make decisions\" would be to measure what humans think the IG scores will be. Unfortunately, such a task would be challenging for most non-expert annotators, making it infeasible to collect high quality annotations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 394, |
|
"end": 412, |
|
"text": "(Nie et al., 2020)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 459, |
|
"end": 479, |
|
"text": "(Kiela et al., 2021)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Computing oracle importance scores", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "For each example with input x, gold label y and explanation e, we compute importance alignment for some model m as the mean Fishertransformed product-moment (i.e., Pearson's) correlation (r) between the model importance and oracle importance scores for that example as below, where the oracle O is either the hard (H), soft (S) or expert (E) oracles:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Importance Alignment Metric", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "C m (x, y) = arctanh(r(I m (x, y), I O m (x, y, e)))", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Importance Alignment Metric", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We Fisher-transform the correlation coefficient r to ensure that C m (x, y) is unbiased and does not violate normality assumptions required for the statistical analyses we use (Fisher, 1921) . 3 For each example, we also compute a random baseline for C m (x, y) where the oracle 3 Fisher transformed correlation coefficients are approximately normally distributed when the correlation coefficients are calculated from sample pairs drawn from bivariate normal distributions. Although Im(x, y) and I O m (x, y, e)) are not normally distributed, visual examination of the resulting fisher transformed correlations revealed that Cm(x, y) were in fact approximately normal.", |
|
"cite_spans": [ |
|
{ |
|
"start": 176, |
|
"end": 190, |
|
"text": "(Fisher, 1921)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 193, |
|
"end": 194, |
|
"text": "3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 279, |
|
"end": 280, |
|
"text": "3", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Importance Alignment Metric", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "importances are calculated by pairing the input with an explanation (e R ) which was written for a different input example and was chosen at random:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Importance Alignment Metric", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "C m R (x, y) = arctanh(r(I m (x, y), I O m (x, y, e R )))", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Importance Alignment Metric", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We compute this measure to control for spurious patterns that can drive the correlation between model and oracle importances: we can find a non-zero correlation between the importance scores if a certain model m, such as BERT, (explanation informed or not) always assigned high IG attribution values to tokens at specific indices; we can also find a non-zero correlation if certain tokens (e.g., \"the\") received high attribution irrespective of the context, and these tokens occurred frequently in the input and explanation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Importance Alignment Metric", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "To measure the extent to which model and oracle importances are correlated with each other over and above spurious correlations, we measure the mean difference (\u2206A) between C m (x, y) and C m R (x, y) for all examples in some dataset D and backtransform it to the correlation scale:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Importance Alignment Metric", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u2206A = tanh \uf8eb \uf8ed 1 | D | (x,y)\u2208D Cm(x, y) \u2212 Cm R (x, y) \uf8f6 \uf8f8", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Importance Alignment Metric", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "To measure whether \u2206A is significantly greater than 0, we use paired t-test between C m (x, y) and C m R (x, y) for all examples in D. We compute a different measure of \u2206A for each oracle importance score.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Importance Alignment Metric", |
|
"sec_num": "3.3" |
|
}, |
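
{

"text": "Putting Equations (5)-(7) together, the per-example alignment scores, the random baseline, \u2206A and the paired t-test can be computed as in the sketch below. It assumes that the model and oracle importance vectors for each example have already been computed over the same tokenization; the function and variable names are illustrative.\n\nimport numpy as np\nfrom scipy.stats import pearsonr, ttest_rel\n\ndef fisher_alignment(model_imp, oracle_imp):\n    # Fisher-transformed Pearson correlation between model and oracle importances (Equations 5 and 6).\n    r, _ = pearsonr(model_imp, oracle_imp)\n    return np.arctanh(r)\n\ndef delta_alignment(examples):\n    # examples: iterable of (model_imp, oracle_imp, oracle_imp_random) triples, one per example.\n    c = np.array([fisher_alignment(m, o) for m, o, _ in examples])\n    c_rand = np.array([fisher_alignment(m, o_rand) for m, _, o_rand in examples])\n    t_stat, p_value = ttest_rel(c, c_rand)  # paired t-test on the per-example differences\n    delta_a = np.tanh(np.mean(c - c_rand))  # back-transformed mean difference (Equation 7)\n    return delta_a, t_stat, p_value",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Importance Alignment Metric",

"sec_num": "3.3"

},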
|
{ |
|
"text": "4 Experimental details", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Importance Alignment Metric", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Target models. We measured the importance alignment for six pretrained Transformer language models: BERT base and large (Devlin et al., 2019) ; RoBERTa base and large (Liu et al., 2019) ; and ELECTRA base and large (Clark et al., 2020) . We fine-tuned these models on the combination of the following NLI datasets: SNLI (Bowman et al., 2015), MultiNLI (Williams et al., 2018) , NLI-recast FEVER (Thorne et al., 2019) and ANLI rounds 1-3 (Nie et al., 2020) . We used the hyperparameters in the ANLI codebase for finetuning. 4 We used the same datasets to also train two non-transformer models: a bag-of-words (BOW) 2017, using 3 layers of BiLSTMs (Hochreiter and Schmidhuber, 1997) with residual connections on top of BERT token embeddings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 120, |
|
"end": 141, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 167, |
|
"end": 185, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 215, |
|
"end": 235, |
|
"text": "(Clark et al., 2020)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 352, |
|
"end": 375, |
|
"text": "(Williams et al., 2018)", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 395, |
|
"end": 416, |
|
"text": "(Thorne et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 437, |
|
"end": 455, |
|
"text": "(Nie et al., 2020)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 523, |
|
"end": 524, |
|
"text": "4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 646, |
|
"end": 680, |
|
"text": "(Hochreiter and Schmidhuber, 1997)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Explanation informed models. To compute soft oracle importance, we trained explanation informed models for each target model architecture on a six-way classification task. We trained three models per architecture using different random seeds. In this task, the input to the model was a context-hypothesis pair concatenated with an annotator-generated explanation. The output of the model was a joint label indicating whether the context entails, contradicts or is neutral with respect to the hypothesis, and whether the explanation matches the context-hypothesis pair. We generated the training (n = 19043) and development (n = 2116) datasets for these classifiers by subsetting the portion of the ANLI training set for which an explanation was provided -i.e., the examples in the training set in which the ANLI annotators had successfully fooled the model. We presented each of the 19043 examples twice when training the explanation informed models: once with a matched explanation (i.e., the original one written by the annotator for that explanation) and once with a randomly selected explanation (see \u00a73.3 above). We trained all models for two epochs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "4.1" |
|
}, |
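
{

"text": "A minimal sketch of how a training example for these explanation-informed models could be constructed is shown below. The six-way joint label (three NLI classes, each paired with a matched or mismatched explanation) follows the description above; the separator convention and the exact label encoding are illustrative assumptions.\n\nimport random\n\nNLI_LABELS = [\"entailment\", \"neutral\", \"contradiction\"]\n\ndef make_joint_example(context, hypothesis, explanation, nli_label, matched):\n    # The model input is the context-hypothesis pair concatenated with an explanation.\n    text = f\"{context} [SEP] {hypothesis} [SEP] {explanation}\"\n    # Joint label: offset the three NLI classes by 3 when the explanation is mismatched.\n    joint_label = NLI_LABELS.index(nli_label) + (0 if matched else 3)\n    return text, joint_label\n\ndef expand_with_random_explanations(examples):\n    # Each example appears twice: once with its own explanation and once with an explanation\n    # randomly drawn from a different example (see the random baseline in Section 3.3).\n    out = []\n    for i, (context, hypothesis, explanation, label) in enumerate(examples):\n        out.append(make_joint_example(context, hypothesis, explanation, label, matched=True))\n        j = random.choice([k for k in range(len(examples)) if k != i])\n        out.append(make_joint_example(context, hypothesis, examples[j][2], label, matched=False))\n    return out",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Models",

"sec_num": "4.1"

},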
|
{ |
|
"text": "The accuracy of these explanation informed models either matched or surpassed that of the target model despite being trained on only a small subset of the original data (see Table 1 ). This suggests that these models did learn to incorporate information about explanations, and that the information present in the explanations was useful for NLI.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 174, |
|
"end": 181, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "ANLI. We measured importance alignment on the development set of the ANLI dataset. In this dataset, annotators were given a context and a label and were asked to write a hypothesis that fooled a target model; if the model was fooled, annotators explained in natural language why the provided label was correct and why they thought model was fooled into generating an incorrect prediction. While other datasets with natural language explanations for NLI exist, (e.g., e-SNLI; Camburu et al. 2018) , the explanations in these datasets only address why the gold label is correct, and not why the annotators thought the model generated an incorrect prediction. Additionally, the adversarial setting in which ANLI was collected encourages the annotators to reason about models' decision making. These two factors make ANLI more suitable for our purposes than other explanation datasets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 475, |
|
"end": 495, |
|
"text": "Camburu et al. 2018)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Datasets", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Since annotators provided explanations only when the model generated an incorrect prediction, to allow for an apples-to-apples comparison, we only compute importance alignment for examples in which our target models generate an incorrect prediction.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Datasets", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "As a consequence, the specific examples used to measure importance alignment differs across models: different models fail on different examples. To ensure that our results are not driven by the differences between examples, we repeated our analyses on a subset of examples that all models failed on, and we found that our results were nearly identical (compare Table 2 and Table 3 ).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 361, |
|
"end": 381, |
|
"text": "Table 2 and Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation Datasets", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We collected expert annotations (see \u00a73.1) on 60 randomly sampled examples from the set of examples that all the models failed on, 20 from each round of ANLI. We computed the expert importance alignment score (A E ) for this subset of examples and repeated our analyses.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Datasets", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "HANS. We measured the extent to which importance alignment scores correlated with accuracy on the HANS diagnostic dataset (McCoy et al., 2019) . This dataset measures the extent to which NLI models rely on non-human like heuristics when performing inference, such as inferring that the context entails the hypothesis if all of the words in the hypotheses are in the context: models which do not rely on these nonhuman like heuristics have higher accuracy on the this dataset. If we assume that na\u00efve annotators in general do not expect NLI models to rely on such heuristics, then we might expect models with high importance alignment (i.e., models which make inference decisions in ways annotators expect them to) to also have high accuracy on the HANS dataset.", |
|
"cite_spans": [ |
|
{ |
|
"start": 122, |
|
"end": 142, |
|
"text": "(McCoy et al., 2019)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Datasets", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In Table 2 , we report the importance alignment scores for the hard and soft oracles computed over all the examples the models generated an incorrect prediction for, averaged across the five random seeds. In Table 3 , we report the importance alignment scores for hard, soft and expert oracles computed over the subset of 60 examples with expert annotations. Since the importance alignment scores for the hard and soft oracles are nearly identical across both tables, we focus our discussion of the results from the subset of examples with expert annotations.We repeated the reported analyses with hard and soft oracles on all the examples, and found qualitatively similar results (see Appendix A).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 208, |
|
"end": 215, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Effect of model size on Importance Alignment. Across all three types of Importance Alignment scores (A H , A S and A E ) the base versions of the transformer models had higher importance alignment than their larger counterparts, with BERT-base having the highest importance alignment. To test the statistical significance of this this observation, we fit three linear mixed effect regression models: one for each type of oracle.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We predicted the pair-wise difference between C m (x, y) and C m R (x, y) (see Equation 5 and Equation 6) as a function of the following predictors: model size (base vs. large), model type (BERT vs. ELECTRA and BERT vs. RoBERTa) and the interaction between the two. We included model type and its interaction with model size as predictors to measure the effect of model size over and above the differences between specific models. We also included a random intercept and random slope of model size for every example to incorporate the following assumptions: first, the difference C m (x, y) and C m R (x, y) can differ for every example; second, the difference between base and large models can also differ for every example. The results described below were significant at a threshold of p < 0.005 unless specified otherwise. For further details see Appendix A.", |
|
"cite_spans": [ |
|
{ |
|
"start": 189, |
|
"end": 228, |
|
"text": "(BERT vs. ELECTRA and BERT vs. RoBERTa)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The analyses indicated that across all measures importance alignment, \u2206A was significantly greater for the base models when compared to their larger counterparts. Additionally, \u2206A was significantly greater in BERT models than in RoBERTa and ELECTRA models (base and large).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Although the results suggest that smaller models have stronger importance alignment than their larger counterparts, our experiments with the BInfersent and BOW models suggest that smaller models do not always have higher importance alignment: the importance alignment for both these models is lower than the alignment for BERT-base model (the smallest of the transformer models when taking into consideration both the number of parameters and pre-training size).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "To test the statistical significance of this numerical result, we fit another set of linear mixed effects model where we predicted the pairwise difference between C m (x, y) and C m R (x, y) as a function of model type (BERT-base vs. BInferSent and BERT-base vs. BOW). We also included a random intercept of item. As expected, the importance alignment for the BERT-base model was significantly greater than that for the BInferSent and BOW models for all the measures.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "ANLI accuracy and Importance Alignment. Based on the results from Table 2 and Table 3 , we wondered whether importance alignment might be negatively correlated with NLI accuracy: the base versions of the models which have higher importance alignment have lower accuracy on ANLI compared to their larger counterparts. To test this, we computed a separate value of A H , A S and A E for each random seed of the target model and for each round of ANLI. Then, we computed the product moment (i.e., Pearson's) correlation between model accuracy on NLI and A H , A S and A E . We found a significant correlation between accuracy and A E (r = 0.42; p = 0.008). However, this correlation was being driven entirely by the BOW models, which had both low accuracy and low importance alignment scores. When repeating the analysis with the BOW models excluded, we found that none of the measures of importance alignment were significantly correlated with NLI accuracy (A H : r = \u22120.09 and p = 0.61; A S : r = 0.04 and p = 0.80; A E r = \u22120.04 and p = 0.83; see Figure 2a ). We repeated the analyses for A H and A S for all wrong examples and found that only A H was significantly correlated with accuracy (r = 0.39, p = 0.01; see Appendix A.2) HANS accuracy and Importance Alignment. As discussed earlier, we hypothesized that high importance alignment might result in models relying less on non-human-like heuristics, thereby resulting in higher accuracy on the HANS dataset. We found no such correlation, however (A H : r = 0.04 and p = 0.82; A S : r = \u22120.21 and p = 0.21; A E r = 0.23 and p = 0.17; see Figure 2b) . 5 This lack of correlation is likely a result of the mismatch between how human-likeness is defined in HANS and in our importance alignment measures. In HANS, the targeted heuristics are simple (i.e., can be articulated with a rule), and describe general principles of how models ought not behave if they are to be human-like. In contrast, our measures of importance alignment are derived from example level explanations of how na\u00efve annotators expected models to behave, and as such are not based on any overarching easy-toarticulate principles. When evaluating whether models make decisions as humans expect them to, jointly considering both these definitions of humanlikeness can be useful.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1605, |
|
"end": 1606, |
|
"text": "5", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 66, |
|
"end": 85, |
|
"text": "Table 2 and Table 3", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 1047, |
|
"end": 1056, |
|
"text": "Figure 2a", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1592, |
|
"end": 1602, |
|
"text": "Figure 2b)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Comparing the Importance Alignment Scores. We used hard and soft oracles to automatically measure oracle importance scores. To validate these methods, we computed the Spearman rank correlation between the importance scores derived from these methods and from the manually annotated expert oracle. The hard oracle was moderately correlated with the expert oracle (r = 0.24, p < 0.0001), whereas the soft oracle was 5 When we considered the heuristics separately, we found some marginally significant correlations (see Appendix A.2) more weakly correlated (r = 0.14, p < 0.0001). Additionally, the hard and soft oracle importance scores were also weakly correlated with each other (r = 0.11, p < 0.0001). 6 Taken together, these results suggest that neither the hard nor the soft oracle measures are perfect proxies for expert human importance scores. This imperfection does not impact the conclusions we draw in this paper, however: the results we discussed held true across all the measures.", |
|
"cite_spans": [ |
|
{ |
|
"start": 414, |
|
"end": 415, |
|
"text": "5", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In this paper, we argued that it is important to not only evaluate models on how accurate they are on a given task, but also on whether the decisions that the models make align with how humans expect them to make these decisions. We introduced a measure called Importance Alignment which quantifies the extent to which the parts of the input that non-expert annotators expected to influence models' decisions actually influenced the decisions. To quantify which parts of the input influenced model decisions (model importance), we used Integrated Gradients. To quantify annotator expectations (oracle importance), we proposed three methods of quantifying annotator generated natural language explanations of model behaviour: two automatic and one that relied on expert input.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "As a case study, we applied this method to measure Importance Alignment in eight NLI models (six transformers, an LSTM and a BOW model), using annotator generated explanations from the ANLI dataset. We found that, across all three measures of importance alignment, the base versions of the transformer models had significantly higher importance alignment than their larger counterparts, with BERT-base having the highest importance alignment. Smaller models do not always result in higher importance alignment, however: the BERT-base model had higher alignment than the LSTM and BOW models. Additionally, importance alignment scores were not correlated with model accuracy on ANLI or the HANS diagnostic dataset in most cases. This suggests that importance alignment and accuracy are complementary methods of evaluating models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Future work. There are at least four directions in which this work can be extended. First, future work can evaluate whether our conclusions about model size in NLI models generalizes to smaller transformer models (Turc et al., 2019; Warstadt et al., 2020) and to models trained on other NLP tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 213, |
|
"end": 232, |
|
"text": "(Turc et al., 2019;", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 233, |
|
"end": 255, |
|
"text": "Warstadt et al., 2020)", |
|
"ref_id": "BIBREF45" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Second, future work can build on our methods of calculating model and oracle importance. The two methods of automatically computing oracle importance we proposed as a starting point were only moderately correlated with the method that relies of expert input. Future work can develop better methods of measuring oracle importance incorporating the strengths of both. Future work can also explore other existing ways of calculating model importance (e.g., LIME, SHAP and their variants).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Third, future work can generate more detailed datasets of natural language explanations of model behaviour. For example, in the dataset we used, the explanations the annotators provided were an amalgamation of both why the the label was correct and why the model might have been fooled. Additionally, annotators generated explanations only when the model generated an incorrect output. By disentangling these two types of explanations and collecting explanations for when models generate correct outputs future work can separately study whether models succeed and fail in ways people expect them to. Similarly in the dataset we used, each context-hypothesis-label triplet was associated with only one annotator generated explanation. However, it is possible that there are several explanations of model behaviour that are equally valid. Collecting more explanations per triplet can improve our understanding of how people expect models to succeed and fail.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Fourth, future work could explore which factors drive higher importance alignment. For example, we observed that transformer models with fewer parameters had higher importance alignment than models with more parameters. Does this finding apply to non-transformer architectures too? Is there a threshold for model parameters, where this inverse relationship between model size and alignment score breaks down? Additionally, can other factors like model architecture, type of training objective or the types of sentences the models were trained on influence importance alignment? Such exploration can not only result in models better aligned with human expectations, but also improve our understanding of what drives importance alignment.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We proposed a novel metric, Importance Alignment, to measure the extent to which human-generated explanations of model decisions align with how models actually make these decisions. As a case study, we used this metric to evaluate eight different models trained on NLI and found that the BERT-base model had the highest alignment with human-generated explanations. We also found that our metric was not correlated with model accuracy, suggesting that accuracy and Importance Alignment are complementary ways of evaluating models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "A.1 Effect of model size Formula for Transformer models smf.mixedlm(\"fisher_cor_diff~C(model_size, Treatment(reference='base')) * C(model,Treatment(reference='bert'))\", data = transformer_dat, groups = transformer_dat[\"ex_id\"], re_formula=\"~model_size\") Formula for smaller models (BERT-base, InferSent and BOW) smf.mixedlm(\"fisher_cor_diff~C(model,Treatment(reference='bert'))\", data = small_dat, groups = small_dat[\"ex_id\"], re_formula=\"~model_size\") Transformer models Large (vs. Base) -0.32 *** -0.142 *** -0.057 ** -0.173 *** -0.115 *** ELECTRA (vs. BERT) -0.044 *** -0.057 *** -0.048 ** -0.067 *** -0.054 * RoBERTa (vs. BERT) -0.099 *** -0.101 *** -0.106 *** -0.105 *** -0.083 *** ELECTRA : Large -0.070 *** 0.085 *** -0.014 0.109 *** -0.051 RoBERTa : Large -0.044 *** 0.127 *** -0.016 0.134 *** 0.012", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Details about statistical analyses", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Smaller models BOW (vs. BERT-base) -0.203 *** -0.104 *** -0.184 *** -0.125 *** -0.184 *** BInferSent (vs. BERT-base) -0.094 *** -0.120 *** -0.080 *** -0.154 *** -0.080 *** Table 4 : *** , ** and * indicate p < 0.0001, 0.001 and 0.01 respectively and ':' indicates an interaction effect. A separate mixed effects regression model was fit for each column. Negative coefficients for the main effects indicate that the baseline value was greater than the comparison (e.g., Base was greater than Large).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 172, |
|
"end": 179, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Details about statistical analyses", |
|
"sec_num": null |
|
}, |
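
{

"text": "For completeness, the sketch below shows how the first formula above could be fit with statsmodels; the synthetic data frame is a stand-in with the assumed column names (fisher_cor_diff, model, model_size, ex_id), not the actual per-example scores.\n\nimport numpy as np\nimport pandas as pd\nimport statsmodels.formula.api as smf\n\n# Synthetic stand-in data: one row per example, model and size combination.\nrng = np.random.default_rng(0)\nrows = [{\"ex_id\": ex_id, \"model\": model, \"model_size\": size, \"fisher_cor_diff\": rng.normal(0.1, 0.2)}\n        for ex_id in range(200)\n        for model in [\"bert\", \"roberta\", \"electra\"]\n        for size in [\"base\", \"large\"]]\ntransformer_dat = pd.DataFrame(rows)\n\n# Mixed-effects regression with a by-example random intercept and random slope of model size.\nmd = smf.mixedlm(\"fisher_cor_diff ~ C(model_size, Treatment(reference='base')) * C(model, Treatment(reference='bert'))\", data=transformer_dat, groups=transformer_dat[\"ex_id\"], re_formula=\"~model_size\")\nprint(md.fit().summary())",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "A Details about statistical analyses",

"sec_num": null

},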
|
{ |
|
"text": "We refer to this as an \"oracle\", because we consider importance scores derived from human-generated explanations to be the ground truth.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We used the list of stop words from NLTK(Bird et al., 2009)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/facebookresearch/ anli/blob/master/script/example_scripts/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The results are comparable with Pearson's correlation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank Luke Zettlemoyer, Ana Valeria Gonzalez, the Dynabench team, Roy Schwartz, Oana Maria Camburu, Tom McCoy, and Tal Linzen's Computation and Psycholinguistics lab for their invaluable comments and support.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Eric Horvitz, and Daniel S Weld. 2020a. Optimizing AI for teamwork", |
|
"authors": [ |
|
{ |
|
"first": "Gagan", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Besmira", |
|
"middle": [], |
|
"last": "Nushi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ece", |
|
"middle": [], |
|
"last": "Kamar", |
|
"suffix": "" |
|
},

{

"first": "Eric",

"middle": [],

"last": "Horvitz",

"suffix": ""

},

{

"first": "Daniel",

"middle": [

"S"

],

"last": "Weld",

"suffix": ""

}
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2004.13102" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gagan Bansal, Besmira Nushi, Ece Kamar, Eric Horvitz, and Daniel S Weld. 2020a. Optimizing AI for teamwork. arXiv preprint arXiv:2004.13102.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Beyond accuracy: The role of mental models in human-ai team performance", |
|
"authors": [ |
|
{ |
|
"first": "Gagan", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Besmira", |
|
"middle": [], |
|
"last": "Nushi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ece", |
|
"middle": [], |
|
"last": "Kamar", |
|
"suffix": "" |
|
}, |
|
{

"first": "Walter",

"middle": [

"S"

],

"last": "Lasecki",

"suffix": ""

},

{

"first": "Daniel",

"middle": [

"S"

],

"last": "Weld",

"suffix": ""

},

{

"first": "Eric",

"middle": [],

"last": "Horvitz",

"suffix": ""

}
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the AAAI Conference on Human Computation and Crowdsourcing", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "2--11", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gagan Bansal, Besmira Nushi, Ece Kamar, Walter S Lasecki, Daniel S Weld, and Eric Horvitz. 2019a. Beyond accuracy: The role of mental models in human-ai team performance. Proceedings of the AAAI Conference on Human Computation and Crowdsourcing, 7(1):2-11.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Updates in human-AI teams: Understanding and addressing the performance/compatibility tradeoff", |
|
"authors": [ |
|
{ |
|
"first": "Gagan", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Besmira", |
|
"middle": [], |
|
"last": "Nushi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ece", |
|
"middle": [], |
|
"last": "Kamar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Daniel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Weld", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Walter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Lasecki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Horvitz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "33", |
|
"issue": "", |
|
"pages": "2429--2437", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gagan Bansal, Besmira Nushi, Ece Kamar, Daniel S Weld, Walter S Lasecki, and Eric Horvitz. 2019b. Updates in human-AI teams: Understanding and addressing the performance/compatibility tradeoff. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 2429-2437.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Natural Language Processing with Python", |
|
"authors": [ |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Bird", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Loper", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ewan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Steven Bird, Edward Loper, and Ewan Klein. 2009. Natural Language Processing with Python. O'Reilly Media Inc.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "A large annotated corpus for learning natural language inference", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Samuel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gabor", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Angeli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Potts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "632--642", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D15-1075" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Samuel R. Bowman, Gabor Angeli, Christopher Potts, and Christopher D. Manning. 2015. A large annotated corpus for learning natural language inference. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 632-642, Lisbon, Portugal. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Can I trust the explainer? verifying post-hoc explanatory methods", |
|
"authors": [ |
|
{ |
|
"first": "Oana-Maria", |
|
"middle": [], |
|
"last": "Camburu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eleonora", |
|
"middle": [], |
|
"last": "Giunchiglia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Foerster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Lukasiewicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "NeurIPS 2019 Workshop on Safety and Robustness in Decision Making", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oana-Maria Camburu, Eleonora Giunchiglia, Jakob Foerster, Thomas Lukasiewicz, and Phil Blunsom. 2019. Can I trust the explainer? verifying post-hoc explanatory methods. In NeurIPS 2019 Workshop on Safety and Robustness in Decision Making, Vancouver, Canada.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "The struggles of feature-based explanations: Shapley values vs. minimal sufficient subsets", |
|
"authors": [ |
|
{ |
|
"first": "Oana-Maria", |
|
"middle": [], |
|
"last": "Camburu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eleonora", |
|
"middle": [], |
|
"last": "Giunchiglia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Foerster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Lukasiewicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the Explainable Agency in Artificial Intelligence Workshop at AAAI 2021", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oana-Maria Camburu, Eleonora Giunchiglia, Jakob Foerster, Thomas Lukasiewicz, and Phil Blunsom. 2021. The struggles of feature-based explanations: Shapley values vs. minimal sufficient subsets. In Proceedings of the Explainable Agency in Artificial Intelligence Workshop at AAAI 2021.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "e-SNLI: Natural language inference with natural language explanations", |
|
"authors": [ |
|
{ |
|
"first": "Oana-Maria", |
|
"middle": [], |
|
"last": "Camburu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rockt\u00e4schel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Lukasiewicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "31", |
|
"issue": "", |
|
"pages": "9539--9549", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oana-Maria Camburu, Tim Rockt\u00e4schel, Thomas Lukasiewicz, and Phil Blunsom. 2018. e-SNLI: Natural language inference with natural language explanations. In Advances in Neural Information Processing Systems, volume 31, pages 9539-9549. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "ELECTRA: pretraining text encoders as discriminators rather than generators", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minh-Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "8th International Conference on Learning Representations", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin Clark, Minh-Thang Luong, Quoc V. Le, and Christopher D. Manning. 2020. ELECTRA: pre- training text encoders as discriminators rather than generators. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Supervised learning of universal sentence representations from natural language inference data", |
|
"authors": [ |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Holger", |
|
"middle": [], |
|
"last": "Schwenk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lo\u00efc", |
|
"middle": [], |
|
"last": "Barrault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "670--680", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D17-1070" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexis Conneau, Douwe Kiela, Holger Schwenk, Lo\u00efc Barrault, and Antoine Bordes. 2017. Supervised learning of universal sentence representations from natural language inference data. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 670-680, Copenhagen, Denmark. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "The PASCAL recognising textual entailment challenge", |
|
"authors": [ |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Ido Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernardo", |
|
"middle": [], |
|
"last": "Glickman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Magnini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Machine learning challenges. evaluating predictive uncertainty, visual object classification, and recognising tectual entailment", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "177--190", |
|
"other_ids": { |
|
"DOI": [ |
|
"https://link.springer.com/chapter/10.1007/11736790_9" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ido Dagan, Oren Glickman, and Bernardo Magnini. 2006. The PASCAL recognising textual entailment challenge. In Machine learning challenges. evaluating predictive uncertainty, visual object classification, and recognising tectual entailment, pages 177-190. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Human attention in visual question answering: Do humans and deep networks look at the same regions?", |
|
"authors": [ |
|
{ |
|
"first": "Abhishek", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Harsh", |
|
"middle": [], |
|
"last": "Agrawal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Larry", |
|
"middle": [], |
|
"last": "Zitnick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhruv", |
|
"middle": [], |
|
"last": "Batra", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "932--937", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D16-1092" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abhishek Das, Harsh Agrawal, Larry Zitnick, Devi Parikh, and Dhruv Batra. 2016. Human attention in visual question answering: Do humans and deep networks look at the same regions? In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 932- 937, Austin, Texas. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "On the \"probable error\" of a coefficient of correlation deduced from a small sample", |
|
"authors": [ |
|
{ |
|
"first": "Ronald Aylmer", |
|
"middle": [], |
|
"last": "Fisher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1921, |
|
"venue": "Biometrika", |
|
"volume": "10", |
|
"issue": "4", |
|
"pages": "507--521", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ronald Aylmer Fisher. 1921. On the \"probable error\" of a coefficient of correlation deduced from a small sample. Biometrika, 10(4):507-521.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Interpretation of neural networks is fragile", |
|
"authors": [ |
|
{ |
|
"first": "Amirata", |
|
"middle": [], |
|
"last": "Ghorbani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abubakar", |
|
"middle": [], |
|
"last": "Abid", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Zou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "33", |
|
"issue": "", |
|
"pages": "3681--3688", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1609/aaai.v33i01.33013681" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amirata Ghorbani, Abubakar Abid, and James Zou. 2019. Interpretation of neural networks is fragile. Proceedings of the AAAI Conference on Artificial Intelligence, 33(01):3681-3688.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Annotation artifacts in natural language inference data", |
|
"authors": [ |
|
{ |
|
"first": "Swabha", |
|
"middle": [], |
|
"last": "Suchin Gururangan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Swayamdipta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roy", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Bowman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "107--112", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-2017" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Suchin Gururangan, Swabha Swayamdipta, Omer Levy, Roy Schwartz, Samuel Bowman, and Noah A. Smith. 2018. Annotation artifacts in natural language inference data. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 107-112, New Orleans, Louisiana. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Evaluating explainable AI: Which algorithmic explanations help users predict model behavior?", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Hase", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5540--5552", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.491" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter Hase and Mohit Bansal. 2020. Evaluating explainable AI: Which algorithmic explanations help users predict model behavior? In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 5540-5552, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Long short-term memory", |
|
"authors": [ |
|
{ |
|
"first": "Sepp", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00fcrgen", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural computation", |
|
"volume": "9", |
|
"issue": "8", |
|
"pages": "1735--1780", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation, 9(8):1735-1780.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Aligning faithful interpretations with their social attribution. arXiV, abs", |
|
"authors": [ |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Jacovi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alon Jacovi and Yoav Goldberg. 2020. Aligning faithful interpretations with their social attribution. arXiV, abs/2006.01067.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Attention is not Explanation", |
|
"authors": [ |
|
{ |
|
"first": "Sarthak", |
|
"middle": [], |
|
"last": "Jain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Byron", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Wallace", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "3543--3556", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1357" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sarthak Jain and Byron C. Wallace. 2019. Attention is not Explanation. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 3543-3556, Minneapolis, Minnesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "XplaiNLI: Explainable natural language inference through visual analytics", |
|
"authors": [ |
|
{ |
|
"first": "Aikaterini-Lida", |
|
"middle": [], |
|
"last": "Kalouli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rita", |
|
"middle": [], |
|
"last": "Sevastjanova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Valeria", |
|
"middle": [], |
|
"last": "De Paiva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Crouch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mennatallah", |
|
"middle": [], |
|
"last": "El-Assady", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 28th International Conference on Computational Linguistics: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "48--52", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aikaterini-Lida Kalouli, Rita Sevastjanova, Valeria de Paiva, Richard Crouch, and Mennatallah El- Assady. 2020. XplaiNLI: Explainable natural language inference through visual analytics. In Proceedings of the 28th International Conference on Computational Linguistics: System Demonstrations, pages 48-52.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Dynabench: Rethinking benchmarking in NLP", |
|
"authors": [ |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Bartolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yixin", |
|
"middle": [], |
|
"last": "Nie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Divyansh", |
|
"middle": [], |
|
"last": "Kaushik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Atticus", |
|
"middle": [], |
|
"last": "Geiger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhengxuan", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bertie", |
|
"middle": [], |
|
"last": "Vidgen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Grusha", |
|
"middle": [], |
|
"last": "Prasad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pratik", |
|
"middle": [], |
|
"last": "Ringshia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyi", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tristan", |
|
"middle": [], |
|
"last": "Thrush", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Riedel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zeerak", |
|
"middle": [], |
|
"last": "Waseem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pontus", |
|
"middle": [], |
|
"last": "Stenetorp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Potts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4110--4124", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.naacl-main.324" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Douwe Kiela, Max Bartolo, Yixin Nie, Divyansh Kaushik, Atticus Geiger, Zhengxuan Wu, Bertie Vidgen, Grusha Prasad, Amanpreet Singh, Pratik Ringshia, Zhiyi Ma, Tristan Thrush, Sebastian Riedel, Zeerak Waseem, Pontus Stenetorp, Robin Jia, Mohit Bansal, Christopher Potts, and Adina Williams. 2021. Dynabench: Rethinking benchmarking in NLP. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 4110-4124, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "The mythos of model interpretability", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Zachary C Lipton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Queue", |
|
"volume": "16", |
|
"issue": "3", |
|
"pages": "31--57", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zachary C Lipton. 2018. The mythos of model interpretability. Queue, 16(3):31-57.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Roberta: A robustly optimized BERT pretraining approach. arXiv", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized BERT pretraining approach. arXiv.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "A unified approach to interpreting model predictions", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Scott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Su-In", |
|
"middle": [], |
|
"last": "Lundberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4765--4774", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Scott M. Lundberg and Su-In Lee. 2017. A unified approach to interpreting model predictions. In Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pages 4765-4774.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Right for the wrong reasons: Diagnosing syntactic heuristics in natural language inference", |
|
"authors": [ |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Mccoy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ellie", |
|
"middle": [], |
|
"last": "Pavlick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tal", |
|
"middle": [], |
|
"last": "Linzen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3428--3448", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1334" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tom McCoy, Ellie Pavlick, and Tal Linzen. 2019. Right for the wrong reasons: Diagnosing syntactic heuristics in natural language inference. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3428-3448, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Shortcut-stacked sentence encoders for multi-domain inference", |
|
"authors": [ |
|
{ |
|
"first": "Yixin", |
|
"middle": [], |
|
"last": "Nie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2nd Workshop on Evaluating Vector Space Representations for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "41--45", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-5308" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yixin Nie and Mohit Bansal. 2017. Shortcut-stacked sentence encoders for multi-domain inference. In Proceedings of the 2nd Workshop on Evaluating Vector Space Representations for NLP, pages 41-45, Copenhagen, Denmark. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Adversarial NLI: A new benchmark for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Yixin", |
|
"middle": [], |
|
"last": "Nie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Dinan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4885--4901", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.441" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yixin Nie, Adina Williams, Emily Dinan, Mohit Bansal, Jason Weston, and Douwe Kiela. 2020. Adversarial NLI: A new benchmark for natural language understanding. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4885-4901, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Reducing non-normative text generation from language models", |
|
"authors": [ |
|
{ |
|
"first": "Xiangyu", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siyan", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Spencer", |
|
"middle": [], |
|
"last": "Frazier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Riedl", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 13th International Conference on Natural Language Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "374--383", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiangyu Peng, Siyan Li, Spencer Frazier, and Mark Riedl. 2020. Reducing non-normative text generation from language models. In Proceedings of the 13th International Conference on Natural Language Generation, pages 374-383, Dublin, Ireland. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Hypothesis only baselines in natural language inference", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Poliak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Naradowsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aparajita", |
|
"middle": [], |
|
"last": "Haldar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rachel", |
|
"middle": [], |
|
"last": "Rudinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Seventh Joint Conference on Lexical and Computational Semantics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "180--191", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/S18-2023" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Poliak, Jason Naradowsky, Aparajita Haldar, Rachel Rudinger, and Benjamin Van Durme. 2018. Hypothesis only baselines in natural language inference. In Proceedings of the Seventh Joint Conference on Lexical and Computational Semantics, pages 180-191, New Orleans, Louisiana. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Explain yourself! leveraging language models for commonsense reasoning", |
|
"authors": [ |
|
{ |
|
"first": "Bryan", |
|
"middle": [], |
|
"last": "Nazneen Fatema Rajani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Mccann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4932--4942", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1487" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nazneen Fatema Rajani, Bryan McCann, Caiming Xiong, and Richard Socher. 2019. Explain yourself! leveraging language models for commonsense reasoning. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 4932-4942, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Are red roses red? evaluating consistency of question-answering models", |
|
"authors": [ |
|
{ |
|
"first": "Carlos", |
|
"middle": [], |
|
"last": "Marco T\u00falio Ribeiro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Guestrin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL 2019", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "6174--6184", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/p19-1621" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco T\u00falio Ribeiro, Carlos Guestrin, and Sameer Singh. 2019. Are red roses red? evaluating consistency of question-answering models. In Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL 2019, Florence, Italy, July 28-August 2, 2019, Volume 1: Long Papers, pages 6174-6184. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Why should I trust you?\": Explaining the predictions of any classifier", |
|
"authors": [ |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Marco T\u00falio Ribeiro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlos", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Guestrin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1135--1144", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/2939672.2939778" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco T\u00falio Ribeiro, Sameer Singh, and Carlos Guestrin. 2016. \"Why should I trust you?\": Explaining the predictions of any classifier. In Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, San Francisco, CA, USA, August 13-17, 2016, pages 1135-1144. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Semantically equivalent adversarial rules for debugging NLP models", |
|
"authors": [ |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Marco T\u00falio Ribeiro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlos", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Guestrin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics, ACL 2018", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "856--865", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-1079" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco T\u00falio Ribeiro, Sameer Singh, and Carlos Guestrin. 2018. Semantically equivalent adversarial rules for debugging NLP models. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics, ACL 2018, Melbourne, Australia, July 15-20, 2018, Volume 1: Long Papers, pages 856-865. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Research priorities for robust and beneficial artificial intelligence", |
|
"authors": [ |
|
{ |
|
"first": "Stuart", |
|
"middle": [], |
|
"last": "Russell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Dewey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Tegmark", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "36", |
|
"issue": "", |
|
"pages": "105--114", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stuart Russell, Daniel Dewey, and Max Tegmark. 2015. Research priorities for robust and beneficial artificial intelligence. Ai Magazine, 36(4):105-114.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Squinting at VQA models: Introspecting VQA models with subquestions", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Ramprasaath", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Purva", |
|
"middle": [], |
|
"last": "Selvaraju", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Tendulkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Horvitz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Besmira", |
|
"middle": [], |
|
"last": "T\u00falio Ribeiro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ece", |
|
"middle": [], |
|
"last": "Nushi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kamar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "10000--10008", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/CVPR42600.2020.01002" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ramprasaath R. Selvaraju, Purva Tendulkar, Devi Parikh, Eric Horvitz, Marco T\u00falio Ribeiro, Besmira Nushi, and Ece Kamar. 2020. Squinting at VQA models: Introspecting VQA models with sub- questions. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2020, Seattle, WA, USA, June 13-19, 2020, pages 10000-10008. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Andr\u00e9 Freitas, and Siegfried Handschuh. 2020. XTE: Explainable text entailment", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Vivian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Silva", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2009.12431" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vivian S Silva, Andr\u00e9 Freitas, and Siegfried Handschuh. 2020. XTE: Explainable text entailment. arXiv preprint arXiv:2009.12431.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Fooling LIME and SHAP: adversarial attacks on post hoc explanation methods", |
|
"authors": [ |
|
{ |
|
"first": "Dylan", |
|
"middle": [], |
|
"last": "Slack", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sophie", |
|
"middle": [], |
|
"last": "Hilgard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Himabindu", |
|
"middle": [], |
|
"last": "Lakkaraju", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "AIES '20: AAAI/ACM Conference on AI, Ethics, and Society", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "180--186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3375627.3375830" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dylan Slack, Sophie Hilgard, Emily Jia, Sameer Singh, and Himabindu Lakkaraju. 2020. Fooling LIME and SHAP: adversarial attacks on post hoc explanation methods. In AIES '20: AAAI/ACM Conference on AI, Ethics, and Society, New York, NY, USA, February 7-8, 2020, pages 180-186. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Axiomatic attribution for deep networks", |
|
"authors": [ |
|
{ |
|
"first": "Mukund", |
|
"middle": [], |
|
"last": "Sundararajan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankur", |
|
"middle": [], |
|
"last": "Taly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qiqi", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 34th International Conference on Machine Learning", |
|
"volume": "70", |
|
"issue": "", |
|
"pages": "3319--3328", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mukund Sundararajan, Ankur Taly, and Qiqi Yan. 2017. Axiomatic attribution for deep networks. In Proceedings of the 34th International Conference on Machine Learning, ICML 2017, Sydney, NSW, Australia, 6-11 August 2017, volume 70 of Proceedings of Machine Learning Research, pages 3319-3328. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Christos Christodoulopoulos, and Arpit Mittal. 2019. The FEVER2.0 shared task", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Thorne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Vlachos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oana", |
|
"middle": [], |
|
"last": "Cocarascu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the Second Workshop on Fact Extraction and VERification (FEVER)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--6", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-6601" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James Thorne, Andreas Vlachos, Oana Cocarascu, Christos Christodoulopoulos, and Arpit Mittal. 2019. The FEVER2.0 shared task. In Proceedings of the Second Workshop on Fact Extraction and VERification (FEVER), pages 1-6, Hong Kong, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Performance impact caused by hidden bias of training data for recognizing textual entailment", |
|
"authors": [ |
|
{ |
|
"first": "Masatoshi", |
|
"middle": [], |
|
"last": "Tsuchiya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Masatoshi Tsuchiya. 2018. Performance impact caused by hidden bias of training data for recognizing textual entailment. In Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), Miyazaki, Japan. European Language Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Well-read students learn better: The impact of student initialization on knowledge distillation", |
|
"authors": [ |
|
{ |
|
"first": "Iulia", |
|
"middle": [], |
|
"last": "Turc", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Iulia Turc, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Well-read students learn better: The impact of student initialization on knowledge distillation. CoRR, abs/1908.08962.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Universal adversarial triggers for attacking and analyzing NLP", |
|
"authors": [ |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Wallace", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shi", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikhil", |
|
"middle": [], |
|
"last": "Kandpal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2153--2162", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1221" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eric Wallace, Shi Feng, Nikhil Kandpal, Matt Gardner, and Sameer Singh. 2019. Universal adversarial triggers for attacking and analyzing NLP. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 2153-2162, Hong Kong, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Learning helpful inductive biases from self-supervised pretraining", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Warstadt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yian", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaocheng", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haokun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "217--235", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Warstadt, Yian Zhang, Xiaocheng Li, Haokun Liu, and Samuel R. Bowman. 2020. Learning helpful inductive biases from self-supervised pretraining. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 217-235, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Attention is not not explanation", |
|
"authors": [ |
|
{ |
|
"first": "Sarah", |
|
"middle": [], |
|
"last": "Wiegreffe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuval", |
|
"middle": [], |
|
"last": "Pinter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "11--20", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1002" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sarah Wiegreffe and Yuval Pinter. 2019. Attention is not not explanation. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 11-20, Hong Kong, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "A broad-coverage challenge corpus for sentence understanding through inference", |
|
"authors": [ |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikita", |
|
"middle": [], |
|
"last": "Nangia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1112--1122", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-1101" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adina Williams, Nikita Nangia, and Samuel Bowman. 2018. A broad-coverage challenge corpus for sentence understanding through inference. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 1112- 1122, New Orleans, Louisiana. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "Understanding the effect of accuracy on trust in machine learning models", |
|
"authors": [ |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Yin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jennifer", |
|
"middle": [ |
|
"Wortman" |
|
], |
|
"last": "Vaughan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hanna", |
|
"middle": [], |
|
"last": "Wallach", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 CHI conference on human factors in computing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--12", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ming Yin, Jennifer Wortman Vaughan, and Hanna Wallach. 2019. Understanding the effect of accuracy on trust in machine learning models. In Proceedings of the 2019 CHI conference on human factors in computing systems, pages 1-12.", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "Effect of confidence and explanation on accuracy and trust calibration in ai-assisted decision making", |
|
"authors": [ |
|
{ |
|
"first": "Yunfeng", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vera", |
|
"middle": [], |
|
"last": "Liao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bellamy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "295--305", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yunfeng Zhang, Q Vera Liao, and Rachel KE Bellamy. 2020. Effect of confidence and explanation on accuracy and trust calibration in ai-assisted decision making. In Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency, pages 295-305.", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "LIREx: Augmenting language inference with relevant explanation", |
|
"authors": [ |
|
{ |
|
"first": "Xinyan", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Vinod Vydiswaran", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2012.09157" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xinyan Zhao and V. G. Vinod Vydiswaran. 2020. LIREx: Augmenting language inference with relevant explanation. arXiv preprint arXiv:2012.09157.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF1": { |
|
"num": null, |
|
"text": "Accuracy on the development partition of the ANLI dataset for target models (finetuned on MNLI + SNLI + ANLI + re-cast FEVER) and models used as the soft oracle (finetuned on 6-way NLI and reason classification on a subset of ANLI).", |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"text": "Importance Alignment between model importance scores and oracle importance scores (both A", |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF5": { |
|
"num": null, |
|
"text": "", |
|
"content": "<table><tr><td>: Importance Alignment between model</td></tr><tr><td>importance scores and oracle importance scores (A H ,</td></tr><tr><td>A S and A E metrics) across 5 random seeds on the</td></tr><tr><td>ANLI dataset. \u2206A was computed over the 60</td></tr><tr><td>examples with expert annotations. '*'s indicate</td></tr><tr><td>whether \u2206A is significantly greater than 0. '***'</td></tr><tr><td>indicates p < 0.001, '**' indicates p < 0.01 and '*'</td></tr><tr><td>indicates p < 0.05.</td></tr></table>", |
|
"type_str": "table", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |