|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T01:09:18.347188Z" |
|
}, |
|
"title": "The Explanation Game: Towards Prediction Explainability through Sparse Communication", |
|
"authors": [ |
|
{ |
|
"first": "Marcos", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Treviso", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "T\u00e9cnico University of Lisbon", |
|
"location": { |
|
"country": "Portugal" |
|
} |
|
}, |
|
"email": "marcos.treviso@tecnico.ulisboa.pt" |
|
}, |
|
{ |
|
"first": "Andr\u00e9", |
|
"middle": [ |
|
"F T" |
|
], |
|
"last": "Martins", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Instituto de Telecomunica\u00e7\u00f5es LUMLIS (Lisbon ELLIS Unit) Instituto Superior T\u00e9cnico & Unbabel Lisbon", |
|
"institution": "", |
|
"location": { |
|
"country": "Portugal" |
|
} |
|
}, |
|
"email": "andre.t.martins@tecnico.ulisboa.com" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Explainability is a topic of growing importance in NLP. In this work, we provide a unified perspective of explainability as a communication problem between an explainer and a layperson about a classifier's decision. We use this framework to compare several explainers, including gradient methods, erasure, and attention mechanisms, in terms of their communication success. In addition, we reinterpret these methods in the light of classical feature selection, and use this as inspiration for new embedded explainers, through the use of selective, sparse attention. Experiments in text classification and natural language inference, using different configurations of explainers and laypeople (including both machines and humans), reveal an advantage of attention-based explainers over gradient and erasure methods, and show that selective attention is a simpler alternative to stochastic rationalizers. Human experiments show strong results on text classification with post-hoc explainers trained to optimize communication success.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Explainability is a topic of growing importance in NLP. In this work, we provide a unified perspective of explainability as a communication problem between an explainer and a layperson about a classifier's decision. We use this framework to compare several explainers, including gradient methods, erasure, and attention mechanisms, in terms of their communication success. In addition, we reinterpret these methods in the light of classical feature selection, and use this as inspiration for new embedded explainers, through the use of selective, sparse attention. Experiments in text classification and natural language inference, using different configurations of explainers and laypeople (including both machines and humans), reveal an advantage of attention-based explainers over gradient and erasure methods, and show that selective attention is a simpler alternative to stochastic rationalizers. Human experiments show strong results on text classification with post-hoc explainers trained to optimize communication success.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The widespread use of machine learning to assist humans in decision making brings the need for explaining models' predictions (Doshi-Velez, 2017; Lipton, 2018; Rudin, 2019; Miller, 2019) . This poses a challenge in NLP, where current state-ofthe-art neural systems are generally opaque (Goldberg and Hirst, 2017; Peters et al., 2018; Devlin et al., 2019) . Despite the large body of recent work (reviewed in \u00a77), a unified perspective modeling the human-machine interaction-a communication process in its essence-is still missing.", |
|
"cite_spans": [ |
|
{ |
|
"start": 126, |
|
"end": 145, |
|
"text": "(Doshi-Velez, 2017;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 146, |
|
"end": 159, |
|
"text": "Lipton, 2018;", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 160, |
|
"end": 172, |
|
"text": "Rudin, 2019;", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 173, |
|
"end": 186, |
|
"text": "Miller, 2019)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 286, |
|
"end": 312, |
|
"text": "(Goldberg and Hirst, 2017;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 313, |
|
"end": 333, |
|
"text": "Peters et al., 2018;", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 334, |
|
"end": 354, |
|
"text": "Devlin et al., 2019)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Many methods have been proposed to generate explanations. Some neural network architectures are equipped with built-in components-attention mechanisms-which weigh the relevance of input features for triggering a decision (Bahdanau =\u0177. Both the explainer and layperson can be humans or machines. Vaswani et al., 2017) . Top-k attention weights provide plausible, but not always faithful, explanations (Jain and Wallace, 2019; Serrano and Smith, 2019; Wiegreffe and Pinter, 2019) . Rationalizers with hard attention are arguably more faithful, but require stochastic networks, which are harder to train (Lei et al., 2016; . Other approaches include gradient methods (Li et al., 2016a; Arras et al., 2017) , querying the classifier with leave-one-out strategies (Li et al., 2016a; Feng et al., 2018) , or training local sparse classifiers (Ribeiro et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 221, |
|
"end": 230, |
|
"text": "(Bahdanau", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 295, |
|
"end": 316, |
|
"text": "Vaswani et al., 2017)", |
|
"ref_id": "BIBREF52" |
|
}, |
|
{ |
|
"start": 400, |
|
"end": 424, |
|
"text": "(Jain and Wallace, 2019;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 425, |
|
"end": 449, |
|
"text": "Serrano and Smith, 2019;", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 450, |
|
"end": 477, |
|
"text": "Wiegreffe and Pinter, 2019)", |
|
"ref_id": "BIBREF53" |
|
}, |
|
{ |
|
"start": 601, |
|
"end": 619, |
|
"text": "(Lei et al., 2016;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 664, |
|
"end": 682, |
|
"text": "(Li et al., 2016a;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 683, |
|
"end": 702, |
|
"text": "Arras et al., 2017)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 759, |
|
"end": 777, |
|
"text": "(Li et al., 2016a;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 778, |
|
"end": 796, |
|
"text": "Feng et al., 2018)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 836, |
|
"end": 858, |
|
"text": "(Ribeiro et al., 2016)", |
|
"ref_id": "BIBREF44" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "How should these different approaches be compared? Several diagnostic tests have been proposed: Jain and Wallace (2019) assessed the explanatory power of attention weights by measuring their correlation with input gradients; Wiegreffe and Pinter (2019) and DeYoung et al. (2020) developed more informative tests, including a combination of comprehensiveness and sufficiency metrics and the correlation with human rationales; Jacovi and Goldberg (2020) proposed a set of evaluation recommendations and a graded notion of faithfulness. Most proposed frameworks rely on correlations and counterfactual simulation, sidestepping the main practical goal of prediction explainability-the ability to communicate an explanation to a human user.", |
|
"cite_spans": [ |
|
{ |
|
"start": 225, |
|
"end": 252, |
|
"text": "Wiegreffe and Pinter (2019)", |
|
"ref_id": "BIBREF53" |
|
}, |
|
{ |
|
"start": 257, |
|
"end": 278, |
|
"text": "DeYoung et al. (2020)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 425, |
|
"end": 451, |
|
"text": "Jacovi and Goldberg (2020)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this work, we fill the gap above by proposing a unified framework that regards explainability as a communication problem. Our framework is inspired by human-grounded evaluation through forward simulation/prediction, as proposed by Doshi-Velez (2017, \u00a73.2) , where humans are presented with an explanation and an input, and must correctly simulate the model's output (regardless of the true output). We model this process as shown in Figure 1 , by considering the interaction between a classifier (the model whose predictions we want to explain), an explainer (which provides the explanations), and a layperson (which must recover the classifier's prediction). We show that different configurations of these components correspond to previously proposed explanation methods, and we experiment with explainers and laypeople being both humans and machines. Our framework also inspires two new methods: embedded explainers based on selective attention (Martins and Astudillo, 2016; Peters et al., 2019) , and trainable explainers based on emergent communication (Foerster et al., 2016; Lazaridou et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 234, |
|
"end": 258, |
|
"text": "Doshi-Velez (2017, \u00a73.2)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 950, |
|
"end": 979, |
|
"text": "(Martins and Astudillo, 2016;", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 980, |
|
"end": 1000, |
|
"text": "Peters et al., 2019)", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 1060, |
|
"end": 1083, |
|
"text": "(Foerster et al., 2016;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 1084, |
|
"end": 1107, |
|
"text": "Lazaridou et al., 2016)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 436, |
|
"end": 444, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Overall, our contributions are:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We draw a link between recent techniques for explainability of neural networks and classic feature selection in linear models ( \u00a72). This leads to new embedded methods for explainability through selective, sparse attention ( \u00a73).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We propose a new framework to assess explanatory power as the communication success rate between an explainer and a layperson ( \u00a74).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We experiment with text classification, natural language inference, and machine translation, using different configurations of explainers and laypeople, both machines ( \u00a75) and humans ( \u00a76).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "A common way of generating explanations is by highlighting rationales (Zaidan and Eisner, 2008) . The principle of parsimony (\"Occam's razor\") advocates simple explanations over complex ones. This principle inspired a large body of work in traditional feature selection for linear models. We draw here a link between that work and modern approaches to explainability. Table 1 highlights the connections. Traditional feature selection methods (Guyon and Elisseeff, 2003) are mostly concerned with model interpretability, i.e., understanding how models behave globally. Feature selection happens statically during model training, after which irrelevant features are permanently deleted from the model. This contrasts with prediction explainability in neural networks, where feature selection happens dynamically at run time: here explanations are inputdependent, hence a feature not relevant for a particular input can be relevant for another. Are these two worlds far away? Guyon and Elisseeff (2003, \u00a74) proposed a typology for traditional feature selection with three classes of methods, distinguished by how they model the interaction between their main two components, the feature selector and the learning algorithm. We argue that this typology can also be used to characterize various explanation methods, if we replace these two components by the explainer E and the classifier C, respectively.", |
|
"cite_spans": [ |
|
{ |
|
"start": 70, |
|
"end": 95, |
|
"text": "(Zaidan and Eisner, 2008)", |
|
"ref_id": "BIBREF56" |
|
}, |
|
{ |
|
"start": 442, |
|
"end": 469, |
|
"text": "(Guyon and Elisseeff, 2003)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 973, |
|
"end": 1003, |
|
"text": "Guyon and Elisseeff (2003, \u00a74)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 368, |
|
"end": 375, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Revisiting Feature Selection", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "\u2022 Wrapper methods, in the wording of Guyon and Elisseeff (2003) , \"utilize the learning machine of interest as a black box to score subsets of variables according to their predictive power.\" This means greedily searching over subsets of features, training a model with each candidate subset. In the dynamic feature selection world, this is somewhat reminiscent of the leave-oneout method of Li et al. (2016b) , the ablative approach of Serrano and Smith (2019) , and LIME (Ribeiro et al., 2016), which repeatedly queries the classifier to label new examples.", |
|
"cite_spans": [ |
|
{ |
|
"start": 37, |
|
"end": 63, |
|
"text": "Guyon and Elisseeff (2003)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 391, |
|
"end": 408, |
|
"text": "Li et al. (2016b)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 436, |
|
"end": 460, |
|
"text": "Serrano and Smith (2019)", |
|
"ref_id": "BIBREF47" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Revisiting Feature Selection", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "\u2022 Filter methods decide to include/exclude a feature based on an importance metric (such as feature counts or pairwise mutual information). This can be done as a preprocessing step or by training the model once and thresholding the feature weights. In dynamic feature selection, this is done when we examine the gradient of the prediction with respect to each input feature, and then select the features whose gradients have large magnitude (Li et al., 2016a; Arras et al., 2016 ; Jain and Wallace, 2019), 1 and when thresholding softmax attention scores to select relevant input features, as analyzed by Jain and Wallace (2019) and Wiegreffe and Pinter (2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 441, |
|
"end": 459, |
|
"text": "(Li et al., 2016a;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 460, |
|
"end": 478, |
|
"text": "Arras et al., 2016", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 605, |
|
"end": 628, |
|
"text": "Jain and Wallace (2019)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 633, |
|
"end": 660, |
|
"text": "Wiegreffe and Pinter (2019)", |
|
"ref_id": "BIBREF53" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Revisiting Feature Selection", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "\u2022 Embedded methods, in traditional feature selection, embed feature selection within the learning algorithm by using a sparse regularizer such as the 1 -norm (Tibshirani, 1996) . Features that receive zero weight become irrelevant and can Static selection (model interpretability) Dynamic selection (prediction explainability)", |
|
"cite_spans": [ |
|
{ |
|
"start": 158, |
|
"end": 176, |
|
"text": "(Tibshirani, 1996)", |
|
"ref_id": "BIBREF49" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Revisiting Feature Selection", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Forward selection, backward elimination (Kohavi and John, 1997) Input reduction (Feng et al., 2018) , representation erasure (leave-one-out) (Li et al., 2016b; Serrano and Smith, 2019) , LIME (Ribeiro et al., 2016) Filters Pointwise mutual information (Church and Hanks, 1989) , recursive feature elimination (Guyon et al., 2002) Input gradient (Li et al., 2016a) , layerwise relevance propagation (Bach et al., 2015) , top-k softmax attention Embedded 1-regularization (Tibshirani, 1996) , elastic net (Zou and Hastie, 2005) Stochastic attention (Xu et al., 2015; Lei et al., 2016; , sparse attention (this paper, \u00a73) be removed from the model. In dynamic feature selection, this encompasses methods where the classifier produces rationales together with its decisions (Lei et al., 2016; . We propose in \u00a73 an alternative approach via sparse attention (Martins and Astudillo, 2016; Peters et al., 2019) , where the selection of words for the rationale resembles 1 -regularization.", |
|
"cite_spans": [ |
|
{ |
|
"start": 40, |
|
"end": 63, |
|
"text": "(Kohavi and John, 1997)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 80, |
|
"end": 99, |
|
"text": "(Feng et al., 2018)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 141, |
|
"end": 159, |
|
"text": "(Li et al., 2016b;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 160, |
|
"end": 184, |
|
"text": "Serrano and Smith, 2019)", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 187, |
|
"end": 214, |
|
"text": "LIME (Ribeiro et al., 2016)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 252, |
|
"end": 276, |
|
"text": "(Church and Hanks, 1989)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 309, |
|
"end": 329, |
|
"text": "(Guyon et al., 2002)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 345, |
|
"end": 363, |
|
"text": "(Li et al., 2016a)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 398, |
|
"end": 417, |
|
"text": "(Bach et al., 2015)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 470, |
|
"end": 488, |
|
"text": "(Tibshirani, 1996)", |
|
"ref_id": "BIBREF49" |
|
}, |
|
{ |
|
"start": 503, |
|
"end": 525, |
|
"text": "(Zou and Hastie, 2005)", |
|
"ref_id": "BIBREF57" |
|
}, |
|
{ |
|
"start": 547, |
|
"end": 564, |
|
"text": "(Xu et al., 2015;", |
|
"ref_id": "BIBREF54" |
|
}, |
|
{ |
|
"start": 565, |
|
"end": 582, |
|
"text": "Lei et al., 2016;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 770, |
|
"end": 788, |
|
"text": "(Lei et al., 2016;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 853, |
|
"end": 882, |
|
"text": "(Martins and Astudillo, 2016;", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 883, |
|
"end": 903, |
|
"text": "Peters et al., 2019)", |
|
"ref_id": "BIBREF42" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Wrappers", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In \u00a74, we frame each of the cases above as a communication process, where the explainer E aims to communicate a short message with the relevant features that triggered the classifier C's decisions to a layperson L. The three cases above are distinguished by the way C and E interact.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Wrappers", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The case where the explainer E is embedded in the classifier C naturally favors faithfulness, since the mechanism that explains the decision (the why) can also influence it (the how).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Embedded Sparse Attention", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Attention mechanisms (Bahdanau et al., 2015 ) allow visualizing relevant input features that contributed to the model's decision. However, the traditional softmax-based attention is dense, i.e., it gives some probability mass to every feature, even if small. The typical approach is to select the top-k words with largest attention weights as the explanation. However, this is not a truly embedded method, but rather a filter, and as pointed out by Jain and Wallace (2019) and Wiegreffe and Pinter (2019) , it may not lead to faithful explanations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 43, |
|
"text": "(Bahdanau et al., 2015", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 477, |
|
"end": 504, |
|
"text": "Wiegreffe and Pinter (2019)", |
|
"ref_id": "BIBREF53" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Embedded Sparse Attention", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "An alternative is to embed in the classifier an attention mechanism that is inherently selective, i.e., which can produce sparse attention distributions natively, where some input features receive exactly zero attention. An extreme example is hard attention, which, as argued by DeYoung et al. 2020, provides more faithful explanations \"by construction\" as they discretely extract snippets from the input to pass to the classifier. A problem with hard attention is its non-differentiability, which complicates training (Lei et al., 2016; . We consider in this paper a different approach: using end-to-end differentiable sparse attention mechanisms, via the sparsemax (Martins and Astudillo, 2016) and the recently proposed 1.5-entmax transformation (Peters et al., 2019) , described in detail in \u00a7A. These sparse attention transformations have been applied successfully to machine translation and morphological inflection (Peters et al., 2019; Correia et al., 2019) . Words that receive non-zero attention probability are selected to be part of the explanation. This is an embedded method akin of the use of 1 -regularization in static feature selection. We experiment with these sparse attention mechanisms in \u00a75.", |
|
"cite_spans": [ |
|
{ |
|
"start": 519, |
|
"end": 537, |
|
"text": "(Lei et al., 2016;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 667, |
|
"end": 696, |
|
"text": "(Martins and Astudillo, 2016)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 749, |
|
"end": 770, |
|
"text": "(Peters et al., 2019)", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 922, |
|
"end": 943, |
|
"text": "(Peters et al., 2019;", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 944, |
|
"end": 965, |
|
"text": "Correia et al., 2019)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Embedded Sparse Attention", |
|
"sec_num": "3" |
|
}, |
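
{

"text": "To make the selective behavior concrete, the following NumPy sketch (an editorial illustration, not code from the paper) computes the sparsemax transformation of Martins and Astudillo (2016) as a Euclidean projection onto the probability simplex; scores below the data-dependent threshold \u03c4 receive exactly zero probability, so the corresponding words are excluded from the explanation:\n\nimport numpy as np\n\ndef sparsemax(z):\n    # Euclidean projection of a score vector z onto the probability simplex.\n    z = np.asarray(z, dtype=float)\n    z_sorted = np.sort(z)[::-1]\n    k = np.arange(1, z.size + 1)\n    cumsum = np.cumsum(z_sorted)\n    support = 1 + k * z_sorted > cumsum  # coordinates kept in the support\n    tau = (cumsum[support][-1] - 1) / k[support][-1]\n    return np.maximum(z - tau, 0.0)  # exact zeros outside the support\n\n# Example: sparsemax([2.0, 1.0, -1.0]) -> [1.0, 0.0, 0.0], i.e., a single attended word.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Embedded Sparse Attention",

"sec_num": "3"

},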
|
{ |
|
"text": "We now have the necessary ingredients to describe our unified framework for comparing and designing explanation strategies, illustrated in Figure 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 139, |
|
"end": 147, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Explainability as Communication", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Our fundamental assumption is that explainability is intimately linked to the ability of an explainer to communicate the rationale of a decision in terms that can be understood by a human; we use the success of this communication as a criterion for how plausible the explanation is.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Explainability as Communication", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Our framework draws inspiration from Lewis' signaling games (Lewis, 1969) and the recent work on emergent communication (Foerster et al., 2016; Lazaridou et al., 2016; Havrylov and Titov, 2017) . Our starting point is the classifier C : X \u2192 Y which, when given an input x \u2208 X , produces a prediction\u0177 \u2208 Y. This is the prediction that we want to explain. An explanation is a message m \u2208 M, for a predefined message space M (for example, a rationale). The goal of the explainer E is to compose and successfully communicate messages m to a layperson L. The success of the communication is dictated by the ability of L to reconstruct\u0177 from m with high accuracy. In this paper, we experiment with E and L being either humans or machines. Our framework is inspired by human-grounded evaluation through forward simulation/prediction, as proposed by Doshi-Velez (2017, \u00a73.2). More formally:", |
|
"cite_spans": [ |
|
{ |
|
"start": 60, |
|
"end": 73, |
|
"text": "(Lewis, 1969)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 120, |
|
"end": 143, |
|
"text": "(Foerster et al., 2016;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 144, |
|
"end": 167, |
|
"text": "Lazaridou et al., 2016;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 168, |
|
"end": 193, |
|
"text": "Havrylov and Titov, 2017)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Classifier-Explainer-Layperson setup", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 The classifier C is the model whose predictions we want to explain. For given inputs x, C produces\u0177 that are hopefully close to the ground truth y. We are agnostic about the kind of model used as a classifier, but we assume that it computes certain internal representations h that can be exposed to the explainer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Classifier-Explainer-Layperson setup", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 The explainer E produces explanations for C's decisions. It receives the input x, the classifier prediction\u0177 = C(x), and optionally the internal representations h exposed by C. It outputs a message m \u2208 M regarded as a \"rationale\" for\u0177. The message m = E(x,\u0177, h) should be simple and compact enough to be easily transmitted and understood by the layperson L. In this paper, we constrain messages to be bags-of-words (BoWs) extracted from the textual input x.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Classifier-Explainer-Layperson setup", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 The layperson L is a simple model (e.g., a linear classifier) 2 that receives the message m as input, and predicts a final output\u1ef9 = L(m). The communication is successful if\u1ef9 =\u0177. Given a test set {x 1 , . . . , x N }, we evaluate the communication success rate (CSR) as the fraction of examples for which the communication is successful: Under this framework, we regard the communication success rate as a quantifiable measure of explainability: a high CSR means that the layperson L is able to replicate the classifier C's decisions a large fraction of the time when presented with the messages given by the explainer E; this assesses how informative E's messages are.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Classifier-Explainer-Layperson setup", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "CSR = 1 N N n=1 C(x n ) = L(E(x n , C(x n ))) ,", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "The Classifier-Explainer-Layperson setup", |
|
"sec_num": "4.1" |
|
}, |
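
{

"text": "As a minimal sketch of Eq. 1 (an editorial illustration, not from the paper; classifier, explainer, and layperson are assumed to be plain callables):\n\ndef communication_success_rate(classifier, explainer, layperson, inputs):\n    # CSR (Eq. 1): fraction of examples where the layperson recovers the\n    # classifier's prediction from the explainer's message alone.\n    hits = 0\n    for x in inputs:\n        y_hat = classifier(x)  # prediction to be explained\n        message = explainer(x, y_hat)  # e.g., a bag of selected words\n        hits += int(layperson(message) == y_hat)\n    return hits / len(inputs)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The Classifier-Explainer-Layperson setup",

"sec_num": "4.1"

},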
|
{ |
|
"text": "Our framework is flexible, allowing different configurations for C, E, and L, as next described. In \u00a75, we show examples of explainers and laypeople for text classification and natural language inference tasks (additional experiments on machine translation are described in \u00a7G).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Classifier-Explainer-Layperson setup", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Relation to filters and wrappers. In the wrapper and filter approaches described in \u00a72, the classifier C and the explainer E are separate components. In these approaches, E works as a post-hoc explainer, querying C with new examples or requesting gradient information.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Classifier-Explainer-Layperson setup", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Relation to embedded explanation. By contrast, in the embedded approaches of Lei et al. (2016) and the selective sparse attention introduced in \u00a73, the explainer E is directly embedded as an internal component of the classifier C, returning the selected features as the message. This approach is arguably more faithful, as E is directly linked to the mechanism that produces C's decisions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 77, |
|
"end": 94, |
|
"text": "Lei et al. (2016)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Classifier-Explainer-Layperson setup", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "So far we have assumed that E is given beforehand, chosen among existing explanation methods, and that L is trained to assess the explanatory ability of E. But can our framework be used to create new explainers by training E and L jointly? We will see how this can be done by letting E and L play a cooperative game (Lewis, 1969) . The key idea is that they need to learn a communication protocol that ensures high CSR (Eq. 1). Special care needs to be taken to rule out \"trivial\" protocols and ensure plausible, potentially faithful, explanations. We propose a strategy to ensure this, which will be validated using human evaluation in \u00a76. 3 Let E \u03b8 and layperson L \u03c6 be trained models (with parameters \u03b8 and \u03c6), learned together to optimize a multi-task objective with two terms:", |
|
"cite_spans": [ |
|
{ |
|
"start": 316, |
|
"end": 329, |
|
"text": "(Lewis, 1969)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 641, |
|
"end": 642, |
|
"text": "3", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint training of explainer and layperson", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 A reconstruction term that controls the information about the classifier's decision\u0177. We use a cross-entropy loss on the output of the layperson L, using\u0177 (and not the true label y) as the ground truth:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint training of explainer and layperson", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "L(\u03c6, \u03b8) = \u2212 log p \u03c6 (\u0177 | m),", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint training of explainer and layperson", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "where m is the output of the explainer E \u03b8 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint training of explainer and layperson", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 A faithfulness term that encourages the explainer E to take into account the classifier's decision process when producing its explanation m. This is done by adding a squared loss term", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint training of explainer and layperson", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2126(\u03b8) = h (E \u03b8 ), h 2 whereh is E's prediction of C's internal representation h.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint training of explainer and layperson", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The objective function is a combination of these two terms, L \u2126 (\u03c6, \u03b8) := \u03bb\u2126(\u03b8) + L(\u03c6, \u03b8). We used \u03bb = 1 in our experiments. This objective is minimized in a training set that contains pairs (x,\u0177). Therefore, in this model the message m is latent and works as a \"bottleneck\" for the layperson L, which does not have access to the full input x, to guess the classifier's prediction\u0177-related models have been devised in the context of emergent communication (Lazaridou et al., 2016; Foerster et al., 2016; Havrylov and Titov, 2017) and sparse autoencoders (Trifonov et al., 2018; Subramanian et al., 2018) . We minimize the objective above with gradient backpropagation. To ensure end-to-end differentiability, during this joint training we use sparsemax attention ( \u00a73) to select the relevant words in the message. One important concern in this model is to prevent E and L from learning a trivial protocol to maximize CSR. To ensure this, we forbid E from including stopwords in its messages and during training we use a linear schedule for the probability of the explainer accessing the predictions of the classifier (\u0177), which are hidden otherwise. At the end of training, the explainer will access it with probability \u03b2. In our experiments, we set \u03b2 to 20% (chosen on the validation set as described in \u00a7F.2).", |
|
"cite_spans": [ |
|
{ |
|
"start": 456, |
|
"end": 480, |
|
"text": "(Lazaridou et al., 2016;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 481, |
|
"end": 503, |
|
"text": "Foerster et al., 2016;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 504, |
|
"end": 529, |
|
"text": "Havrylov and Titov, 2017)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 554, |
|
"end": 577, |
|
"text": "(Trifonov et al., 2018;", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 578, |
|
"end": 603, |
|
"text": "Subramanian et al., 2018)", |
|
"ref_id": "BIBREF48" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint training of explainer and layperson", |
|
"sec_num": "4.2" |
|
}, |
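
{

"text": "A minimal PyTorch-style sketch of the multi-task objective L_\u2126(\u03c6, \u03b8) = \u03bb\u2126(\u03b8) + L(\u03c6, \u03b8) (an editorial illustration under assumed tensor names such as h_tilde; not the authors' code):\n\nimport torch.nn.functional as F\n\ndef joint_objective(layperson_logits, y_hat, h_tilde, h, lam=1.0):\n    # Reconstruction term: -log p_phi(y_hat | m), with the classifier's\n    # prediction y_hat (not the gold label) as the target.\n    reconstruction = F.cross_entropy(layperson_logits, y_hat)\n    # Faithfulness term: squared error between the explainer's guess h_tilde\n    # of the classifier's internal representation h and h itself.\n    faithfulness = F.mse_loss(h_tilde, h.detach())\n    return lam * faithfulness + reconstruction",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Joint training of explainer and layperson",

"sec_num": "4.2"

},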
|
{ |
|
"text": "We experimented with our framework on two NLP tasks: text classification and natural language inference. Additional experiments on machine translation are reported in \u00a7G, with similar conclusions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We used 4 datasets (SST, IMDB, AgNews, Yelp) for text classification and one dataset (SNLI) for NLI, with statistics and details in Table 5 ( \u00a7B).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 132, |
|
"end": 139, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Classifier C. For text classification, the input x \u2208 X is a document and the output set Y is a set of labels (e.g. topics or sentiment labels). The message is a bag of words (BoW) extracted from the document. As in Jain and Wallace (2019) and Wiegreffe and Pinter (2019) , our classifier C is an RNN with attention. For NLI, the input x is a pair of sentences (premise and hypothesis) and the labels in Y are entailment, contradiction, and neutral. We let messages be again BoWs, and we constrain them to be selected from the premise (and concatenated with the full hypothesis). We used a similar classifier as above, but with two independent BiL-STM layers, one for each sentence. We used the additive attention of Bahdanau et al. (2015) with the last hidden state of the hypothesis as the query and the premise vectors as keys.", |
|
"cite_spans": [ |
|
{ |
|
"start": 243, |
|
"end": 270, |
|
"text": "Wiegreffe and Pinter (2019)", |
|
"ref_id": "BIBREF53" |
|
}, |
|
{ |
|
"start": 716, |
|
"end": 738, |
|
"text": "Bahdanau et al. (2015)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
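
{

"text": "For reference, a minimal PyTorch sketch of the additive attention used for NLI (hypothesis state as query, premise vectors as keys); names and dimensions are illustrative assumptions, not the authors' code:\n\nimport torch\nimport torch.nn as nn\n\nclass AdditiveAttention(nn.Module):\n    # Bahdanau-style scoring: score_i = v^T tanh(W_q q + W_k k_i).\n    def __init__(self, dim):\n        super().__init__()\n        self.w_q = nn.Linear(dim, dim, bias=False)\n        self.w_k = nn.Linear(dim, dim, bias=False)\n        self.v = nn.Linear(dim, 1, bias=False)\n\n    def forward(self, query, keys):\n        # query: (batch, dim); keys: (batch, length, dim)\n        scores = self.v(torch.tanh(self.w_q(query).unsqueeze(1) + self.w_k(keys)))\n        probs = torch.softmax(scores.squeeze(-1), dim=-1)  # attention weights\n        context = torch.bmm(probs.unsqueeze(1), keys).squeeze(1)\n        return context, probs",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experiments",

"sec_num": "5"

},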
|
{ |
|
"text": "We also experimented with RNN classifiers that replace softmax attention by 1.5-entmax (C ent ) and sparsemax (C sp ), and with the rationalizer models of Lei et al. (2016) (C bern ) and Bastings et al. (2019) (C hk ). Details about these classifiers and their hyperparameters are listed in \u00a7D. Table 2 reports the accuracy of all classifiers used in our experiments. The attention-based models all perform very similarly and generally better than the rationalizer models, except for SNLI, where the latter use a stronger model with decomposable attention. As expected, in general, all these classifiers outperform a bag-of-words model which is the model we use as the layperson.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 295, |
|
"end": 302, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Layperson L and explainer E. We used a simple linear BoW model as the layperson L. For NLI, the layperson sees the full hypothesis, encoding it with a BiLSTM. The BoW from the explainer is passed through a linear projection and summed with the last hidden state of the BiLSTM.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We evaluated the following explainers:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "1. Erasure, a wrapper similar to the leave-one-out approaches of Jain and Wallace (2019) and Serrano and Smith (2019). We obtain the word with largest attention, zero out its input vector, and repass the whole input with the erased vector to the classifier C. We produce the message by repeating this procedure k times.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
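
{

"text": "A sketch of the erasure wrapper (illustrative; classifier_attention stands for a hypothetical helper that re-runs C on the partially erased input and returns its attention distribution):\n\nimport numpy as np\n\ndef erasure_message(classifier_attention, word_vectors, k):\n    # Greedy leave-one-out: erase the most-attended word's input vector and\n    # re-pass the whole input to the classifier; k repetitions give a k-word message.\n    vectors = np.array(word_vectors, dtype=float)\n    message = []\n    for _ in range(k):\n        attention = classifier_attention(vectors)  # one call to C per step\n        i = int(np.argmax(attention))\n        message.append(i)\n        vectors[i] = 0.0  # zero out the erased word's vector\n    return message",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experiments",

"sec_num": "5"

},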
|
{ |
|
"text": "2. Top-k gradients, a filter approach that ranks word importance by their \"input \u00d7 gradient\" product, Ancona et al., 2018; Wiegreffe and Pinter, 2019) . The top-k words are selected as the message. Table 2 ; in all cases the layperson is a BoW model. Only explainers of the same classifier can be compared in terms of CSR. Top rows report performance for random, wrapper and filter explainers, for fixed k-word messages (the values of k for the several datasets are {5, 10, 10, 10, 4}, respectively). Bottom rows correspond to embedded methods where k is given automatically via sparsity. The average k obtained by 1.5-entmax, sparsemax, Bernoulli and HardKuma are: SST: {4.65, 2.59, 6.10, 4.82}; IMDB: {28.23, 12.94, 39.40, 24.18}; AGNEWS {5.65, 4.14, 4.01, 9.68}; YELP: {60.61, 23.86, 9.15, 33.18}; SNLI: {12.96, 8.27, 15.04, 6.40}. 3. Top-k and selective attention: We experimented both using attention as a filter, by selecting the top-k most attended words as the message, and embedded in the classifier C, by using the selective attentions described in \u00a73 (1.5-entmax and sparsemax).", |
|
"cite_spans": [ |
|
{ |
|
"start": 102, |
|
"end": 122, |
|
"text": "Ancona et al., 2018;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 123, |
|
"end": 150, |
|
"text": "Wiegreffe and Pinter, 2019)", |
|
"ref_id": "BIBREF53" |
|
}, |
|
{ |
|
"start": 666, |
|
"end": 677, |
|
"text": "SST: {4.65,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 678, |
|
"end": 683, |
|
"text": "2.59,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 684, |
|
"end": 689, |
|
"text": "6.10,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 690, |
|
"end": 696, |
|
"text": "4.82};", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 697, |
|
"end": 710, |
|
"text": "IMDB: {28.23,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 711, |
|
"end": 717, |
|
"text": "12.94,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 718, |
|
"end": 724, |
|
"text": "39.40,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 725, |
|
"end": 732, |
|
"text": "24.18};", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 733, |
|
"end": 746, |
|
"text": "AGNEWS {5.65,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 747, |
|
"end": 752, |
|
"text": "4.14,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 753, |
|
"end": 758, |
|
"text": "4.01,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 759, |
|
"end": 765, |
|
"text": "9.68};", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 766, |
|
"end": 779, |
|
"text": "YELP: {60.61,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 780, |
|
"end": 786, |
|
"text": "23.86,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 787, |
|
"end": 792, |
|
"text": "9.15,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 793, |
|
"end": 800, |
|
"text": "33.18};", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 801, |
|
"end": 814, |
|
"text": "SNLI: {12.96,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 815, |
|
"end": 820, |
|
"text": "8.27,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 821, |
|
"end": 827, |
|
"text": "15.04,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 828, |
|
"end": 834, |
|
"text": "6.40}.", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 198, |
|
"end": 205, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
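
{

"text": "An illustrative PyTorch sketch of the input \u00d7 gradient ranking behind the top-k gradients explainer (model is assumed to map word embeddings to class scores; not the authors' code):\n\nimport torch\n\ndef topk_gradient_message(model, embeddings, k):\n    # Rank word i by |\u2202\u0177/\u2202x_i \u00b7 x_i| and keep the top-k words as the message.\n    x = embeddings.detach().clone().requires_grad_(True)\n    y_hat = model(x).max()  # score of the predicted class\n    y_hat.backward()\n    relevance = (x.grad * x).sum(dim=-1).abs()  # one importance score per word\n    return relevance.topk(min(k, relevance.numel())).indices.tolist()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experiments",

"sec_num": "5"

},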
|
|
{ |
|
"text": "The rationalizer models of Lei et al. (2016) and . These models compose the message by stochastically sampling rationale words, respectively using Bernoulli and HardKuma distributions. For SNLI, since these models use decomposable attention instead of RNNs, we form the message by selecting all premise words that are linked with any hypothesis word via a selected Bernoulli variable.", |
|
"cite_spans": [ |
|
{ |
|
"start": 27, |
|
"end": 44, |
|
"text": "Lei et al. (2016)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "4.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We also report a random baseline, which randomly picks k words as the message. We show examples of messages for all explainers in \u00a7I.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "4.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Results. Table 3 reports results for the communication success rate (CSR, Eq. 1) and for the accuracy of the layperson (ACC L ). For each explainer, we indicate which classifier it is explaining; note that the CSR is only comparable across explainers that use the same classifier. The goal of this experiment is to answer the following questions: (i) How do different explainers (wrappers, filters, embedded) compare to each other? (ii) Are selective sparse attention methods effective? (iii) How is the trade-off between message length and CSR?", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 9, |
|
"end": 16, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "4.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The first thing to note is that, as expected, the random baseline is much worse than the other ex-plainers, for all text classification datasets. 4 Among the non-trivial explainers, the attention and erasure outperform gradient methods: the erasure and top-k attention explainers have similar CSR, with a slight advantage for attention methods. Note that the attention explainers have the important advantage of requiring a single call to the classifier, whereas the erasure methods, being wrappers, require k calls. The worse performance of top-k gradient (less severe on AGNEWS) suggests that the words that locally cause bigger output changes are not necessarily the most informative ones. 5 Regarding the different attention models (softmax, entmax, and sparsemax), we see that sparse transformations tend to have slightly better ACC L , in addition to better ACC C (see Table 2 ). The embedded sparse attention methods achieved communication scores on par with the top-k attention methods without a prescribed k, while producing, by construction, more faithful explanations. Both our proposed models (sparsemax and 1.5entmax) seem generally more accurate than the Bernoulli model of Lei et al. (2016) and comparable to the HardKuma model of , with a much simpler training procedure, not requiring gradient estimation over stochastic computation graphs. Finally, Figure 2 shows the trade-off between the length of the message and the communication success rate for different values of k both for IMDB and SNLI (see Figure 4 in \u00a7G for the IWSLT experiments, with similar findings). Interestingly, we observe that CSR does not increase monotonically with k. As k increases, CSR starts by increasing but then it starts dropping when k becomes too large. This matches our intuition: in the two extreme cases where k = 0 and where k is the document length (corresponding to a full bag-of-words classifier) the message has no information about how the classifier C behaves. By setting k = 0, meaning that the layperson L only looks at the hypothesis, the CSR is reasonably high (\u223c74%), but as soon as we include a single word in the message this baseline is surpassed by 4 points or more.", |
|
"cite_spans": [ |
|
{ |
|
"start": 146, |
|
"end": 147, |
|
"text": "4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 693, |
|
"end": 694, |
|
"text": "5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1188, |
|
"end": 1205, |
|
"text": "Lei et al. (2016)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 875, |
|
"end": 882, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 1367, |
|
"end": 1375, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 1519, |
|
"end": 1527, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "4.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To fully assess the quality of the explanations in a more realistic forward simulation setting, we performed human evaluations, where the layperson L is a human instead of a machine.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Evaluation", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Joint training of E and L. So far we compared several explainers, but what happens if we train E and L jointly to optimize CSR directly, as described in \u00a74.2? We experiment with the IMDB and SNLI datasets, comparing with using humans for either the layperson, the explainer, or both.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Evaluation", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Human layperson. We randomly selected 200 documents for IMDB and SNLI to be annotated by humans. The extracted explanations (i.e. the selected words) were shuffled and displayed as a cloud of words to two annotators, who were asked to predict the label of each document when seeing only these explanations. For SNLI, we show the entire hypothesis as raw text and the premise as a cloud of words. The agreement between annotators and other annotation details can be found in \u00a7H.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Evaluation", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Human explainer. We also consider explanations generated by humans rather than machines. To this end, we used the e-SNLI corpus (Camburu et al., 2018) , which extends the SNLI with human rationales. Since the e-SNLI corpus does not provide highlights over the premise for neutral pairs, we removed them from the test set. 6 We summarize our results in Table 4 . We observe that, also with human laypeople, top-k attention achieves better results than top-k gradient, in terms of CSR and ACC, and that the ACC of erasure, attention models, and human explainers are close, reinforcing again the good results for these explainers. Among the different attention explainers, we see that selective attention explainers ( \u00a73) got very high ACC H , outperforming top-k explainers for SNLI. We also see that the joint explainer ( \u00a74.2) outperformed all the other explainers in ACC L and CSR L and achieved very high human performance on IMDB, largely surpassing other systems in CSR H and ACC H . This shows the potential of our communication-based framework to develop new post-hoc explainers with good forward simulation properties. However, for SNLI, the joint explainer had much lower CSR H and ACC H , suggesting that for this task more sophisticated explainers are required.", |
|
"cite_spans": [ |
|
{ |
|
"start": 128, |
|
"end": 150, |
|
"text": "(Camburu et al., 2018)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 322, |
|
"end": 323, |
|
"text": "6", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 352, |
|
"end": 359, |
|
"text": "Table 4", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Human Evaluation", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "There is a large body of work on analysis and interpretation of neural networks. Our work focuses on prediction explainability, different from transparency or model interpretability (Doshi-Velez, 2017; Lipton, 2018; Gilpin et al., 2018) . Rudin (2019) defines explainability as a plausible reconstruction of the decision-making process, and Riedl (2019) argues that it mimics what humans do when rationalizing past actions. This inspired our post-hoc explainers in \u00a74.2 and their use of the faithfulness loss term. Recent works questioned the interpretative ability of attention mechanisms (Jain and Wallace, 2019; Serrano and Smith, 2019) . Wiegreffe and Pinter (2019) distinguished between faithful and plausible explanations and introduced several diagnostic tools. Mullenbach et al. (2018) use human evaluation to show that attention mechanisms produce plausible explanations, consistent with our findings in \u00a76. None of these works, however, considered the sparse selective attention mechanisms proposed in \u00a73. Hard stochastic attention has been considered by Xu et al. (2015) ; Lei et al. (2016) ; Alvarez-Melis and Jaakkola (2017); , but a comparison with sparse attention and explanation strategies was still missing.", |
|
"cite_spans": [ |
|
{ |
|
"start": 182, |
|
"end": 201, |
|
"text": "(Doshi-Velez, 2017;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 202, |
|
"end": 215, |
|
"text": "Lipton, 2018;", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 216, |
|
"end": 236, |
|
"text": "Gilpin et al., 2018)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 239, |
|
"end": 251, |
|
"text": "Rudin (2019)", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 590, |
|
"end": 614, |
|
"text": "(Jain and Wallace, 2019;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 615, |
|
"end": 639, |
|
"text": "Serrano and Smith, 2019)", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 642, |
|
"end": 669, |
|
"text": "Wiegreffe and Pinter (2019)", |
|
"ref_id": "BIBREF53" |
|
}, |
|
{ |
|
"start": 769, |
|
"end": 793, |
|
"text": "Mullenbach et al. (2018)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 1065, |
|
"end": 1081, |
|
"text": "Xu et al. (2015)", |
|
"ref_id": "BIBREF54" |
|
}, |
|
{ |
|
"start": 1084, |
|
"end": 1101, |
|
"text": "Lei et al. (2016)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Besides attention-based methods, many other explainers have been proposed using gradients (Bach et al., 2015; Montavon et al., 2018; Ding et al., 2019) , leave-one-out strategies (Feng et al., 2018; Serrano and Smith, 2019) , or local perturbations (Ribeiro et al., 2016; Koh and Liang, 2017) , but a link with filters and wrappers in the feature selection literature has never been made. We believe the connections revealed in \u00a72 may be useful to develop new explainers in the future.", |
|
"cite_spans": [ |
|
{ |
|
"start": 90, |
|
"end": 109, |
|
"text": "(Bach et al., 2015;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 110, |
|
"end": 132, |
|
"text": "Montavon et al., 2018;", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 133, |
|
"end": 151, |
|
"text": "Ding et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 179, |
|
"end": 198, |
|
"text": "(Feng et al., 2018;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 199, |
|
"end": 223, |
|
"text": "Serrano and Smith, 2019)", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 249, |
|
"end": 271, |
|
"text": "(Ribeiro et al., 2016;", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 272, |
|
"end": 292, |
|
"text": "Koh and Liang, 2017)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Our trained explainers from \u00a74.2 draw inspiration from emergent communication (Lazaridou et al., 2016; Foerster et al., 2016; Havrylov and Titov, 2017) . Some of our proposed ideas (e.g., using sparsemax for end-to-end differentiability) may also be relevant to that task. Our work is also related to sparse auto-encoders, which seek sparse overcomplete vector representations to improve model interpretability (Faruqui et al., 2015; Trifonov et al., 2018; Subramanian et al., 2018) . In contrast to these works, we consider the non-zero attention probabilities as a form of explanation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 78, |
|
"end": 102, |
|
"text": "(Lazaridou et al., 2016;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 103, |
|
"end": 125, |
|
"text": "Foerster et al., 2016;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 126, |
|
"end": 151, |
|
"text": "Havrylov and Titov, 2017)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 411, |
|
"end": 433, |
|
"text": "(Faruqui et al., 2015;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 434, |
|
"end": 456, |
|
"text": "Trifonov et al., 2018;", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 457, |
|
"end": 482, |
|
"text": "Subramanian et al., 2018)", |
|
"ref_id": "BIBREF48" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Some recent work (Yu et al., 2019; DeYoung et al., 2020) advocates comprehensive rationales. While comprehensiveness could be useful in our framework to prevent trivial communication protocols between the explainer and layperson, we argue that it is not always a desirable property, since it leads to longer explanations and an increase of human cognitive load. In fact, our analysis of CSR as a function of message length (Figure 2 ) suggests that shorter explanations might be preferable. This is aligned to the \"explanation selection\" principle articulated by Miller (2019, \u00a74) : \"Similar to causal connection, people do not typically provide all causes for an event as an explanation. Instead, they select what they believe are the most relevant causes.\" Our sparse, selective attention mechanisms proposed in \u00a73 are inspired by this principle.", |
|
"cite_spans": [ |
|
{ |
|
"start": 17, |
|
"end": 34, |
|
"text": "(Yu et al., 2019;", |
|
"ref_id": "BIBREF55" |
|
}, |
|
{ |
|
"start": 35, |
|
"end": 56, |
|
"text": "DeYoung et al., 2020)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 563, |
|
"end": 580, |
|
"text": "Miller (2019, \u00a74)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 423, |
|
"end": 432, |
|
"text": "(Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We proposed a unified framework that regards explainability as a communication problem between an explainer and a layperson about a classifier's decision. We proposed new embedded methods based on selective attention, and post-hoc explainers trained to optimize communication success. In our experiments, we observed that attention mechanisms and erasure tend to outperform gradient methods on communication success rate, using both machines and humans as the layperson, and that selective attention is effective, while simpler to train than stochastic rationalizers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "In linear models this gradient equals the feature's weight.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The reason why we assume the layperson is a simple model is to encourage the explainer to produce simple and explanatory messages, in the sense that a simple model can learn with them. A more powerful layperson could potentially do well even with bad explanations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Other approaches, such asLei et al. (2016) andYu et al. (2019), develop rationalizers from cooperative or adversarial games between generators and encoders. However, those frameworks do not aim at explaining an external classifier.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "This is less pronounced in SNLI, as the hypothesis alone already gives strong baselines (Gururangan et al., 2018).5 A potential reason is that attention directly influences C's decisions, being an inside component of the model. Gradients and erasure, however, are extracted after decisions are performed. The reason might be similar to filter methods being generally inferior to embedded methods in static feature selection, since they ignore feature interactions that may jointly play a role in model's decisions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
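To illustrate the filter-versus-embedded analogy in the footnote above, here is a toy contrast (our sketch, with names of our choosing; scikit-learn is assumed available). The filter criterion scores each feature independently of the model, while the embedded criterion reads importances off weights learned jointly with the model, which is why it can capture feature interactions.

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=200, n_features=10, random_state=0)

# Filter method: scores each feature on its own, before/outside any model.
filter_scores = SelectKBest(f_classif, k=3).fit(X, y).scores_

# Embedded method: L1-regularized weights are learned jointly with the model.
clf = LogisticRegression(penalty="l1", solver="liblinear", C=0.1).fit(X, y)
embedded_scores = np.abs(clf.coef_).ravel()

print(np.argsort(filter_scores)[-3:])    # top-3 features by the filter criterion
print(np.argsort(embedded_scores)[-3:])  # top-3 features by the embedded criterion
```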
|
{ |
|
"text": "Note that the human rationales from eSNLI are not explanations about C, since the humans are explaining the gold labels. Therefore, we have CSR=ACC always.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work was supported by the European Research Council (ERC StG DeepSPIN 758969), by the P2020 program MAIA (contract 045909), and by the Funda\u00e7\u00e3o para a Ci\u00eancia e Tecnologia through contract UID/50008/2019. We are grateful to Thales Bertaglia, Erick Fonseca, Pedro Martins, Vlad Niculae, Ben Peters, Gon\u00e7alo Correia and Tsvetomila Mihaylova for insightful group discussion and for the participation in human evaluation experiments. We also thank the anonymous reviewers for their helpful discussion and feedback.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "A causal framework for explaining the predictions of black-box sequence-to-sequence models", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Alvarez-Melis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tommi", |
|
"middle": [], |
|
"last": "Jaakkola", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "412--421", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D17-1042" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Alvarez-Melis and Tommi Jaakkola. 2017. A causal framework for explaining the predictions of black-box sequence-to-sequence models. In Pro- ceedings of the 2017 Conference on Empirical Meth- ods in Natural Language Processing, pages 412- 421, Copenhagen, Denmark. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Towards better understanding of gradient-based attribution methods for deep neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Ancona", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Enea", |
|
"middle": [], |
|
"last": "Ceolini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cengiz\u00f6ztireli", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Markus", |
|
"middle": [], |
|
"last": "Gross", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Ancona, Enea Ceolini, Cengiz\u00d6ztireli, and Markus Gross. 2018. Towards better understanding of gradient-based attribution methods for deep neu- ral networks. In International Conference on Learn- ing Representations.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Explaining predictions of non-linear classifiers in NLP", |
|
"authors": [ |
|
{ |
|
"first": "Leila", |
|
"middle": [], |
|
"last": "Arras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Franziska", |
|
"middle": [], |
|
"last": "Horn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gr\u00e9goire", |
|
"middle": [], |
|
"last": "Montavon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus-Robert", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Samek", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 1st Workshop on Representation Learning for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--7", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W16-1601" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leila Arras, Franziska Horn, Gr\u00e9goire Montavon, Klaus-Robert M\u00fcller, and Wojciech Samek. 2016. Explaining predictions of non-linear classifiers in NLP. In Proceedings of the 1st Workshop on Repre- sentation Learning for NLP, pages 1-7, Berlin, Ger- many. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Explaining recurrent neural network predictions in sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Leila", |
|
"middle": [], |
|
"last": "Arras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gr\u00e9goire", |
|
"middle": [], |
|
"last": "Montavon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus-Robert", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Samek", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 8th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "159--168", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-5221" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leila Arras, Gr\u00e9goire Montavon, Klaus-Robert M\u00fcller, and Wojciech Samek. 2017. Explaining recurrent neural network predictions in sentiment analysis. In Proceedings of the 8th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis, pages 159-168, Copenhagen, Den- mark. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "On pixel-wise explanations for non-linear classifier decisions by layer-wise relevance propagation", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Bach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Binder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gr\u00e9goire", |
|
"middle": [], |
|
"last": "Montavon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frederick", |
|
"middle": [], |
|
"last": "Klauschen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus-Robert", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Samek", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "PLOS ONE", |
|
"volume": "10", |
|
"issue": "7", |
|
"pages": "1--46", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1371/journal.pone.0130140" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Bach, Alexander Binder, Gr\u00e9goire Mon- tavon, Frederick Klauschen, Klaus-Robert M\u00fcller, and Wojciech Samek. 2015. On pixel-wise explana- tions for non-linear classifier decisions by layer-wise relevance propagation. PLOS ONE, 10(7):1-46.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Neural machine translation by jointly learning to align and translate", |
|
"authors": [ |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly learning to align and translate. In Proceedings of the 2015 International Conference on Learning Rep- resentations.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Interpretable neural predictions with differentiable binary variables", |
|
"authors": [ |
|
{ |
|
"first": "Jasmijn", |
|
"middle": [], |
|
"last": "Bastings", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wilker", |
|
"middle": [], |
|
"last": "Aziz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Titov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2963--2977", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1284" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jasmijn Bastings, Wilker Aziz, and Ivan Titov. 2019. Interpretable neural predictions with differentiable binary variables. In Proceedings of the 57th Annual Meeting of the Association for Computational Lin- guistics, pages 2963-2977, Florence, Italy. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "e-snli: Natural language inference with natural language explanations", |
|
"authors": [ |
|
{ |
|
"first": "Oana-Maria", |
|
"middle": [], |
|
"last": "Camburu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rockt\u00e4schel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Lukasiewicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Advances in Neural Information Processing Systems 31", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9539--9549", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oana-Maria Camburu, Tim Rockt\u00e4schel, Thomas Lukasiewicz, and Phil Blunsom. 2018. e-snli: Nat- ural language inference with natural language expla- nations. In Advances in Neural Information Pro- cessing Systems 31, pages 9539-9549. Curran As- sociates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Overview of the iwslt 2017 evaluation campaign", |
|
"authors": [ |
|
{ |
|
"first": "Mauro", |
|
"middle": [], |
|
"last": "Cettolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcello", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luisa", |
|
"middle": [], |
|
"last": "Bentivogli", |
|
"suffix": "" |
|
}, |
|
{
"first": "Jan",
"middle": [],
"last": "Niehues",
"suffix": ""
},
{
"first": "Sebastian",
"middle": [],
"last": "St\u00fcker",
"suffix": ""
},
{
"first": "Katsuhito",
"middle": [],
"last": "Sudoh",
"suffix": ""
},
{
"first": "Koichiro",
"middle": [],
"last": "Yoshino",
"suffix": ""
},
{
"first": "Christian",
"middle": [],
"last": "Federmann",
"suffix": ""
}
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 14th International Workshop on Spoken Language Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2--14", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mauro Cettolo, Marcello Federico, Luisa Bentivogli, Niehues Jan, St\u00fcker Sebastian, Sudoh Katsuitho, Yoshino Koichiro, and Federmann Christian. 2017. Overview of the iwslt 2017 evaluation campaign. In Proceedings of the 14th International Workshop on Spoken Language Translation, pages 2-14.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Word association norms, mutual information, and lexicography", |
|
"authors": [ |
|
{ |
|
"first": "Kenneth", |
|
"middle": [ |
|
"Ward" |
|
], |
|
"last": "Church", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Hanks", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1989, |
|
"venue": "27th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "76--83", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/981623.981633" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kenneth Ward Church and Patrick Hanks. 1989. Word association norms, mutual information, and lexicog- raphy. In 27th Annual Meeting of the Association for Computational Linguistics, pages 76-83, Van- couver, British Columbia, Canada. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Adaptively sparse transformers", |
|
"authors": [ |
|
{
"first": "Gon\u00e7alo",
"middle": [
"M"
],
"last": "Correia",
"suffix": ""
},
{
"first": "Vlad",
"middle": [],
"last": "Niculae",
"suffix": ""
},
{
"first": "Andr\u00e9",
"middle": [
"F T"
],
"last": "Martins",
"suffix": ""
}
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2174--2184", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1223" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gon\u00e7alo M. Correia, Vlad Niculae, and Andr\u00e9 F. T. Martins. 2019. Adaptively sparse transformers. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 2174- 2184, Hong Kong, China. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "ERASER: A benchmark to evaluate rationalized NLP models", |
|
"authors": [ |
|
{ |
|
"first": "Jay", |
|
"middle": [], |
|
"last": "Deyoung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sarthak", |
|
"middle": [], |
|
"last": "Jain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nazneen", |
|
"middle": [], |
|
"last": "Fatema Rajani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Lehman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Byron", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Wallace", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4443--4458", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.408" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jay DeYoung, Sarthak Jain, Nazneen Fatema Rajani, Eric Lehman, Caiming Xiong, Richard Socher, and Byron C. Wallace. 2020. ERASER: A benchmark to evaluate rationalized NLP models. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4443-4458, On- line. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Saliency-driven word alignment interpretation for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Shuoyang", |
|
"middle": [], |
|
"last": "Ding", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hainan", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Fourth Conference on Machine Translation", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1--12", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-5201" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shuoyang Ding, Hainan Xu, and Philipp Koehn. 2019. Saliency-driven word alignment interpretation for neural machine translation. In Proceedings of the Fourth Conference on Machine Translation (Volume 1: Research Papers), pages 1-12, Florence, Italy. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Towards a rigorous science of interpretable machine learning", |
|
"authors": [ |
|
{
"first": "Finale",
"middle": [],
"last": "Doshi-Velez",
"suffix": ""
},
{
"first": "Been",
"middle": [],
"last": "Kim",
"suffix": ""
}
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1702.08608" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Been Doshi-Velez, Finale; Kim. 2017. Towards a rig- orous science of interpretable machine learning. In eprint arXiv:1702.08608.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Sparse overcomplete word vector representations", |
|
"authors": [ |
|
{ |
|
"first": "Manaal", |
|
"middle": [], |
|
"last": "Faruqui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yulia", |
|
"middle": [], |
|
"last": "Tsvetkov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dani", |
|
"middle": [], |
|
"last": "Yogatama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1491--1500", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/P15-1144" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Manaal Faruqui, Yulia Tsvetkov, Dani Yogatama, Chris Dyer, and Noah A. Smith. 2015. Sparse overcom- plete word vector representations. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 1491-1500, Beijing, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Pathologies of neural models make interpretations difficult", |
|
"authors": [ |
|
{
"first": "Shi",
"middle": [],
"last": "Feng",
"suffix": ""
},
{
"first": "Eric",
"middle": [],
"last": "Wallace",
"suffix": ""
},
{
"first": "Alvin",
"middle": [],
"last": "Grissom",
"suffix": "II"
},
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pedro", |
|
"middle": [], |
|
"last": "Rodriguez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jordan", |
|
"middle": [], |
|
"last": "Boyd-Graber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3719--3728", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1407" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shi Feng, Eric Wallace, Alvin Grissom II, Mohit Iyyer, Pedro Rodriguez, and Jordan Boyd-Graber. 2018. Pathologies of neural models make interpretations difficult. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 3719-3728, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Learning to communicate with deep multi-agent reinforcement learning", |
|
"authors": [ |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Foerster", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "29", |
|
"issue": "", |
|
"pages": "2137--2145", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jakob Foerster, Ioannis Alexandros Assael, Nando de Freitas, and Shimon Whiteson. 2016. Learning to communicate with deep multi-agent reinforcement learning. In Advances in Neural Information Pro- cessing Systems 29, pages 2137-2145. Curran Asso- ciates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Explaining explanations: An overview of interpretability of machine learning", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Gilpin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Bau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [ |
|
"Z" |
|
], |
|
"last": "Yuan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Bajwa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Specter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Kagal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "2018 IEEE 5th International Conference on Data Science and Advanced Analytics (DSAA)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "80--89", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "L. H. Gilpin, D. Bau, B. Z. Yuan, A. Bajwa, M. Specter, and L. Kagal. 2018. Explaining explanations: An overview of interpretability of machine learning. In 2018 IEEE 5th International Conference on Data Science and Advanced Analytics (DSAA), pages 80- 89.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Neural Network Methods in Natural Language Processing", |
|
"authors": [ |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graeme", |
|
"middle": [], |
|
"last": "Hirst", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoav Goldberg and Graeme Hirst. 2017. Neural Network Methods in Natural Language Processing. Morgan & Claypool Publishers.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Annotation artifacts in natural language inference data", |
|
"authors": [ |
|
{
"first": "Suchin",
"middle": [],
"last": "Gururangan",
"suffix": ""
},
{
"first": "Swabha",
"middle": [],
"last": "Swayamdipta",
"suffix": ""
},
{
"first": "Omer",
"middle": [],
"last": "Levy",
"suffix": ""
},
{
"first": "Roy",
"middle": [],
"last": "Schwartz",
"suffix": ""
},
{
"first": "Samuel",
"middle": [],
"last": "Bowman",
"suffix": ""
},
{
"first": "Noah",
"middle": [
"A"
],
"last": "Smith",
"suffix": ""
}
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "107--112", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-2017" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Suchin Gururangan, Swabha Swayamdipta, Omer Levy, Roy Schwartz, Samuel Bowman, and Noah A. Smith. 2018. Annotation artifacts in natural lan- guage inference data. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 107-112, New Orleans, Louisiana. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "An introduction to variable and feature selection", |
|
"authors": [ |
|
{ |
|
"first": "Isabelle", |
|
"middle": [], |
|
"last": "Guyon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andr\u00e9", |
|
"middle": [], |
|
"last": "Elisseeff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "1157--1182", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Isabelle Guyon and Andr\u00e9 Elisseeff. 2003. An intro- duction to variable and feature selection. Journal of Machine Learning Research, 3(null):1157-1182.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Gene selection for cancer classification using support vector machines. Machine Learning", |
|
"authors": [ |
|
{ |
|
"first": "Isabelle", |
|
"middle": [], |
|
"last": "Guyon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Barnhill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vladimir", |
|
"middle": [], |
|
"last": "Vapnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "", |
|
"volume": "46", |
|
"issue": "", |
|
"pages": "389--422", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1023/A:1012487302797" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Isabelle Guyon, Jason Weston, Stephen Barnhill, and Vladimir Vapnik. 2002. Gene selection for cancer classification using support vector machines. Ma- chine Learning, 46(1-3):389-422.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Emergence of language with multi-agent games: Learning to communicate with sequences of symbols", |
|
"authors": [ |
|
{ |
|
"first": "Serhii", |
|
"middle": [], |
|
"last": "Havrylov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Titov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "2149--2159", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Serhii Havrylov and Ivan Titov. 2017. Emergence of language with multi-agent games: Learning to com- municate with sequences of symbols. In Advances in Neural Information Processing Systems 30, pages 2149-2159. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Towards faithfully interpretable NLP systems: How should we define and evaluate faithfulness?", |
|
"authors": [ |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Jacovi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4198--4205", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.386" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alon Jacovi and Yoav Goldberg. 2020. Towards faith- fully interpretable NLP systems: How should we de- fine and evaluate faithfulness? In Proceedings of the 58th Annual Meeting of the Association for Compu- tational Linguistics, pages 4198-4205, Online. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Attention is not Explanation", |
|
"authors": [ |
|
{ |
|
"first": "Sarthak", |
|
"middle": [], |
|
"last": "Jain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Byron", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Wallace", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "3543--3556", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1357" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sarthak Jain and Byron C. Wallace. 2019. Attention is not Explanation. In Proceedings of the 2019 Con- ference of the North American Chapter of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies, Volume 1 (Long and Short Pa- pers), pages 3543-3556, Minneapolis, Minnesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Understanding black-box predictions via influence functions", |
|
"authors": [ |
|
{
"first": "Pang",
"middle": [
"Wei"
],
"last": "Koh",
"suffix": ""
},
{
"first": "Percy",
"middle": [],
"last": "Liang",
"suffix": ""
}
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of Machine Learning Research", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1885--1894", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pang Wei Koh and Percy Liang. 2017. Understanding black-box predictions via influence functions. vol- ume 70 of Proceedings of Machine Learning Re- search, pages 1885-1894, International Convention Centre, Sydney, Australia. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Wrappers for feature subset selection", |
|
"authors": [ |
|
{ |
|
"first": "Ron", |
|
"middle": [], |
|
"last": "Kohavi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "John", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Artif. Intell", |
|
"volume": "97", |
|
"issue": "1-2", |
|
"pages": "273--324", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/S0004-3702(97)00043-X" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ron Kohavi and George H. John. 1997. Wrap- pers for feature subset selection. Artif. Intell., 97(1-2):273-324.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Joey NMT: A minimalist NMT toolkit for novices", |
|
"authors": [ |
|
{ |
|
"first": "Julia", |
|
"middle": [], |
|
"last": "Kreutzer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jasmijn", |
|
"middle": [], |
|
"last": "Bastings", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Riezler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "109--114", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-3019" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julia Kreutzer, Jasmijn Bastings, and Stefan Riezler. 2019. Joey NMT: A minimalist NMT toolkit for novices. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Nat- ural Language Processing (EMNLP-IJCNLP): Sys- tem Demonstrations, pages 109-114, Hong Kong, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Multi-agent cooperation and the emergence of (natural) language", |
|
"authors": [ |
|
{ |
|
"first": "Angeliki", |
|
"middle": [], |
|
"last": "Lazaridou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Peysakhovich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Baroni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Angeliki Lazaridou, Alexander Peysakhovich, and Marco Baroni. 2016. Multi-agent cooperation and the emergence of (natural) language. In Interna- tional Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Rationalizing neural predictions", |
|
"authors": [ |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Lei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Regina", |
|
"middle": [], |
|
"last": "Barzilay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tommi", |
|
"middle": [], |
|
"last": "Jaakkola", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "107--117", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D16-1011" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tao Lei, Regina Barzilay, and Tommi Jaakkola. 2016. Rationalizing neural predictions. In Proceedings of the 2016 Conference on Empirical Methods in Nat- ural Language Processing, pages 107-117, Austin, Texas. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Convention: A philosophical study", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Lewis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1969, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David K. Lewis. 1969. Convention: A philosophical study.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Visualizing and understanding neural models in NLP", |
|
"authors": [ |
|
{ |
|
"first": "Jiwei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xinlei", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "681--691", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N16-1082" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiwei Li, Xinlei Chen, Eduard Hovy, and Dan Jurafsky. 2016a. Visualizing and understanding neural mod- els in NLP. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 681-691, San Diego, California. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Understanding neural networks through representation erasure", |
|
"authors": [ |
|
{ |
|
"first": "Jiwei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Will", |
|
"middle": [], |
|
"last": "Monroe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1612.08220" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiwei Li, Will Monroe, and Dan Jurafsky. 2016b. Un- derstanding neural networks through representation erasure. arXiv preprint arXiv:1612.08220.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "The mythos of model interpretability", |
|
"authors": [ |
|
{ |
|
"first": "Zachary", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Lipton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Commun. ACM", |
|
"volume": "", |
|
"issue": "10", |
|
"pages": "36--43", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zachary C. Lipton. 2018. The mythos of model inter- pretability. Commun. ACM, 61(10):36-43.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Decoupled weight decay regularization", |
|
"authors": [ |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Loshchilov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frank", |
|
"middle": [], |
|
"last": "Hutter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilya Loshchilov and Frank Hutter. 2019. Decoupled weight decay regularization. In International Con- ference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "From softmax to sparsemax: A sparse model of attention and multi-label classification", |
|
"authors": [ |
|
{ |
|
"first": "Andre", |
|
"middle": [], |
|
"last": "Martins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ramon", |
|
"middle": [], |
|
"last": "Astudillo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of Machine Learning Research", |
|
"volume": "48", |
|
"issue": "", |
|
"pages": "1614--1623", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andre Martins and Ramon Astudillo. 2016. From soft- max to sparsemax: A sparse model of attention and multi-label classification. volume 48 of Proceedings of Machine Learning Research, pages 1614-1623, New York, New York, USA. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Explanation in artificial intelligence: Insights from the social sciences", |
|
"authors": [ |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Miller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Artificial Intelligence", |
|
"volume": "267", |
|
"issue": "", |
|
"pages": "1--38", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.artint.2018.07.007" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tim Miller. 2019. Explanation in artificial intelligence: Insights from the social sciences. Artificial Intelli- gence, 267:1 -38.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Methods for interpreting and understanding deep neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Gr\u00e9goire", |
|
"middle": [], |
|
"last": "Montavon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Samek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus-Robert", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Digital Signal Processing", |
|
"volume": "73", |
|
"issue": "", |
|
"pages": "1--15", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.dsp.2017.10.011" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gr\u00e9goire Montavon, Wojciech Samek, and Klaus- Robert M\u00fcller. 2018. Methods for interpreting and understanding deep neural networks. Digital Signal Processing, 73:1 -15.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Explainable prediction of medical codes from clinical text", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Mullenbach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sarah", |
|
"middle": [], |
|
"last": "Wiegreffe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jon", |
|
"middle": [], |
|
"last": "Duke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimeng", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Eisenstein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1101--1111", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-1100" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James Mullenbach, Sarah Wiegreffe, Jon Duke, Jimeng Sun, and Jacob Eisenstein. 2018. Explainable pre- diction of medical codes from clinical text. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 1101-1111, New Orleans, Louisiana. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "A decomposable attention model for natural language inference", |
|
"authors": [ |
|
{ |
|
"first": "Ankur", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oscar", |
|
"middle": [], |
|
"last": "T\u00e4ckstr\u00f6m", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dipanjan", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2249--2255", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D16-1244" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ankur Parikh, Oscar T\u00e4ckstr\u00f6m, Dipanjan Das, and Jakob Uszkoreit. 2016. A decomposable attention model for natural language inference. In Proceed- ings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 2249-2255, Austin, Texas. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "GloVe: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/D14-1162" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. GloVe: Global vectors for word representation. In Proceedings of the 2014 Confer- ence on Empirical Methods in Natural Language Processing (EMNLP), pages 1532-1543, Doha, Qatar. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Sparse sequence-to-sequence models", |
|
"authors": [ |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vlad", |
|
"middle": [], |
|
"last": "Niculae", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andr\u00e9", |
|
"middle": [ |
|
"F T" |
|
], |
|
"last": "Martins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1504--1519", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1146" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ben Peters, Vlad Niculae, and Andr\u00e9 F. T. Martins. 2019. Sparse sequence-to-sequence models. In Pro- ceedings of the 57th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 1504- 1519, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Deep contextualized word representations", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "2227--2237", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-1202" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word rep- resentations. In Proceedings of the 2018 Confer- ence of the North American Chapter of the Associ- ation for Computational Linguistics: Human Lan- guage Technologies, Volume 1 (Long Papers), pages 2227-2237, New Orleans, Louisiana. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "why should i trust you?\": Explaining the predictions of any classifier. KDD '16", |
|
"authors": [ |
|
{
"first": "Marco",
"middle": [
"Tulio"
],
"last": "Ribeiro",
"suffix": ""
},
{
"first": "Sameer",
"middle": [],
"last": "Singh",
"suffix": ""
},
{
"first": "Carlos",
"middle": [],
"last": "Guestrin",
"suffix": ""
}
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1135--1144", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/2939672.2939778" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Tulio Ribeiro, Sameer Singh, and Carlos Guestrin. 2016. \"why should i trust you?\": Explain- ing the predictions of any classifier. KDD '16, page 1135-1144, New York, NY, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Human-centered artificial intelligence and machine learning", |
|
"authors": [ |
|
{
"first": "Mark",
"middle": [
"O"
],
"last": "Riedl",
"suffix": ""
}
|
], |
|
"year": 2019, |
|
"venue": "Human Behavior and Emerging Technologies", |
|
"volume": "1", |
|
"issue": "1", |
|
"pages": "33--36", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mark O Riedl. 2019. Human-centered artificial intelli- gence and machine learning. Human Behavior and Emerging Technologies, 1(1):33-36.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Stop explaining black box machine learning models for high stakes decisions and use interpretable models instead", |
|
"authors": [ |
|
{ |
|
"first": "Cynthia", |
|
"middle": [], |
|
"last": "Rudin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Nature Machine Intelligence", |
|
"volume": "1", |
|
"issue": "5", |
|
"pages": "206--215", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cynthia Rudin. 2019. Stop explaining black box ma- chine learning models for high stakes decisions and use interpretable models instead. Nature Machine Intelligence, 1(5):206-215.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "Is attention interpretable?", |
|
"authors": [ |
|
{ |
|
"first": "Sofia", |
|
"middle": [], |
|
"last": "Serrano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2931--2951", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1282" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sofia Serrano and Noah A. Smith. 2019. Is attention interpretable? In Proceedings of the 57th Annual Meeting of the Association for Computational Lin- guistics, pages 2931-2951, Florence, Italy. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "Spine: Sparse interpretable neural embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Anant", |
|
"middle": [], |
|
"last": "Subramanian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danish", |
|
"middle": [], |
|
"last": "Pruthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Harsh", |
|
"middle": [], |
|
"last": "Jhamtani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Taylor", |
|
"middle": [], |
|
"last": "Berg-Kirkpatrick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Thirty Second AAAI Conference on Artificial Intelligence (AAAI)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anant Subramanian, Danish Pruthi, Harsh Jhamtani, Taylor Berg-Kirkpatrick, and Eduard Hovy. 2018. Spine: Sparse interpretable neural embeddings. In Proceedings of the Thirty Second AAAI Conference on Artificial Intelligence (AAAI).", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "Regression shrinkage and selection via the lasso", |
|
"authors": [ |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Tibshirani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "Journal of the Royal Statistical Society: Series B (Methodological)", |
|
"volume": "58", |
|
"issue": "1", |
|
"pages": "267--288", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1111/j.2517-6161.1996.tb02080.x" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert Tibshirani. 1996. Regression shrinkage and se- lection via the lasso. Journal of the Royal Statistical Society: Series B (Methodological), 58(1):267-288.", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "Learning and evaluating sparse interpretable sentence embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Valentin", |
|
"middle": [], |
|
"last": "Trifonov", |
|
"suffix": "" |
|
}, |
|
{
"first": "Octavian-Eugen",
"middle": [],
"last": "Ganea",
"suffix": ""
},
{
"first": "Anna",
"middle": [],
"last": "Potapenko",
"suffix": ""
},
{
"first": "Thomas",
"middle": [],
"last": "Hofmann",
"suffix": ""
}
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "200--210", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-5422" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Valentin Trifonov, Octavian-Eugen Ganea, Anna Potapenko, and Thomas Hofmann. 2018. Learning and evaluating sparse interpretable sentence embed- dings. In Proceedings of the 2018 EMNLP Work- shop BlackboxNLP: Analyzing and Interpreting Neu- ral Networks for NLP, pages 200-210, Brussels, Bel- gium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "Possible generalization of boltzmann-gibbs statistics", |
|
"authors": [ |
|
{ |
|
"first": "Constantino", |
|
"middle": [], |
|
"last": "Tsallis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1988, |
|
"venue": "Journal of Statistical Physics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Constantino Tsallis. 1988. Possible generalization of boltzmann-gibbs statistics. Journal of Statistical Physics.", |
|
"links": null |
|
}, |
|
"BIBREF52": { |
|
"ref_id": "b52", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{
"first": "\u0141ukasz",
"middle": [],
"last": "Kaiser",
"suffix": ""
},
{
"first": "Illia",
"middle": [],
"last": "Polosukhin",
"suffix": ""
}
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141 ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems 30, pages 5998-6008. Curran Asso- ciates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF53": { |
|
"ref_id": "b53", |
|
"title": "Attention is not not explanation", |
|
"authors": [ |
|
{ |
|
"first": "Sarah", |
|
"middle": [], |
|
"last": "Wiegreffe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuval", |
|
"middle": [], |
|
"last": "Pinter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "11--20", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1002" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sarah Wiegreffe and Yuval Pinter. 2019. Attention is not not explanation. In Proceedings of the 2019 Con- ference on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing (EMNLP- IJCNLP), pages 11-20, Hong Kong, China. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF54": { |
|
"ref_id": "b54", |
|
"title": "Show, attend and tell: Neural image caption generation with visual attention", |
|
"authors": [ |
|
{ |
|
"first": "Kelvin", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Kiros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aaron", |
|
"middle": [], |
|
"last": "Courville", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhudinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rich", |
|
"middle": [], |
|
"last": "Zemel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of Machine Learning Research", |
|
"volume": "37", |
|
"issue": "", |
|
"pages": "2048--2057", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kelvin Xu, Jimmy Ba, Ryan Kiros, Kyunghyun Cho, Aaron Courville, Ruslan Salakhudinov, Rich Zemel, and Yoshua Bengio. 2015. Show, attend and tell: Neural image caption generation with visual at- tention. volume 37 of Proceedings of Machine Learning Research, pages 2048-2057, Lille, France. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF55": { |
|
"ref_id": "b55", |
|
"title": "Rethinking cooperative rationalization: Introspective extraction and complement control", |
|
"authors": [ |
|
{ |
|
"first": "Mo", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shiyu", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tommi", |
|
"middle": [], |
|
"last": "Jaakkola", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4094--4103", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1420" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mo Yu, Shiyu Chang, Yang Zhang, and Tommi Jaakkola. 2019. Rethinking cooperative rationaliza- tion: Introspective extraction and complement con- trol. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natu- ral Language Processing (EMNLP-IJCNLP), pages 4094-4103, Hong Kong, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF56": { |
|
"ref_id": "b56", |
|
"title": "Modeling annotators: A generative approach to learning from annotator rationales", |
|
"authors": [ |
|
{ |
|
"first": "Omar", |
|
"middle": [], |
|
"last": "Zaidan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Eisner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 2008 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "31--40", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Omar Zaidan and Jason Eisner. 2008. Modeling anno- tators: A generative approach to learning from an- notator rationales. In Proceedings of the 2008 Con- ference on Empirical Methods in Natural Language Processing, pages 31-40, Honolulu, Hawaii. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF57": { |
|
"ref_id": "b57", |
|
"title": "Regularization and variable selection via the elastic net", |
|
"authors": [ |
|
{ |
|
"first": "Hui", |
|
"middle": [], |
|
"last": "Zou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Hastie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Journal of the Royal Statistical Society: Series B (Statistical Methodology)", |
|
"volume": "67", |
|
"issue": "2", |
|
"pages": "301--320", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1111/j.1467-9868.2005.00503.x" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hui Zou and Trevor Hastie. 2005. Regularization and variable selection via the elastic net. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 67(2):301-320.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Our framework to model explainability as communication. Predictions\u0177 are made by a classifier C; an explainer E (either embedded in C or operating post-hoc) accesses these predictions and communicates an explanation (a message m) to the layperson L. Success of the communication is dictated by the ability of L and C to match their predictions:\u1ef9 ?" |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Message sparsity analysis for IMDB(top) and SNLI (bottom). For SNLI, k = 0 corresponds to a case where the layperson only sees the hypothesis. The rightmost entry represents an explainer that simply passes forward all words to the layperson." |
|
}, |
|
"TABREF0": { |
|
"text": "Overview of static and dynamic feature selection techniques.", |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF1": { |
|
"text": "where[[\u2022]] is the Iverson bracket notation.", |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF2": { |
|
"text": "88.96 95.62 68.78 69.81 RNN, softmax (C) 86.16 91.79 96.28 75.80 78.34 -,1.5-entmax (Cent) 86.11 91.72 96.30 75.72 79.20 -, sparsemax (Csp) 86.27 91.52 96.37 75.72 78.78 Bernoulli (Cbern) 81.99 86.99 95.68 70.12 79.24 HardKuma (Chk) 84.13 91.06 96.38 74.36 85.49", |
|
"num": null, |
|
"content": "<table><tr><td>CLASSIFIER</td><td>SST</td><td>IMDB AGN. YELP SNLI</td></tr><tr><td>BoW (L)</td><td>82.54</td><td/></tr></table>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"text": "Accuracies of the original classifiers on text classification and natural language inference.", |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF5": { |
|
"text": "CSR and layperson accuracy (ACC L ) for several explainers. For each explainer, we indicate the corresponding classifier from", |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF7": { |
|
"text": "Results of the human evaluation. Reported are average message length k, human layperson CSR H /ACC H , and machine layperson CSR L /ACC L . Only explainers of the same classifier can be compared in terms of CSR.", |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |