|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T01:08:51.677709Z" |
|
}, |
|
"title": "The elephant in the interpretability room: Why use attention as explanation when we have saliency methods?", |
|
"authors": [ |
|
{ |
|
"first": "Jasmijn", |
|
"middle": [], |
|
"last": "Bastings", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "bastings@google.com" |
|
}, |
|
{ |
|
"first": "Katja", |
|
"middle": [], |
|
"last": "Filippova", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "katjaf@google.com" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "There is a recent surge of interest in using attention as explanation of model predictions, with mixed evidence on whether attention can be used as such. While attention conveniently gives us one weight per input token and is easily extracted, it is often unclear toward what goal it is used as explanation. We find that often that goal, whether explicitly stated or not, is to find out what input tokens are the most relevant to a prediction, and that the implied user for the explanation is a model developer. For this goal and user, we argue that input saliency methods are better suited, and that there are no compelling reasons to use attention, despite the coincidence that it provides a weight for each input. With this position paper, we hope to shift some of the recent focus on attention to saliency methods, and for authors to clearly state the goal and user for their explanations.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "There is a recent surge of interest in using attention as explanation of model predictions, with mixed evidence on whether attention can be used as such. While attention conveniently gives us one weight per input token and is easily extracted, it is often unclear toward what goal it is used as explanation. We find that often that goal, whether explicitly stated or not, is to find out what input tokens are the most relevant to a prediction, and that the implied user for the explanation is a model developer. For this goal and user, we argue that input saliency methods are better suited, and that there are no compelling reasons to use attention, despite the coincidence that it provides a weight for each input. With this position paper, we hope to shift some of the recent focus on attention to saliency methods, and for authors to clearly state the goal and user for their explanations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Attention mechanisms (Bahdanau et al., 2015) have allowed for performance gains in many areas of NLP, including, inter alia, machine translation (Bahdanau et al., 2015; Luong et al., 2015; Vaswani et al., 2017) , natural language generation (e.g., Rush et al., 2015; Narayan et al., 2018) , and natural language inference (e.g., Parikh et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 44, |
|
"text": "(Bahdanau et al., 2015)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 145, |
|
"end": 168, |
|
"text": "(Bahdanau et al., 2015;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 169, |
|
"end": 188, |
|
"text": "Luong et al., 2015;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 189, |
|
"end": 210, |
|
"text": "Vaswani et al., 2017)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 248, |
|
"end": 266, |
|
"text": "Rush et al., 2015;", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 267, |
|
"end": 288, |
|
"text": "Narayan et al., 2018)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 329, |
|
"end": 349, |
|
"text": "Parikh et al., 2016)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Attention has not only allowed for better performance, it also provides a window into how a model is operating. For example, for machine translation, Bahdanau et al. (2015) visualize what source tokens the target tokens are attending to, often aligning words that are translations of each other.", |
|
"cite_spans": [ |
|
{ |
|
"start": 150, |
|
"end": 172, |
|
"text": "Bahdanau et al. (2015)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Whether the window that attention gives into how a model operates amounts to explanation has recently become subject to debate ( \u00a72). While many papers published on the topic of explainable AI have been criticised for not defining explanations (Lipton, 2018; Miller, 2019) , the first key studies which spawned interest in attention as explanation (Jain and Wallace, 2019; Serrano and Smith, 2019; Wiegreffe and Pinter, 2019) do say that they are interested in whether attention weights faithfully represent the responsibility each input token has on a model prediction. That is, the narrow definition of explanation implied there is that it points at the most important input tokens for a prediction (arg max), accurately summarizing the reasoning process of the model (Jacovi and Goldberg, 2020b) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 244, |
|
"end": 258, |
|
"text": "(Lipton, 2018;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 259, |
|
"end": 272, |
|
"text": "Miller, 2019)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 348, |
|
"end": 372, |
|
"text": "(Jain and Wallace, 2019;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 373, |
|
"end": 397, |
|
"text": "Serrano and Smith, 2019;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 398, |
|
"end": 425, |
|
"text": "Wiegreffe and Pinter, 2019)", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 770, |
|
"end": 798, |
|
"text": "(Jacovi and Goldberg, 2020b)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The above works have inspired some to find ways to make attention more faithful and/or plausible, by changing the nature of the hidden representations attention is computed over using special training objectives (e.g., Mohankumar et al., 2020; Tutek and Snajder, 2020) . Others have proposed replacing the attention mechanism with a latent alignment model (Deng et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 219, |
|
"end": 243, |
|
"text": "Mohankumar et al., 2020;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 244, |
|
"end": 268, |
|
"text": "Tutek and Snajder, 2020)", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 356, |
|
"end": 375, |
|
"text": "(Deng et al., 2018)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Interestingly, the implied definition of explanation in the cited works, happens to coincide with what input saliency methods ( \u00a73) are designed to produce (Li et al., 2016a; Sundararajan et al., 2017; Ribeiro et al., 2016; Montavon et al., 2019, i.a.) . Moreover, the user of that explanation is often implied to be a model developer, to which faithfulness is important. The elephant in the room is therefore: If the goal of using attention as explanation is to assign importance weights to the input tokens in a faithful manner, why should the attention mechanism be preferred over the multitude of existing input saliency methods designed to do exactly that? In this position paper, with that goal in mind, we argue that we should pay attention no heed ( \u00a74). We propose that we reduce our focus on attention as explanation, and shift it to input saliency methods instead. However, we do emphasize that understanding the role of attention is still a valid research goal ( \u00a75), and finally, we discuss a few approaches that go beyond saliency ( \u00a76).", |
|
"cite_spans": [ |
|
{ |
|
"start": 156, |
|
"end": 174, |
|
"text": "(Li et al., 2016a;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 175, |
|
"end": 201, |
|
"text": "Sundararajan et al., 2017;", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 202, |
|
"end": 223, |
|
"text": "Ribeiro et al., 2016;", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 224, |
|
"end": 252, |
|
"text": "Montavon et al., 2019, i.a.)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this section we summarize the debate on whether attention is explanation. The debate mostly features simple BiLSTM text classifiers (see Figure 1 ). Unlike Transformers (Vaswani et al., 2017) , they only contain a single attention mechanism, which is typically MLP-based (Bahdanau et al., 2015) :", |
|
"cite_spans": [ |
|
{ |
|
"start": 172, |
|
"end": 194, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 274, |
|
"end": 297, |
|
"text": "(Bahdanau et al., 2015)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 140, |
|
"end": 148, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The Attention Debate", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "e i = v tanh(W h h i +W q q) \u03b1 i = exp e i k exp e k", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "The Attention Debate", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "where \u03b1 i is the attention score for BiLSTM state h i . When there is a single input text, there is no query, and q is either a trained parameter (like v, W h and W q ), or W q q is simply left out of Eq. 1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Attention Debate", |
|
"sec_num": "2" |
|
}, |
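
As a concrete illustration of Eq. 1, here is a minimal sketch of MLP-based (additive) attention over BiLSTM states. It is written in plain NumPy with toy shapes; the function name, dimensions, and random parameters are ours and not taken from any of the cited implementations.

```python
import numpy as np

def additive_attention(H, v, W_h, W_q=None, q=None):
    """MLP-based (additive) attention as in Eq. 1.

    H: (n, d) matrix of BiLSTM states h_1..h_n; v: (d_a,); W_h: (d_a, d).
    With a single input text, q can simply be a trained parameter, or the
    W_q q term can be dropped entirely (as in the toy usage below).
    Returns the weights alpha_i and the context vector sum_i alpha_i h_i.
    """
    e = np.tanh(H @ W_h.T + (W_q @ q if q is not None else 0.0)) @ v  # scores e_i
    alpha = np.exp(e - e.max())
    alpha /= alpha.sum()                      # softmax over time steps
    return alpha, alpha @ H

# toy usage: 4 tokens, state size 6, attention size 5
rng = np.random.default_rng(0)
H = rng.normal(size=(4, 6))
alpha, context = additive_attention(H, rng.normal(size=5), rng.normal(size=(5, 6)))
print(alpha, alpha.sum())                     # one weight per token, summing to 1
```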
|
{ |
|
"text": "Jain and Wallace 2019show that attention is often uncorrelated with gradient-based feature importance measures, and that one can often find a completely different set of attention weights that results in the same prediction. In addition to that, Serrano and Smith (2019) find, by modifying attention weights, that they often do not identify those representations that are most most important to the prediction of the model. However, Wiegreffe and Pinter (2019) claim that these works do not disprove the usefulness of attention as explanation per se, and provide four tests to determine if or when it can be used as such. In one such test, they are able to find alternative attention weights using an adversarial training setup, which suggests attention is not always a faithful explanation. Finally, Pruthi et al. (2020) propose a method to produce deceptive attention weights. Their method reduces how much weight is assigned to a set of 'impermissible' tokens, even when the models demonstratively rely on those tokens for their predictions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 433, |
|
"end": 460, |
|
"text": "Wiegreffe and Pinter (2019)", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 801, |
|
"end": 821, |
|
"text": "Pruthi et al. (2020)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Is attention (not) explanation?", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "In the attention-as-explanation research to date text classification with LSTMs received the most scrutiny. However, Vashishth et al. (2019) question why one should focus on single-sequence tasks at all because the attention mechanism is arguably far less important there than in models involving two sequences, like NLI or MT. Indeed, the performance of an NMT model degrades substantially if uniform weights are used, while random attention weights affect the text classification performance minimally. Therefore, findings from text classification studies may not generalize to tasks where attention is a crucial component.", |
|
"cite_spans": [ |
|
{ |
|
"start": 117, |
|
"end": 140, |
|
"text": "Vashishth et al. (2019)", |
|
"ref_id": "BIBREF42" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Was the right task analyzed?", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "x 1 x 2 x 3 x 4 I love this movie \u03b1 1 \u03b1 2 \u03b1 3 \u03b1 4 y \u2211 i \u03b1 i h i h 1 h 2 h 3 h 4", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Was the right task analyzed?", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Figure 1: A typical model in the debate.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Was the right task analyzed?", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Interestingly, even for the task of MT, the first case where attention was visualized to inspect a model ( \u00a71), Ding et al. (2019) find that saliency methods ( \u00a73) yield better word alignments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 112, |
|
"end": 130, |
|
"text": "Ding et al. (2019)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Was the right task analyzed?", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Grimsley et al. (2020) go as far as saying that attention is not explanation by definition, if a causal definition of explanation is assumed. Drawing on the work in philosophy, they point out that causal explanations presuppose that a surgical intervention is possible which is not the case with deep neural networks: one cannot intervene on attention while keeping all the other variables invariant.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Is a causal definition assumed?", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "The problems with using as attention as explanation, especially regarding faithfulness, have inspired some to try and 'improve' the attention weights, so to make them more faithful and/or plausible. Mohankumar et al. (2020) observe high similarity between the hidden representations of LSTM states and propose a diversity-driven training objective that makes the hidden representations more diverse across time steps. They show using representation erasure that the resulting attention weights result in decision flips more easily as compared to vanilla attention. With a similar motivation, Tutek and Snajder (2020) use a word-level objective to achieve a stronger connection between hidden states and the words they represent, which affects attention. Not part of the recent debate, Deng et al. (2018) propose variational attention as an alternative to the soft attention of Bahdanau et al. (2015) , arguing that the latter is not alignment, only an approximation thereof. They have the additional benefit of allowing posterior alignments, conditioned on the input and the output sentences.", |
|
"cite_spans": [ |
|
{ |
|
"start": 785, |
|
"end": 803, |
|
"text": "Deng et al. (2018)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 877, |
|
"end": 899, |
|
"text": "Bahdanau et al. (2015)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Can attention be improved?", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "In this section we discuss various input saliency methods for NLP as alternatives to attention: gradient-based ( \u00a73.1), propagation-based ( \u00a73.2), and occlusion-based methods ( \u00a73.3), following Arras et al. 2019. We do not endorse any specific method 1 , but rather try to give an overview of methods and how they differ. We discuss methods that are applicable to any neural NLP model, allowing access to model internals, such as activations and gradients, as attention itself requires such access. We leave out more expensive methods that use a surrogate model, e.g., LIME (Ribeiro et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 569, |
|
"end": 596, |
|
"text": "LIME (Ribeiro et al., 2016)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Saliency Methods", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "While used earlier in other fields, Li et al. (2016a) use gradients as explanation in NLP and compute:", |
|
"cite_spans": [ |
|
{ |
|
"start": 36, |
|
"end": 53, |
|
"text": "Li et al. (2016a)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Gradient-based methods", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u2207 x i f c (x 1:n )", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Gradient-based methods", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where x i is the input word embedding for time step i, x 1:n = x 1 , . . . , x n are the input embeddings (e.g., a sentence), and f c (x 1:n ) the model output for target class c. After taking the L2 norm of Eq. 2, the result is a measure of how sensitive the model is to the input at time step i. If instead we take the dot product of Eq. 2 with the input word embedding x i , we arrive at the gradient\u00d7input method (Denil et al., 2015) , which returns a saliency (scalar) of input i:", |
|
"cite_spans": [ |
|
{ |
|
"start": 417, |
|
"end": 437, |
|
"text": "(Denil et al., 2015)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Gradient-based methods", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u2207 x i f c (x 1:n ) \u2022 x i", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Gradient-based methods", |
|
"sec_num": "3.1" |
|
}, |
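
To make Eqs. 2 and 3 concrete, the following is a minimal sketch on a toy score function of our own choosing, so that the gradient can be written analytically in NumPy; with a real model the gradients would come from the framework's autodiff instead, and all names and dimensions here are illustrative.

```python
import numpy as np

# Toy score function f_c(x_1..x_n) = sum_i w_c . tanh(W x_i); any
# differentiable model works, this one just keeps the gradient analytic.
rng = np.random.default_rng(0)
n, d, h = 4, 8, 16                           # tokens, embedding dim, hidden dim
X = rng.normal(size=(n, d))                  # input word embeddings x_1..x_n
W = rng.normal(size=(h, d))
w_c = rng.normal(size=h)                     # weights for the target class c

def f_c(X):
    return np.sum(np.tanh(X @ W.T) @ w_c)

def grad_f_c(X):
    # Eq. 2, one row per token: d f_c / d x_i = W^T (w_c * (1 - tanh(W x_i)^2))
    return ((1.0 - np.tanh(X @ W.T) ** 2) * w_c) @ W

G = grad_f_c(X)                              # (n, d) gradients
sensitivity = np.linalg.norm(G, axis=1)      # L2 norm of Eq. 2, one value per token
saliency = np.sum(G * X, axis=1)             # gradient x input, Eq. 3
print(sensitivity)
print(saliency)
```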
|
{ |
|
"text": "Integrated gradients (IG) (Sundararajan et al., 2017 ) is a gradient-based method which deals with the problem of saturation: gradients may get close to zero for a well-fitted function. IG requires a baseline b 1:n , e.g., all-zeros vectors or repeated [MASK] vectors. For input i, we compute:", |
|
"cite_spans": [ |
|
{ |
|
"start": 26, |
|
"end": 52, |
|
"text": "(Sundararajan et al., 2017", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 253, |
|
"end": 259, |
|
"text": "[MASK]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Gradient-based methods", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "1 m m k=1 \u2207 x i f c b 1:n + k m (x 1:n \u2212b 1:n ) \u2022(x i \u2212b i ) (4)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Gradient-based methods", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "That is, we average over m gradients, with the inputs to f c being linearly interpolated between the baseline and the original input x 1:n in m steps. We then take the dot product of that averaged gradient with the input embedding x i minus the baseline. We propose distinguishing sensitivity from saliency, following Ancona et al. (2019) : the former says how much a change in the input changes the output, while the latter is the marginal effect of each input word on the prediction. Gradients measure sensitivity, whereas gradient\u00d7input and IG measure saliency. A model can be sensitive to the input at a time step, but it depends on the actual input vector if it was important for the prediction.", |
|
"cite_spans": [ |
|
{ |
|
"start": 318, |
|
"end": 338, |
|
"text": "Ancona et al. (2019)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Gradient-based methods", |
|
"sec_num": "3.1" |
|
}, |
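
A minimal sketch of Eq. 4, reusing the same kind of toy score function with an analytic gradient; the number of steps m, the all-zeros baseline, and all names are illustrative choices of ours, and in practice the gradients would again come from autodiff.

```python
import numpy as np

rng = np.random.default_rng(0)
n, d, h, m = 4, 8, 16, 50                    # tokens, dims, interpolation steps
X = rng.normal(size=(n, d))                  # input embeddings x_1..x_n
B = np.zeros_like(X)                         # all-zeros baseline b_1..b_n
W = rng.normal(size=(h, d))
w_c = rng.normal(size=h)

def grad_f_c(X):                             # gradient of f_c(X) = sum_i w_c . tanh(W x_i)
    return ((1.0 - np.tanh(X @ W.T) ** 2) * w_c) @ W

avg_grad = np.zeros_like(X)
for k in range(1, m + 1):                    # interpolate between baseline and input
    avg_grad += grad_f_c(B + (k / m) * (X - B))
avg_grad /= m

ig = np.sum(avg_grad * (X - B), axis=1)      # Eq. 4: one saliency score per token
print(ig)
```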
|
{ |
|
"text": "Propagation-based methods (Landecker et al., 2013; Bach et al., 2015; Arras et al., 2017, i.a.) , of which we discuss Layer-wise Relevance Propagation (LRP) in particular, start with a forward pass to obtain the output f c (x 1:n ), which is the toplevel relevance. They then use a special backward pass that, at each layer, redistributes the incoming relevance among the inputs of that layer. Each kind of layer has its own propagation rules. For example, there are different rules for feed-forward layers (Bach et al., 2015) and LSTM layers (Arras et al., 2017) . Relevance is redistributed until we arrive at the input layers. While LRP requires implementing a custom backward pass, it does allow precise control to preserve relevance, and it has been shown to work better than using gradient-based methods on text classification (Arras et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 26, |
|
"end": 50, |
|
"text": "(Landecker et al., 2013;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 51, |
|
"end": 69, |
|
"text": "Bach et al., 2015;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 70, |
|
"end": 95, |
|
"text": "Arras et al., 2017, i.a.)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 507, |
|
"end": 526, |
|
"text": "(Bach et al., 2015)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 543, |
|
"end": 563, |
|
"text": "(Arras et al., 2017)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 833, |
|
"end": 853, |
|
"text": "(Arras et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Propagation-based methods", |
|
"sec_num": "3.2" |
|
}, |
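
To illustrate the propagation idea, here is a generic LRP-epsilon backward step through a single feed-forward layer, written in NumPy. This is a simplified sketch, not the exact propagation rules of the cited papers (which also define rules for LSTM layers), and the epsilon stabilizer and toy usage are our own choices.

```python
import numpy as np

def lrp_epsilon_linear(a, W, b, R_out, eps=1e-6):
    """One LRP-epsilon backward step through a linear layer z = a @ W + b.

    a: (d_in,) input activations; W: (d_in, d_out); b: (d_out,);
    R_out: (d_out,) relevance arriving at the layer output.
    Returns the relevance redistributed onto the layer inputs.
    """
    z = a @ W + b                                # forward pre-activations
    z = z + eps * np.where(z >= 0, 1.0, -1.0)    # stabilizer keeps z away from zero
    s = R_out / z                                # relevance per unit of pre-activation
    return a * (W @ s)                           # each input gets its own contribution

# toy usage: the top-level relevance is the score of the target class
rng = np.random.default_rng(0)
a = rng.normal(size=5)
W = rng.normal(size=(5, 3))
b = rng.normal(size=3)
R_out = np.array([0.0, 1.2, 0.0])                # relevance placed on class 1 only
print(lrp_epsilon_linear(a, W, b, R_out))
```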
|
{ |
|
"text": "Occlusion-based methods (Zeiler and Fergus, 2014; Li et al., 2016b) compute input saliency by occluding (or erasing) input features and measuring how that affects the model. Intuitively, erasing unimportant features does not affect the model, whereas the opposite is true for important features. Li et al. (2016b) erase word embedding dimensions and whole words to see how doing so affects the model. They compute the importance of a word on a dataset by averaging over how much, for each example, erasing that word caused a difference in the output compared to not erasing that word.", |
|
"cite_spans": [ |
|
{ |
|
"start": 24, |
|
"end": 49, |
|
"text": "(Zeiler and Fergus, 2014;", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 50, |
|
"end": 67, |
|
"text": "Li et al., 2016b)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 296, |
|
"end": 313, |
|
"text": "Li et al. (2016b)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Occlusion-based methods", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "As a saliency method, however, we can apply their method on a single example only. For input i:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Occlusion-based methods", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "f c (x 1:n ) \u2212 f c (x 1:n|x i =0 )", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Occlusion-based methods", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "computes saliency, where x 1:n|x i =0 indicates that input word embedding x i was zeroed out, while the other inputs were unmodified. K\u00e1d\u00e1r et al. (2017) and Poerner et al. (2018) use a variant, omission, by simply leaving the word out of the input. This method requires n + 1 forward passes. It is also used for evaluation, to see if important words another method has identified bring a change in model output (e.g., DeYoung et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 134, |
|
"end": 153, |
|
"text": "K\u00e1d\u00e1r et al. (2017)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 158, |
|
"end": 179, |
|
"text": "Poerner et al. (2018)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 419, |
|
"end": 440, |
|
"text": "DeYoung et al., 2020)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Occlusion-based methods", |
|
"sec_num": "3.3" |
|
}, |
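
A minimal sketch of Eq. 5 applied at every position; f_c stands for any model that maps the n input embeddings to a scalar class score, the zeroing-out variant is shown (omission would drop the row instead), and the toy scoring function is ours.

```python
import numpy as np

def occlusion_saliency(f_c, X):
    """Eq. 5 at every position: f_c(x_1:n) - f_c(x_1:n with x_i zeroed out).

    f_c: callable mapping an (n, d) embedding matrix to a scalar score.
    Uses n + 1 forward passes in total, as noted above.
    """
    full = f_c(X)
    scores = np.empty(len(X))
    for i in range(len(X)):
        X_occ = X.copy()
        X_occ[i] = 0.0               # zero out token i; omission would delete the row
        scores[i] = full - f_c(X_occ)
    return scores

# toy usage with an arbitrary scoring function
rng = np.random.default_rng(0)
X = rng.normal(size=(4, 8))
w = rng.normal(size=8)
print(occlusion_saliency(lambda Z: float(np.tanh(Z.sum(axis=0)) @ w), X))
```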
|
{ |
|
"text": "We discussed the use of attention as explanation ( \u00a72) and input saliency methods as alternatives ( \u00a73). We will now argue why saliency methods should be preferred over attention for explanation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Saliency vs. Attention", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In many of the cited papers, whether implicitly or explicitly, the goal of the explanation is to reveal which input words are the most important ones for the final prediction. This is perhaps a consequence of attention computing one weight per input, so it is necessarily understood in terms of those inputs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Saliency vs. Attention", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The intended user for the explanation is often not stated, but typically that user is a model developer, and not a non-expert end user, for example. For model developers, faithfulness, the need for an explanation to accurately represent the reasoning of the model, is a key concern. On the other hand, plausibility is of lesser concern, because a model developer aims to understand and possibly improve the model, and that model does not necessarily align with human intuition (see Jacovi and Goldberg, 2020b, for a detailed discussion of the differences between faithfulness and plausibility).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Saliency vs. Attention", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "With this goal and user clearly stated, it is impossible to make an argument in favor of using attention as explanation. Input saliency methods are addressing the goal head-on: they reveal why one particular model prediction was made in terms of how relevant each input word was to that prediction. Moreover, input saliency methods typically take the entire computation path into account, all the way from the input word embeddings to the target output prediction value. Attention weights do not: they reflect, at one point in the computation, how much the model attends to each input representation, but those representations might already have mixed in information from other inputs. Ironically, attention-as-explanation is sometimes evaluated by comparing it against gradient-based measures, which again begs the question why we wouldn't use those measures in the first place.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Saliency vs. Attention", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "One might argue that attention, despite its flaws, is easily extracted and computationally efficient. However, it only takes one line in a framework like TensorFlow to compute the gradient of the output w.r.t. the input word embeddings, so implementation difficulty is not a strong argument. In terms of efficiency, it is true that for attention only a forward pass is required, but many other methods discussed at most require a forward and then a backward pass, which is still extremely efficient.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Saliency vs. Attention", |
|
"sec_num": "4" |
|
}, |
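
To illustrate the point about implementation effort, here is a small TensorFlow sketch; the toy model is ours and only stands in for a real classifier, but the gradient of the class score w.r.t. the input word embeddings is indeed a single tape.gradient call.

```python
import tensorflow as tf

n, d = 4, 8
emb = tf.random.normal((1, n, d))               # one sentence of word embeddings
dense = tf.keras.layers.Dense(1)                # stand-in classifier head

with tf.GradientTape() as tape:
    tape.watch(emb)                             # treat the embeddings as the input
    score = dense(tf.reduce_sum(emb, axis=1))   # f_c(x_1:n): one score per example
grads = tape.gradient(score, emb)               # the "one line" input gradient (Eq. 2)

saliency = tf.reduce_sum(grads * emb, axis=-1)  # gradient x input, one score per token
print(saliency.numpy())
```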
|
{ |
|
"text": "In this position paper we criticized the use of attention to assess input saliency for the benefit of the model developer. We emphasize that understanding the role of the attention mechanism is a perfectly justified research goal. For example, Voita et al. (2019) and Michel et al. (2019) analyze the role of attention heads in the Transformer architecture and identify a few distinct functions they have, and Strubell et al. (2018) train attention heads to perform dependency parsing, adding a linguistic bias.", |
|
"cite_spans": [ |
|
{ |
|
"start": 244, |
|
"end": 263, |
|
"text": "Voita et al. (2019)", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 268, |
|
"end": 288, |
|
"text": "Michel et al. (2019)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 410, |
|
"end": 432, |
|
"text": "Strubell et al. (2018)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Attention is not not interesting", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We also stress that if the definition of explanation is adjusted, for example if a different intended user and a different explanatory goal are articulated, attention may become a useful explanation for a certain application. For example, Strout et al. (2019) demonstrate that supervised attention helps humans accomplish a task faster than random or unsupervised attention, for a user and goal that are very different from those implied in \u00a72.", |
|
"cite_spans": [ |
|
{ |
|
"start": 239, |
|
"end": 259, |
|
"text": "Strout et al. (2019)", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Attention is not not interesting", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "6 Is Saliency the Ultimate Answer?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Attention is not not interesting", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Beyond saliency. While we have argued that saliency methods are a good fit for our goal, there are other goals for which different methods can be a better fit. For example, counterfactual analysis might lead to insights, aided by visualization tools (Vig, 2019; Hoover et al., 2020; Abnar and Zuidema, 2020) . The DiffMask method of DeCao et al. (2020) adds another dimension: it not only reveals in what layer a model knows what inputs are important, but also where important information is stored as it flows through the layers of the model. Other examples are models that rationalize their predictions (Lei et al., 2016; Bastings et al., 2019) , which can guarantee faithful explanations, although they might be sensitive to so-called trojans (Jacovi and Goldberg, 2020a).", |
|
"cite_spans": [ |
|
{ |
|
"start": 250, |
|
"end": 261, |
|
"text": "(Vig, 2019;", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 262, |
|
"end": 282, |
|
"text": "Hoover et al., 2020;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 283, |
|
"end": 307, |
|
"text": "Abnar and Zuidema, 2020)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 605, |
|
"end": 623, |
|
"text": "(Lei et al., 2016;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 624, |
|
"end": 646, |
|
"text": "Bastings et al., 2019)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Attention is not not interesting", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "A known problem with occlusion-based saliency methods as well as erasure-based evaluation of any input saliency technique (Bach et al., 2015; DeYoung et al., 2020) is that changes in the predicted probabilities may be due to the fact that the corrupted input falls off the manifold of the training data (Hooker et al., 2019) . That is, a drop in probability can be explained by the input being OOD and not by an important feature missing. It has also been demonstrated that at least some of the saliency methods are not reliable and produce unintuitive results (Kindermans et al., 2017) or violate certain axioms (Sundararajan et al., 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 122, |
|
"end": 141, |
|
"text": "(Bach et al., 2015;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 142, |
|
"end": 163, |
|
"text": "DeYoung et al., 2020)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 303, |
|
"end": 324, |
|
"text": "(Hooker et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 561, |
|
"end": 586, |
|
"text": "(Kindermans et al., 2017)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 613, |
|
"end": 640, |
|
"text": "(Sundararajan et al., 2017)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations of saliency.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "A more fundamental limitation is the expressiveness of input saliency methods. Obviously, a bag of per-token saliency weights can be called an explanation only in a very narrow sense. One can overcome some limitations of the flat representation of importance by indicating dependencies between important features (for example, Janizek et al. (2020) present an extension of IG which explains pairwise feature interactions) but it is hardly possible to fully understand why a deep non-linear model produced a certain prediction by only looking at the input tokens.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations of saliency.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We summarized the debate on whether attention is explanation, and observed that the goal for explanation is often to determine what inputs are the most relevant to the prediction. The user for that explanation often goes unstated, but is typically assumed to be a model developer. With this goal and user clearly stated, we argued that input saliency methods-of which we discussed a few-are better suited than attention. We hope, at least for the goal and user that we identified, that the focus shifts from attention to input saliency methods, and perhaps to entirely different methods, goals, and users.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "For an evaluation of methods for explaining LSTM-based models, see e.g.,Poerner et al. (2018) andArras et al. (2019).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank Sebastian Gehrmann for useful comments and suggestions, as well as our anonymous reviewers, one of whom mentioned there is a whale in the room as well.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Quantifying attention flow in transformers", |
|
"authors": [ |
|
{ |
|
"first": "Samira", |
|
"middle": [], |
|
"last": "Abnar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Willem", |
|
"middle": [], |
|
"last": "Zuidema", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4190--4197", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.385" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Samira Abnar and Willem Zuidema. 2020. Quantify- ing attention flow in transformers. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4190-4197, On- line. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Gradient-Based Attribution Methods", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Ancona", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Enea", |
|
"middle": [], |
|
"last": "Ceolini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cengiz\u00f6ztireli", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Markus", |
|
"middle": [], |
|
"last": "Gross", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "169--191", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-3-030-28954-6_9" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Ancona, Enea Ceolini, Cengiz\u00d6ztireli, and Markus Gross. 2019. Gradient-Based Attribution Methods, pages 169-191. Springer International Publishing, Cham.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Explaining recurrent neural network predictions in sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Leila", |
|
"middle": [], |
|
"last": "Arras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gr\u00e9goire", |
|
"middle": [], |
|
"last": "Montavon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus-Robert", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Samek", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 8th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "159--168", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-5221" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leila Arras, Gr\u00e9goire Montavon, Klaus-Robert M\u00fcller, and Wojciech Samek. 2017. Explaining recurrent neural network predictions in sentiment analysis. In Proceedings of the 8th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis, pages 159-168, Copenhagen, Den- mark. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Evaluating recurrent neural network explanations", |
|
"authors": [ |
|
{ |
|
"first": "Leila", |
|
"middle": [], |
|
"last": "Arras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ahmed", |
|
"middle": [], |
|
"last": "Osman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus-Robert", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Samek", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "113--126", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-4813" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leila Arras, Ahmed Osman, Klaus-Robert M\u00fcller, and Wojciech Samek. 2019. Evaluating recurrent neural network explanations. In Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Inter- preting Neural Networks for NLP, pages 113-126, Florence, Italy. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "On pixel-wise explanations for non-linear classifier decisions by layer-wise relevance propagation", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Bach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Binder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gr\u00e9goire", |
|
"middle": [], |
|
"last": "Montavon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frederick", |
|
"middle": [], |
|
"last": "Klauschen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus-Robert", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Samek", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "PLOS ONE", |
|
"volume": "10", |
|
"issue": "7", |
|
"pages": "1--46", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1371/journal.pone.0130140" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Bach, Alexander Binder, Gr\u00e9goire Mon- tavon, Frederick Klauschen, Klaus-Robert M\u00fcller, and Wojciech Samek. 2015. On pixel-wise explana- tions for non-linear classifier decisions by layer-wise relevance propagation. PLOS ONE, 10(7):1-46.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Neural machine translation by jointly learning to align and translate", |
|
"authors": [ |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "3rd International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly learning to align and translate. In 3rd Inter- national Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Interpretable neural predictions with differentiable binary variables", |
|
"authors": [ |
|
{ |
|
"first": "Jasmijn", |
|
"middle": [], |
|
"last": "Bastings", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wilker", |
|
"middle": [], |
|
"last": "Aziz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Titov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2963--2977", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1284" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jasmijn Bastings, Wilker Aziz, and Ivan Titov. 2019. Interpretable neural predictions with differentiable binary variables. In Proceedings of the 57th Annual Meeting of the Association for Computational Lin- guistics, pages 2963-2977, Florence, Italy. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "How do decisions emerge across layers in neural models? interpretation with differentiable masking", |
|
"authors": [ |
|
{ |
|
"first": "Nicola", |
|
"middle": [], |
|
"last": "Decao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Schlichtkrull", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wilker", |
|
"middle": [], |
|
"last": "Aziz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Titov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nicola DeCao, Michael Schlichtkrull, Wilker Aziz, and Ivan Titov. 2020. How do decisions emerge across layers in neural models? interpretation with differ- entiable masking.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Latent alignment and variational attention", |
|
"authors": [ |
|
{ |
|
"first": "Yuntian", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Chiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Demi", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Advances in Neural Information Processing Systems 31", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9712--9724", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuntian Deng, Yoon Kim, Justin Chiu, Demi Guo, and Alexander Rush. 2018. Latent alignment and variational attention. In S. Bengio, H. Wallach, H. Larochelle, K. Grauman, N. Cesa-Bianchi, and R. Garnett, editors, Advances in Neural Information Processing Systems 31, pages 9712-9724. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Extraction of salient sentences from labelled documents", |
|
"authors": [ |
|
{ |
|
"first": "Misha", |
|
"middle": [], |
|
"last": "Denil", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alban", |
|
"middle": [], |
|
"last": "Demiraj", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nando", |
|
"middle": [], |
|
"last": "De Freitas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Misha Denil, Alban Demiraj, and Nando de Freitas. 2015. Extraction of salient sentences from labelled documents.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "ERASER: A benchmark to evaluate rationalized NLP models", |
|
"authors": [ |
|
{ |
|
"first": "Jay", |
|
"middle": [], |
|
"last": "Deyoung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sarthak", |
|
"middle": [], |
|
"last": "Jain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nazneen", |
|
"middle": [], |
|
"last": "Fatema Rajani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Lehman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Byron", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Wallace", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4443--4458", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.408" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jay DeYoung, Sarthak Jain, Nazneen Fatema Rajani, Eric Lehman, Caiming Xiong, Richard Socher, and Byron C. Wallace. 2020. ERASER: A benchmark to evaluate rationalized NLP models. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4443-4458, On- line. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Saliency-driven word alignment interpretation for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Shuoyang", |
|
"middle": [], |
|
"last": "Ding", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hainan", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Fourth Conference on Machine Translation", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1--12", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-5201" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shuoyang Ding, Hainan Xu, and Philipp Koehn. 2019. Saliency-driven word alignment interpretation for neural machine translation. In Proceedings of the Fourth Conference on Machine Translation (Volume 1: Research Papers), pages 1-12, Florence, Italy. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Why attention is not explanation: Surgical intervention and causal reasoning about neural models", |
|
"authors": [ |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Grimsley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elijah", |
|
"middle": [], |
|
"last": "Mayfield", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julia", |
|
"middle": [ |
|
"R S" |
|
], |
|
"last": "Bursten", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of The 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1780--1790", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher Grimsley, Elijah Mayfield, and Julia R.S. Bursten. 2020. Why attention is not expla- nation: Surgical intervention and causal reasoning about neural models. In Proceedings of The 12th Language Resources and Evaluation Conference, pages 1780-1790, Marseille, France. European Lan- guage Resources Association.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "A benchmark for interpretability methods in deep neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Hooker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dumitru", |
|
"middle": [], |
|
"last": "Erhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pieter-Jan", |
|
"middle": [], |
|
"last": "Kindermans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Been", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "9737--9748", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sara Hooker, Dumitru Erhan, Pieter-Jan Kindermans, and Been Kim. 2019. A benchmark for interpretabil- ity methods in deep neural networks. In H. Wal- lach, H. Larochelle, A. Beygelzimer, F. dAlch\u00e9-Buc, E. Fox, and R. Garnett, editors, Advances in Neu- ral Information Processing Systems 32, pages 9737- 9748. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "2020. exBERT: A Visual Analysis Tool to Explore Learned Representations in Transformer Models", |
|
"authors": [ |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Hoover", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hendrik", |
|
"middle": [], |
|
"last": "Strobelt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Gehrmann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "187--196", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-demos.22" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Benjamin Hoover, Hendrik Strobelt, and Sebastian Gehrmann. 2020. exBERT: A Visual Analysis Tool to Explore Learned Representations in Transformer Models. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics: System Demonstrations, pages 187-196, Online. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Aligning faithful interpretations with their social attribution", |
|
"authors": [ |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Jacovi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alon Jacovi and Yoav Goldberg. 2020a. Aligning faith- ful interpretations with their social attribution.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Towards faithfully interpretable NLP systems: How should we define and evaluate faithfulness?", |
|
"authors": [ |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Jacovi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4198--4205", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.386" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alon Jacovi and Yoav Goldberg. 2020b. Towards faith- fully interpretable NLP systems: How should we de- fine and evaluate faithfulness? In Proceedings of the 58th Annual Meeting of the Association for Compu- tational Linguistics, pages 4198-4205, Online. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Attention is not Explanation", |
|
"authors": [ |
|
{ |
|
"first": "Sarthak", |
|
"middle": [], |
|
"last": "Jain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Byron", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Wallace", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "3543--3556", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1357" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sarthak Jain and Byron C. Wallace. 2019. Attention is not Explanation. In Proceedings of the 2019 Con- ference of the North American Chapter of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies, Volume 1 (Long and Short Pa- pers), pages 3543-3556, Minneapolis, Minnesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Lee. 2020. Explaining explanations: Axiomatic feature interactions for deep networks", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Joseph", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pascal", |
|
"middle": [], |
|
"last": "Janizek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Su", |
|
"middle": [], |
|
"last": "Sturmfels", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2002.04138" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joseph D Janizek, Pascal Sturmfels, and Su-In Lee. 2020. Explaining explanations: Axiomatic fea- ture interactions for deep networks. arXiv preprint arXiv:2002.04138.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Representation of linguistic form and function in recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Akos", |
|
"middle": [], |
|
"last": "K\u00e1d\u00e1r", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Grzegorz", |
|
"middle": [], |
|
"last": "Chrupa\u0142a", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Afra", |
|
"middle": [], |
|
"last": "Alishahi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Computational Linguistics", |
|
"volume": "43", |
|
"issue": "4", |
|
"pages": "761--780", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/COLI_a_00300" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Akos K\u00e1d\u00e1r, Grzegorz Chrupa\u0142a, and Afra Alishahi. 2017. Representation of linguistic form and func- tion in recurrent neural networks. Computational Linguistics, 43(4):761-780.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "The (un)reliability of saliency methods", |
|
"authors": [ |
|
{ |
|
"first": "Pieter-Jan", |
|
"middle": [], |
|
"last": "Kindermans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Hooker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julius", |
|
"middle": [], |
|
"last": "Adebayo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maximilian", |
|
"middle": [], |
|
"last": "Alber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Kristof", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sven", |
|
"middle": [], |
|
"last": "Sch\u00fctt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dumitru", |
|
"middle": [], |
|
"last": "D\u00e4hne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Been", |
|
"middle": [], |
|
"last": "Erhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pieter-Jan Kindermans, Sara Hooker, Julius Ade- bayo, Maximilian Alber, Kristof T. Sch\u00fctt, Sven D\u00e4hne, Dumitru Erhan, and Been Kim. 2017. The (un)reliability of saliency methods.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Interpreting individual classifications of hierarchical networks", |
|
"authors": [ |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Landecker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Thomure", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"M A" |
|
], |
|
"last": "Bettencourt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Kenyon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Brumby", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "2013 IEEE Symposium on Computational Intelligence and Data Mining (CIDM)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "32--38", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "W. Landecker, M. D. Thomure, L. M. A. Bettencourt, M. Mitchell, G. T. Kenyon, and S. P. Brumby. 2013. Interpreting individual classifications of hierarchical networks. In 2013 IEEE Symposium on Computa- tional Intelligence and Data Mining (CIDM), pages 32-38.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Rationalizing neural predictions", |
|
"authors": [ |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Lei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Regina", |
|
"middle": [], |
|
"last": "Barzilay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tommi", |
|
"middle": [], |
|
"last": "Jaakkola", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "107--117", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D16-1011" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tao Lei, Regina Barzilay, and Tommi Jaakkola. 2016. Rationalizing neural predictions. In Proceedings of the 2016 Conference on Empirical Methods in Nat- ural Language Processing, pages 107-117, Austin, Texas. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Visualizing and understanding neural models in NLP", |
|
"authors": [ |
|
{ |
|
"first": "Jiwei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xinlei", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "681--691", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N16-1082" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiwei Li, Xinlei Chen, Eduard Hovy, and Dan Jurafsky. 2016a. Visualizing and understanding neural mod- els in NLP. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 681-691, San Diego, California. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Understanding neural networks through representation erasure", |
|
"authors": [ |
|
{ |
|
"first": "Jiwei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Will", |
|
"middle": [], |
|
"last": "Monroe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiwei Li, Will Monroe, and Dan Jurafsky. 2016b. Un- derstanding neural networks through representation erasure.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "The mythos of model interpretability", |
|
"authors": [ |
|
{ |
|
"first": "Zachary", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Lipton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Commun. ACM", |
|
"volume": "", |
|
"issue": "10", |
|
"pages": "36--43", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3233231" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zachary C. Lipton. 2018. The mythos of model inter- pretability. Commun. ACM, 61(10):36-43.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Effective approaches to attention-based neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1412--1421", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D15-1166" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thang Luong, Hieu Pham, and Christopher D. Man- ning. 2015. Effective approaches to attention-based neural machine translation. In Proceedings of the 2015 Conference on Empirical Methods in Natu- ral Language Processing, pages 1412-1421, Lis- bon, Portugal. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Are sixteen heads really better than one?", |
|
"authors": [ |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Michel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graham", |
|
"middle": [], |
|
"last": "Neubig", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "14014--14024", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paul Michel, Omer Levy, and Graham Neubig. 2019. Are sixteen heads really better than one? In H. Wal- lach, H. Larochelle, A. Beygelzimer, F. dAlch\u00e9-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems 32, pages 14014- 14024. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Explanation in artificial intelligence: Insights from the social sciences", |
|
"authors": [ |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Miller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Artif. Intell", |
|
"volume": "267", |
|
"issue": "", |
|
"pages": "1--38", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.artint.2018.07.007" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tim Miller. 2019. Explanation in artificial intelli- gence: Insights from the social sciences. Artif. In- tell., 267:1-38.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Balaji Vasan Srinivasan, and Balaraman Ravindran. 2020. Towards transparent and explainable attention models", |
|
"authors": [ |
|
{ |
|
"first": "Akash", |
|
"middle": [], |
|
"last": "Kumar Mohankumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preksha", |
|
"middle": [], |
|
"last": "Nema", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sharan", |
|
"middle": [], |
|
"last": "Narasimhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mitesh", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Khapra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Balaji", |
|
"middle": [ |
|
"Vasan" |
|
], |
|
"last": "Srinivasan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Balaraman", |
|
"middle": [], |
|
"last": "Ravindran", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4206--4216", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.387" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Akash Kumar Mohankumar, Preksha Nema, Sharan Narasimhan, Mitesh M. Khapra, Balaji Vasan Srini- vasan, and Balaraman Ravindran. 2020. Towards transparent and explainable attention models. In Proceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 4206- 4216, Online. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Layer-wise relevance propagation: An overview", |
|
"authors": [ |
|
{ |
|
"first": "Gr\u00e9goire", |
|
"middle": [], |
|
"last": "Montavon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Binder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Lapuschkin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Samek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus-Robert", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Explainable AI: Interpreting, Explaining and Visualizing Deep Learning", |
|
"volume": "11700", |
|
"issue": "", |
|
"pages": "193--209", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gr\u00e9goire Montavon, Alexander Binder, Sebastian Lapuschkin, Wojciech Samek, and Klaus-Robert M\u00fcller. 2019. Layer-wise relevance propagation: An overview. In Wojciech Samek, Gr\u00e9goire Mon- tavon, Andrea Vedaldi, Lars Kai Hansen, and Klaus- Robert M\u00fcller, editors, Explainable AI: Interpreting, Explaining and Visualizing Deep Learning, volume 11700 of Lecture Notes in Computer Science, pages 193-209. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Don't give me the details, just the summary! topic-aware convolutional neural networks for extreme summarization", |
|
"authors": [ |
|
{ |
|
"first": "Shashi", |
|
"middle": [], |
|
"last": "Narayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shay", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1797--1807", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1206" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shashi Narayan, Shay B. Cohen, and Mirella Lapata. 2018. Don't give me the details, just the summary! topic-aware convolutional neural networks for ex- treme summarization. In Proceedings of the 2018 Conference on Empirical Methods in Natural Lan- guage Processing, pages 1797-1807, Brussels, Bel- gium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "A decomposable attention model for natural language inference", |
|
"authors": [ |
|
{ |
|
"first": "Ankur", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oscar", |
|
"middle": [], |
|
"last": "T\u00e4ckstr\u00f6m", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dipanjan", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2249--2255", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D16-1244" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ankur Parikh, Oscar T\u00e4ckstr\u00f6m, Dipanjan Das, and Jakob Uszkoreit. 2016. A decomposable attention model for natural language inference. In Proceed- ings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 2249-2255, Austin, Texas. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Evaluating neural network explanation methods using hybrid documents and morphosyntactic agreement", |
|
"authors": [ |
|
{ |
|
"first": "Nina", |
|
"middle": [], |
|
"last": "Poerner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hinrich", |
|
"middle": [], |
|
"last": "Sch\u00fctze", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "340--350", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-1032" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nina Poerner, Hinrich Sch\u00fctze, and Benjamin Roth. 2018. Evaluating neural network explanation meth- ods using hybrid documents and morphosyntactic agreement. In Proceedings of the 56th Annual Meet- ing of the Association for Computational Linguis- tics (Volume 1: Long Papers), pages 340-350, Mel- bourne, Australia. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Learning to deceive with attention-based explanations", |
|
"authors": [ |
|
{ |
|
"first": "Danish", |
|
"middle": [], |
|
"last": "Pruthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mansi", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bhuwan", |
|
"middle": [], |
|
"last": "Dhingra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graham", |
|
"middle": [], |
|
"last": "Neubig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zachary", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Lipton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4782--4793", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.432" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Danish Pruthi, Mansi Gupta, Bhuwan Dhingra, Gra- ham Neubig, and Zachary C. Lipton. 2020. Learn- ing to deceive with attention-based explanations. In Proceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 4782- 4793, Online. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "why should I trust you?\": Explaining the predictions of any classifier", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [ |
|
"Tulio" |
|
], |
|
"last": "Ribeiro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlos", |
|
"middle": [], |
|
"last": "Guestrin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1135--1144", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Tulio Ribeiro, Sameer Singh, and Carlos Guestrin. 2016. \"why should I trust you?\": Explain- ing the predictions of any classifier. In Proceed- ings of the 22nd ACM SIGKDD International Con- ference on Knowledge Discovery and Data Mining, San Francisco, CA, USA, August 13-17, 2016, pages 1135-1144.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "A neural attention model for abstractive sentence summarization", |
|
"authors": [ |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Rush", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sumit", |
|
"middle": [], |
|
"last": "Chopra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "379--389", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D15-1044" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexander M. Rush, Sumit Chopra, and Jason Weston. 2015. A neural attention model for abstractive sen- tence summarization. In Proceedings of the 2015 Conference on Empirical Methods in Natural Lan- guage Processing, pages 379-389, Lisbon, Portugal. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Is attention interpretable?", |
|
"authors": [ |
|
{ |
|
"first": "Sofia", |
|
"middle": [], |
|
"last": "Serrano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2931--2951", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1282" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sofia Serrano and Noah A. Smith. 2019. Is attention interpretable? In Proceedings of the 57th Annual Meeting of the Association for Computational Lin- guistics, pages 2931-2951, Florence, Italy. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Do human rationales improve machine explanations?", |
|
"authors": [ |
|
{ |
|
"first": "Julia", |
|
"middle": [], |
|
"last": "Strout", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ye", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raymond", |
|
"middle": [], |
|
"last": "Mooney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "56--62", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-4807" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julia Strout, Ye Zhang, and Raymond Mooney. 2019. Do human rationales improve machine explana- tions? In Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 56-62, Florence, Italy. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Linguistically-informed self-attention for semantic role labeling", |
|
"authors": [ |
|
{ |
|
"first": "Emma", |
|
"middle": [], |
|
"last": "Strubell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Verga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Andor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Weiss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5027--5038", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1548" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emma Strubell, Patrick Verga, Daniel Andor, David Weiss, and Andrew McCallum. 2018. Linguistically-informed self-attention for semantic role labeling. In Proceedings of the 2018 Confer- ence on Empirical Methods in Natural Language Processing, pages 5027-5038, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Axiomatic attribution for deep networks", |
|
"authors": [ |
|
{ |
|
"first": "Mukund", |
|
"middle": [], |
|
"last": "Sundararajan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankur", |
|
"middle": [], |
|
"last": "Taly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qiqi", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 34th International Conference on Machine Learning", |
|
"volume": "70", |
|
"issue": "", |
|
"pages": "3319--3328", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mukund Sundararajan, Ankur Taly, and Qiqi Yan. 2017. Axiomatic attribution for deep networks. In Pro- ceedings of the 34th International Conference on Machine Learning, ICML 2017, Sydney, NSW, Aus- tralia, 6-11 August 2017, volume 70 of Proceedings of Machine Learning Research, pages 3319-3328. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Staying true to your word: (how) can attention become explanation?", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Tutek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Snajder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 5th Workshop on Representation Learning for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "131--142", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.repl4nlp-1.17" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martin Tutek and Jan Snajder. 2020. Staying true to your word: (how) can attention become explanation? In Proceedings of the 5th Workshop on Representa- tion Learning for NLP, pages 131-142, Online. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Attention interpretability across nlp tasks", |
|
"authors": [ |
|
{ |
|
"first": "Shikhar", |
|
"middle": [], |
|
"last": "Vashishth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shyam", |
|
"middle": [], |
|
"last": "Upadhyay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gaurav", |
|
"middle": [], |
|
"last": "Singh Tomar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manaal", |
|
"middle": [], |
|
"last": "Faruqui", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shikhar Vashishth, Shyam Upadhyay, Gaurav Singh Tomar, and Manaal Faruqui. 2019. Attention inter- pretability across nlp tasks.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Gar- nett, editors, Advances in Neural Information Pro- cessing Systems 30, pages 5998-6008. Curran Asso- ciates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "A multiscale visualization of attention in the transformer model", |
|
"authors": [ |
|
{ |
|
"first": "Jesse", |
|
"middle": [], |
|
"last": "Vig", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jesse Vig. 2019. A multiscale visualization of attention in the transformer model.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Analyzing multi-head self-attention: Specialized heads do the heavy lifting, the rest can be pruned", |
|
"authors": [ |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Voita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Talbot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fedor", |
|
"middle": [], |
|
"last": "Moiseev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Titov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5797--5808", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1580" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elena Voita, David Talbot, Fedor Moiseev, Rico Sen- nrich, and Ivan Titov. 2019. Analyzing multi-head self-attention: Specialized heads do the heavy lift- ing, the rest can be pruned. In Proceedings of the 57th Annual Meeting of the Association for Com- putational Linguistics, pages 5797-5808, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Attention is not not explanation", |
|
"authors": [ |
|
{ |
|
"first": "Sarah", |
|
"middle": [], |
|
"last": "Wiegreffe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuval", |
|
"middle": [], |
|
"last": "Pinter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "11--20", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1002" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sarah Wiegreffe and Yuval Pinter. 2019. Attention is not not explanation. In Proceedings of the 2019 Con- ference on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing (EMNLP- IJCNLP), pages 11-20, Hong Kong, China. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "Visualizing and understanding convolutional networks", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Zeiler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rob", |
|
"middle": [], |
|
"last": "Fergus", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Computer Vision -ECCV 2014", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "818--833", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew D. Zeiler and Rob Fergus. 2014. Visualizing and understanding convolutional networks. In Com- puter Vision -ECCV 2014, pages 818-833, Cham. Springer International Publishing.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": {} |
|
} |
|
} |