|
{ |
|
"paper_id": "I17-1004", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:39:48.570536Z" |
|
}, |
|
"title": "What does Attention in Neural Machine Translation Pay Attention to?", |
|
"authors": [ |
|
{ |
|
"first": "Hamidreza", |
|
"middle": [], |
|
"last": "Ghader", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Amsterdam", |
|
"location": { |
|
"country": "The Netherlands" |
|
} |
|
}, |
|
"email": "h.ghader@uva.nl" |
|
}, |
|
{ |
|
"first": "Christof", |
|
"middle": [], |
|
"last": "Monz", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Amsterdam", |
|
"location": { |
|
"country": "The Netherlands" |
|
} |
|
}, |
|
"email": "c.monz@uva.nl" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Attention in neural machine translation provides the possibility to encode relevant parts of the source sentence at each translation step. As a result, attention is considered to be an alignment model as well. However, there is no work that specifically studies attention and provides analysis of what is being learned by attention models. Thus, the question still remains that how attention is similar or different from the traditional alignment. In this paper, we provide detailed analysis of attention and compare it to traditional alignment. We answer the question of whether attention is only capable of modelling translational equivalent or it captures more information. We show that attention is different from alignment in some cases and is capturing useful information other than alignments.", |
|
"pdf_parse": { |
|
"paper_id": "I17-1004", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Attention in neural machine translation provides the possibility to encode relevant parts of the source sentence at each translation step. As a result, attention is considered to be an alignment model as well. However, there is no work that specifically studies attention and provides analysis of what is being learned by attention models. Thus, the question still remains that how attention is similar or different from the traditional alignment. In this paper, we provide detailed analysis of attention and compare it to traditional alignment. We answer the question of whether attention is only capable of modelling translational equivalent or it captures more information. We show that attention is different from alignment in some cases and is capturing useful information other than alignments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Neural machine translation (NMT) has gained a lot of attention recently due to its substantial improvements in machine translation quality achieving state-of-the-art performance for several languages (Luong et al., 2015b; Jean et al., 2015; Wu et al., 2016) . The core architecture of neural machine translation models is based on the general encoder-decoder approach (Sutskever et al., 2014) . Neural machine translation is an end-toend approach that learns to encode source sentences into distributed representations and decode these representations into sentences in the target language. Among the different neural MT models, attentional NMT (Bahdanau et al., 2015; Luong et al., 2015a) has become popular due to its capability to use the most relevant parts of the source sentence at each translation step. This capability also makes the attentional model superior in translating longer sentences (Bahdanau et al., 2015; Luong et al., 2015a) . Figure 1 : Visualization of the attention paid to the relevant parts of the source sentence for each generated word of a translation example. See how the attention is 'smeared out' over multiple source words in the case of \"would\" and \"like\". Figure 1 shows an example of how attention uses the most relevant source words to generate a target word at each step of the translation. In this paper we focus on studying the relevance of the attended parts, especially cases where attention is 'smeared out' over multiple source words where their relevance is not entirely obvious, see, e.g., \"would\" and \"like\" in Figure 1 . Here, we ask whether these are due to errors of the attention mechanism or are a desired behavior of the model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 200, |
|
"end": 221, |
|
"text": "(Luong et al., 2015b;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 222, |
|
"end": 240, |
|
"text": "Jean et al., 2015;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 241, |
|
"end": 257, |
|
"text": "Wu et al., 2016)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 368, |
|
"end": 392, |
|
"text": "(Sutskever et al., 2014)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 645, |
|
"end": 668, |
|
"text": "(Bahdanau et al., 2015;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 669, |
|
"end": 689, |
|
"text": "Luong et al., 2015a)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 901, |
|
"end": 924, |
|
"text": "(Bahdanau et al., 2015;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 925, |
|
"end": 945, |
|
"text": "Luong et al., 2015a)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 948, |
|
"end": 956, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1191, |
|
"end": 1199, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1558, |
|
"end": 1566, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Since the introduction of attention models in neural machine translation (Bahdanau et al., 2015) various modifications have been proposed (Luong et al., 2015a; Cohn et al., 2016; Liu et al., 2016) . However, to the best of our knowledge there is no study that provides an analysis of what kind of phenomena is being captured by attention. There are some works that have looked to attention as being similar to traditional word alignment (Alkhouli et al., 2016; Cohn et al., 2016; Liu et al., 2016; . Some of these approaches also experimented with training the attention model using traditional alignments (Alkhouli et al., 2016; Liu et al., 2016; . Liu et al. (2016) have shown that attention could be seen as a reordering model as well as an alignment model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 73, |
|
"end": 96, |
|
"text": "(Bahdanau et al., 2015)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 138, |
|
"end": 159, |
|
"text": "(Luong et al., 2015a;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 160, |
|
"end": 178, |
|
"text": "Cohn et al., 2016;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 179, |
|
"end": 196, |
|
"text": "Liu et al., 2016)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 437, |
|
"end": 460, |
|
"text": "(Alkhouli et al., 2016;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 461, |
|
"end": 479, |
|
"text": "Cohn et al., 2016;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 480, |
|
"end": 497, |
|
"text": "Liu et al., 2016;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 606, |
|
"end": 629, |
|
"text": "(Alkhouli et al., 2016;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 630, |
|
"end": 647, |
|
"text": "Liu et al., 2016;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 650, |
|
"end": 667, |
|
"text": "Liu et al. (2016)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we focus on investigating the differences between attention and alignment and what is being captured by the attention mechanism in general. The questions that we are aiming to answer include: Is the attention model only capable of modelling alignment? And how similar is attention to alignment in different syntactic phenomena?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our analysis shows that attention models traditional alignment in some cases more closely while it captures information beyond alignment in others. For instance, attention agrees with traditional alignments to a high degree in the case of nouns. However, it captures other information rather than only the translational equivalent in the case of verbs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This paper makes the following contributions: 1) We provide a detailed comparison of attention in NMT and word alignment. 2) We show that while different attention mechanisms can lead to different degrees of compliance with respect to word alignments, global compliance is not always helpful for word prediction. 3) We show that attention follows different patterns depending on the type of the word being generated. 4) We demonstrate that attention does not always comply with alignment. We provide evidence showing that the difference between attention and alignment is due to attention model capability to attend the context words influencing the current word translation. Liu et al. (2016) investigate how training the attention model in a supervised manner can benefit machine translation quality. To this end they use traditional alignments obtained by running automatic alignment tools (GIZA++ (Och and Ney, 2003) and fast align (Dyer et al., 2013) ) on the training data and feed it as ground truth to the attention network. They report some improvements in translation quality arguing that the attention model has learned to better align source and target words. The approach of training attention using traditional alignments has also been proposed by others Alkhouli et al., 2016) . show that guided attention with traditional alignment helps in the domain of e-commerce data which includes lots of out of vocabulary (OOV) product names and placeholders, but not much in the other domains. Alkhouli et al. (2016) have separated the alignment model and translation model, reasoning that this avoids propagation of errors from one model to the other as well as providing more flexibility in the model types and training of the models. They use a feed-forward neural network as their alignment model that learns to model jumps in the source side using HMM/IBM alignments obtained by using GIZA++. Shi et al. (2016) show that various kinds of syntactic information are being learned and encoded in the output hidden states of the encoder. The neural system for their experimental analysis is not an attentional model and they argue that attention does not have any impact for learning syntactic information. However, performing the same analysis for morphological information, Belinkov et al. (2017) show that attention has also some effect on the information that the encoder of neural machine translation system encodes in its output hidden states. As part of their analysis they show that a neural machine translation system that has an attention model can learn the POS tags of the source side more efficiently than a system without attention.", |
|
"cite_spans": [ |
|
{ |
|
"start": 676, |
|
"end": 693, |
|
"text": "Liu et al. (2016)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 893, |
|
"end": 920, |
|
"text": "(GIZA++ (Och and Ney, 2003)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 936, |
|
"end": 955, |
|
"text": "(Dyer et al., 2013)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1269, |
|
"end": 1291, |
|
"text": "Alkhouli et al., 2016)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1501, |
|
"end": 1523, |
|
"text": "Alkhouli et al. (2016)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1905, |
|
"end": 1922, |
|
"text": "Shi et al. (2016)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 2284, |
|
"end": 2306, |
|
"text": "Belinkov et al. (2017)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Recently, Koehn and Knowles (2017) carried out a brief analysis of how much attention and alignment match in different languages by measuring the probability mass that attention gives to alignments obtained from an automatic alignment tool. They also report differences based on the most attended words. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 34, |
|
"text": "Koehn and Knowles (2017)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "This section provides a short background on attention and discusses two most popular attention models which are also used in this paper. The first model is a non-recurrent attention model which is equivalent to the \"global attention\" method proposed by Luong et al. (2015a) . The second attention model that we use in our investigation is an input-feeding model similar to the attention model first proposed by Bahdanau et al. (2015) and turned to a more general one and called inputfeeding by Luong et al. (2015a) . Below we describe the details of both models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 253, |
|
"end": 273, |
|
"text": "Luong et al. (2015a)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 411, |
|
"end": 433, |
|
"text": "Bahdanau et al. (2015)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 494, |
|
"end": 514, |
|
"text": "Luong et al. (2015a)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Attention Models", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Both non-recurrent and input-feeding models compute a context vector c i at each time step. Subsequently, they concatenate the context vector to the hidden state of decoder and pass it through a non-linearity before it is fed into the softmax output layer of the translation network.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Attention Models", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "h t = tanh(W c [c t ; h t ])", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Attention Models", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The difference of the two models lays in the way they compute the context vector. In the nonrecurrent model, the hidden state of the decoder is compared to each hidden state of the encoder. Often, this comparison is realized as the dot product of vectors. Then the comparison result is fed to a softmax layer to compute the attention weight.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Attention Models", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "e t,i = h T i h t (2) \u03b1 t,i = exp(e t,i ) |x| j=1 exp(e t,j )", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Attention Models", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Here h t is the hidden state of the decoder at time t, h i is ith hidden state of the encoder and |x| is the length of the source sentence. Then the computed alignment weights are used to compute a weighted sum over the encoder hidden states which results in the context vector mentioned above:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Attention Models", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "c i = |x| i=1 \u03b1 t,i h i (4)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Attention Models", |
|
"sec_num": "3" |
|
}, |
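
The computation above can be made concrete with a short sketch. The following Python/NumPy code is our own illustration of the non-recurrent (global) attention of Equations 1-4, not the authors' implementation; the function names, variable names, and toy dimensions are assumptions made for the example.

```python
import numpy as np

def softmax(scores):
    # numerically stable softmax over a 1-D vector of scores
    scores = scores - scores.max()
    exp_scores = np.exp(scores)
    return exp_scores / exp_scores.sum()

def global_attention(h_enc, h_t, W_c):
    """Non-recurrent ("global") attention, Equations 1-4.

    h_enc : (|x|, d) encoder hidden states h_i
    h_t   : (d,)     decoder hidden state at time step t
    W_c   : (d, 2d)  combination weights of Equation 1
    """
    scores = h_enc @ h_t                                    # e_{t,i} = h_i^T h_t (Eq. 2)
    alpha_t = softmax(scores)                               # attention weights   (Eq. 3)
    c_t = alpha_t @ h_enc                                   # context vector      (Eq. 4)
    h_tilde_t = np.tanh(W_c @ np.concatenate([c_t, h_t]))   # attentional state   (Eq. 1)
    return alpha_t, c_t, h_tilde_t

# toy usage with random values
rng = np.random.default_rng(0)
d, src_len = 4, 6
alpha_t, c_t, h_tilde_t = global_attention(rng.normal(size=(src_len, d)),
                                           rng.normal(size=d),
                                           rng.normal(size=(d, 2 * d)))
print(alpha_t.sum())  # the attention weights sum to 1 over the source positions
```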
|
{ |
|
"text": "The input-feeding model changes the context vector computation in a way that at each step t the context vector is aware of the previously computed context c t\u22121 . To this end, the input-feeding model feeds back its ownh t\u22121 to the network and uses the resulting hidden state instead of the contextindependent h t , to compare to the hidden states of RWTH data # of sentences 508 # of alignments 10534 % of sure alignments 91% % of possible alignments 9% the encoder. This is defined in the following equations:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Attention Models", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "h t = f (W [h t\u22121 ; y t\u22121 ]) (5) e t,i = h T i h t", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Attention Models", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Here, f is the function that the stacked LSTM applies to the input, y t\u22121 is the last generated target word, andh t\u22121 is the output of previous time step of the input-feeding network itself, meaning the output of Equation 1 in the case that context vector has been computed using e t,i from Equation 6.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Attention Models", |
|
"sec_num": "3" |
|
}, |
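
As a rough illustration of the input-feeding variant in Equations 5-6, the sketch below is our own code: a plain tanh recurrence stands in for the stacked LSTM f of the paper, and all names and shapes are chosen for the example. It shows how the previous attentional state is fed back into the score computation.

```python
import numpy as np

def input_feeding_step(h_enc, h_tilde_prev, y_prev_emb, s_prev, W_in, W_rec, W_c):
    """One decoder step of the input-feeding model (Equations 5-6).

    Shapes: h_enc (|x|, d); h_tilde_prev, y_prev_emb, s_prev (d,);
            W_in (d, 2d); W_rec (d, d); W_c (d, 2d).
    """
    x_t = np.concatenate([h_tilde_prev, y_prev_emb])     # [h~_{t-1}; y_{t-1}]   (Eq. 5)
    h_t = np.tanh(W_in @ x_t + W_rec @ s_prev)           # stand-in for f(W[...])
    scores = h_enc @ h_t                                  # e_{t,i} = h_i^T h_t   (Eq. 6)
    alpha_t = np.exp(scores - scores.max())
    alpha_t /= alpha_t.sum()
    c_t = alpha_t @ h_enc
    h_tilde_t = np.tanh(W_c @ np.concatenate([c_t, h_t]))  # Eq. 1, fed back at step t+1
    return h_t, h_tilde_t, alpha_t
```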
|
{ |
|
"text": "As mentioned above, it is a commonly held assumption that attention corresponds to word alignments. To verify this, we investigate whether higher consistency between attention and alignment leads to better translations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparing Attention with Alignment", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In order to compare attentions of multiple systems as well as to measure the difference between attention and word alignment, we convert the hard word alignments into soft ones and use cross entropy between attention and soft alignment as a loss function. For this purpose, we use manual alignments provided by RWTH German-English dataset as the hard alignments. The statistics of the data are given in Table 1 . We convert the hard alignments to soft alignments using Equation 7. For unaligned words, we first assume that they have been aligned to all the words in the source side and then do the conversion.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 403, |
|
"end": 410, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Measuring Attention-Alignment Accuracy", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "Al(x i , y t ) = 1 |Ay t | if x i \u2208 A yt 0 otherwise", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Measuring Attention-Alignment Accuracy", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Here A yt is the set of source words aligned to target word y t and |A yt | is the number of source words in the set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Measuring Attention-Alignment Accuracy", |
|
"sec_num": "4.1" |
|
}, |
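
A small sketch of the conversion in Equation 7 follows, including the stated treatment of unaligned target words. This is our own illustration; the data structures and names are assumptions, not the authors' code.

```python
def hard_to_soft_alignment(aligned_sets, src_len):
    """Convert hard alignments to soft ones, as in Equation 7.

    aligned_sets : one set A_{y_t} of aligned source indices per target word
    src_len      : number of source words |x|
    """
    soft = []
    for aligned in aligned_sets:
        # unaligned target words are treated as aligned to every source word
        if not aligned:
            aligned = set(range(src_len))
        weight = 1.0 / len(aligned)
        soft.append([weight if i in aligned else 0.0 for i in range(src_len)])
    return soft

# target word 0 aligned to source word 1, target word 1 unaligned (3 source words)
print(hard_to_soft_alignment([{1}, set()], 3))
# -> [[0.0, 1.0, 0.0], [0.33..., 0.33..., 0.33...]]
```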
|
{ |
|
"text": "After conversion of the hard alignments to soft ones, we compute the attention loss as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Measuring Attention-Alignment Accuracy", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "L At (y t ) = \u2212 |x| i=1 Al(x i , y t ) log(At(x i , y t )) (8)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Measuring Attention-Alignment Accuracy", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Here x is the source sentence and Al(x i , y t ) is the weight of the alignment link between source word x i and the target word (see Equation 7). At(x i , y t ) is the attention weight \u03b1 t,i (see Equation 3) of the source word x i , when generating the target word y t .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Measuring Attention-Alignment Accuracy", |
|
"sec_num": "4.1" |
|
}, |
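
The attention loss of Equation 8 is a per-word cross entropy between the soft alignment and the attention distribution; a minimal sketch follows (our own code; the small epsilon for numerical safety is our addition and is not discussed in the paper).

```python
import numpy as np

def attention_loss(soft_alignment, attention, eps=1e-12):
    """Cross entropy between Al(., y_t) and At(., y_t) for one target word (Equation 8)."""
    al = np.asarray(soft_alignment, dtype=float)
    at = np.asarray(attention, dtype=float)
    return float(-np.sum(al * np.log(at + eps)))

# target word aligned only to source word 1; attention mostly, but not fully, on it
print(attention_loss([0.0, 1.0, 0.0], [0.1, 0.8, 0.1]))  # ~0.22
```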
|
{ |
|
"text": "In our analysis, we also look into the relation between translation quality and the quality of the attention with respect to the alignments. For measuring the quality of attention, we use the attention loss defined in Equation 8. As a measure of translation quality, we choose the loss between the output of our NMT system and the reference translation at each translation step, which we call word prediction loss. The word prediction loss for word y t is logarithm of the probability given in Equation 9.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Measuring Attention-Alignment Accuracy", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "p nmt (y t | y <t , x) = sof tmax(W oht ) (9)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Measuring Attention-Alignment Accuracy", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Here x is the source sentence, y t is target word at time step t, y <t is the target history given by the reference translation andh t is given by Equation 1 for either non-recurrent or input-feeding attention models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Measuring Attention-Alignment Accuracy", |
|
"sec_num": "4.1" |
|
}, |
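
For completeness, here is a toy sketch of the word prediction loss derived from Equation 9. The output matrix W_o, the vocabulary size, and the log-softmax formulation are illustrative assumptions, not taken from the paper.

```python
import numpy as np

def word_prediction_loss(W_o, h_tilde_t, target_index):
    """-log p_nmt(y_t | y_<t, x) with p_nmt = softmax(W_o h~_t), cf. Equation 9."""
    logits = W_o @ h_tilde_t
    logits = logits - logits.max()                      # numerical stability
    log_probs = logits - np.log(np.exp(logits).sum())   # log softmax
    return float(-log_probs[target_index])

rng = np.random.default_rng(1)
vocab_size, d = 10, 4
print(word_prediction_loss(rng.normal(size=(vocab_size, d)),
                           rng.normal(size=d),
                           target_index=3))
```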
|
{ |
|
"text": "Spearman's rank correlation is used to compute the correlation between attention loss and word prediction loss:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Measuring Attention-Alignment Accuracy", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u03c1 = Cov(R L At , R L W P ) \u03c3 R L At \u03c3 R L W P (10)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Measuring Attention-Alignment Accuracy", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "where R L At and R L W P are the ranks of the attention losses and word prediction losses, respectively, Cov is the covariance between two input variables, and \u03c3 R L At and \u03c3 R L W P are the standard deviations of R L At and R L W P . If there is a close relationship between word prediction quality and consistency of attention versus alignment, then there should be high correlation between word prediction loss and attention loss. Figure 2 shows an example with different levels of consistency between attention and word alignments. For the target words \"will\" and \"come\" the attention is not focused on the Table 1 ). See how attention is deviated from alignment points in the case of \"will\" and \"come\". manually aligned word but distributed between the aligned word and other words. The focus of this paper is examining cases where attention does not follow alignment, answering the questions whether those cases represent errors or desirable behavior of the attention model.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 434, |
|
"end": 442, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 611, |
|
"end": 618, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Measuring Attention-Alignment Accuracy", |
|
"sec_num": "4.1" |
|
}, |
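
Equation 10 is the standard Spearman rank correlation. The sketch below shows one way to compute it per POS class from paired per-token losses; it assumes SciPy's spearmanr is available, and the record format is our own invention rather than the authors' setup.

```python
from collections import defaultdict
from scipy.stats import spearmanr

def per_pos_spearman(records):
    """records: iterable of (pos_tag, attention_loss, word_prediction_loss) per token.
    Returns Spearman's rho between the two losses for each POS tag (Equation 10)."""
    by_pos = defaultdict(lambda: ([], []))
    for pos, att_loss, wp_loss in records:
        by_pos[pos][0].append(att_loss)
        by_pos[pos][1].append(wp_loss)
    return {pos: spearmanr(att, wp).correlation for pos, (att, wp) in by_pos.items()}
```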
|
{ |
|
"text": "As another informative variable in our analysis, we look into the attention concentration. While most word alignments only involve one or a few words, attention can be distributed more freely. We measure the concentration of attention by computing the entropy of the attention distribution:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Measuring Attention Concentration", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "E At (y t ) = \u2212 |x| i=1 At(x i , y t ) log(At(x i , y t )) (11)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Measuring Attention Concentration", |
|
"sec_num": "4.2" |
|
}, |
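
Equation 11 is the Shannon entropy of the attention distribution over source positions; the following short sketch (our own code) illustrates how concentrated and distributed attention differ in entropy.

```python
import numpy as np

def attention_entropy(attention, eps=1e-12):
    """Entropy of the attention distribution for one target word (Equation 11)."""
    at = np.asarray(attention, dtype=float)
    return float(-np.sum(at * np.log(at + eps)))

print(attention_entropy([1.0, 0.0, 0.0]))   # ~0.0: fully concentrated attention
print(attention_entropy([1/3, 1/3, 1/3]))   # ~1.1: attention spread over three words
```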
|
{ |
|
"text": "We conduct our analysis using the two different attention models described in Section 3. Our first attention model is the global model without inputfeeding as introduced by Luong et al. (2015a) . The second model is the input-feeding model (Luong et al., 2015a) , which uses recurrent attention. Table 2 : Performance of our experimental system in BLEU on different standard WMT test sets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 173, |
|
"end": 193, |
|
"text": "Luong et al. (2015a)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 240, |
|
"end": 261, |
|
"text": "(Luong et al., 2015a)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 296, |
|
"end": 303, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Empirical Analysis of Attention Behaviour", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "NMT system is a unidirectional encoder-decoder system as described in (Luong et al., 2015a) , using 4 recurrent layers. We trained the systems with dimension size of 1,000 and batch size of 80 for 20 epochs. The vocabulary for both source and target side is set to be the 30K most common words. The learning rate is set to be 1 and a maximum gradient norm of 5 has been used. We also use a dropout rate of 0.3 to avoid overfitting. Table 3 : Statistics for the parallel corpus used to train our models. The length statistics are based on the source side.", |
|
"cite_spans": [ |
|
{ |
|
"start": 70, |
|
"end": 91, |
|
"text": "(Luong et al., 2015a)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 432, |
|
"end": 439, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Empirical Analysis of Attention Behaviour", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We train both of the systems on the WMT15 German-to-English training data, see Table 3 for some statistics. Table 2 shows the BLEU scores (Papineni et al., 2002) for both systems on different test sets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 138, |
|
"end": 161, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 79, |
|
"end": 86, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 108, |
|
"end": 115, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Impact of Attention Mechanism", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Since we use POS tags and dependency roles in our analysis, both of which are based on words, we chose not to use BPE (Sennrich et al., 2016) which operates at the sub-word level. non-recurrent input-feeding GIZA++ AER 0.60 0.37 0.31 Table 4 : Alignment error rate (AER) of the hard alignments produced from the output attentions of the systems with input-feeding and non-recurrent attention models. We use the most attended source word for each target word as the aligned word. The last column shows the AER for the alignment generated by GIZA++.", |
|
"cite_spans": [ |
|
{ |
|
"start": 118, |
|
"end": 141, |
|
"text": "(Sennrich et al., 2016)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 234, |
|
"end": 241, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Impact of Attention Mechanism", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We report alignment error rate (AER) (Och and Ney, 2000) , which is commonly used to measure alignment quality, in Table 4 to show the difference between attentions and human alignments provided by RWTH German-English dataset. To compute AER over attentions, we follow Luong non-recurrent input-feeding Attention loss 0.46 0.25 Table 5 : Average loss between attention generated by input-feeding and non-recurrent systems and the manual alignment over RWTH German-English data. et al. (2015a) to produce hard alignments from attentions by choosing the most attended source word for each target word. We also use GIZA++ (Och and Ney, 2003) to produce automatic alignments over the data set to allow for a comparison between automatically generated alignments and the attentions generated by our systems. GIZA++ is run in both directions and alignments are symmetrized using the grow-diag-final-and refined alignment heuristic. As shown in Table 4 , the input-feeding system not only achieves a higher BLEU score, but also uses attentions that are closer to the human alignments. Table 5 compares input-feeding and nonrecurrent attention in terms of attention loss computed using Equation 8. Here the losses between the attention produced by each system and the human alignments is reported. As expected, the difference in attention losses are in line with AER.", |
|
"cite_spans": [ |
|
{ |
|
"start": 37, |
|
"end": 56, |
|
"text": "(Och and Ney, 2000)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 457, |
|
"end": 492, |
|
"text": "German-English data. et al. (2015a)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 619, |
|
"end": 638, |
|
"text": "(Och and Ney, 2003)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 115, |
|
"end": 122, |
|
"text": "Table 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 328, |
|
"end": 335, |
|
"text": "Table 5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 938, |
|
"end": 945, |
|
"text": "Table 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1078, |
|
"end": 1085, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Impact of Attention Mechanism", |
|
"sec_num": "5.1" |
|
}, |
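
To make the evaluation in Table 4 concrete, the sketch below shows how hard alignments can be derived from attention by taking the most attended source word per target word, together with the standard AER formula of Och and Ney (2000). The data structures are assumed for illustration; this is not the authors' evaluation script.

```python
import numpy as np

def attention_to_hard_alignment(attention_matrix):
    """attention_matrix: (target_len, source_len) attention weights.
    Returns a set of (source_index, target_index) links, taking the most
    attended source word for each target word."""
    att = np.asarray(attention_matrix)
    return {(int(att[t].argmax()), t) for t in range(att.shape[0])}

def alignment_error_rate(predicted, sure, possible):
    """AER = 1 - (|A n S| + |A n P|) / (|A| + |S|), with S a subset of P
    (Och and Ney, 2000); all arguments are sets of (source, target) links."""
    matched = len(predicted & sure) + len(predicted & possible)
    return 1.0 - matched / (len(predicted) + len(sure))
```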
|
{ |
|
"text": "The difference between these comparisons is that AER only takes the most attended word into account while attention loss considers the entire attention distribution.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Impact of Attention Mechanism", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Based on the results in Section 5.1, one might be inclined to conclude that the closer the attention is to the word alignments the better the translation. However, ; Liu et al. (2016) ; Alkhouli et al. (2016) report mixed results by optimizing their NMT system with respect to word prediction and alignment quality. These findings warrant a more fine-grained analysis of attention. To this end, we include POS tags in our analysis and study the patterns of attention based on POS tags of the target words. We choose POS tags be- cause they exhibit some simple syntactic characteristics. We use the coarse grained universal POS tags (Petrov et al., 2012) given in Table 6 . To better understand how attention accuracy affects translation quality, we analyse the relationship between attention loss and word prediction loss for individual part-of-speech classes. Figure 3a shows how attention loss differs when generating different POS tags. One can see that attention loss varies substantially across different POS tags. In particular, we focus on the cases of NOUN and VERB which are the most frequent POS tags in the dataset. As shown, the attention of NOUN is the closest to alignments on average. But the average attention loss for VERB is almost two times larger than the loss for NOUN.", |
|
"cite_spans": [ |
|
{ |
|
"start": 166, |
|
"end": 183, |
|
"text": "Liu et al. (2016)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 186, |
|
"end": 208, |
|
"text": "Alkhouli et al. (2016)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 632, |
|
"end": 653, |
|
"text": "(Petrov et al., 2012)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 663, |
|
"end": 670, |
|
"text": "Table 6", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 861, |
|
"end": 870, |
|
"text": "Figure 3a", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Alignment Quality Impact on Translation", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Considering this difference and the observations in Section 5.1, a natural follow-up would be to focus on getting the attention of verbs to be closer to alignments. However, Figure 3b shows that the average word prediction loss for verbs is actually smaller compared to the loss for nouns. In other words, although the attention for verbs is substantially more inconsistent with the word alignments than for nouns, the NMT system translates verbs more accurately than nouns on average. To formalize this relationship we compute Spearman's rank correlation between word prediction loss and attention loss, based on the POS tags of the target side, for the input-feeding model, see Figure 4 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 174, |
|
"end": 183, |
|
"text": "Figure 3b", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 680, |
|
"end": 688, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Alignment Quality Impact on Translation", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "The low correlation for verbs confirms that attention to other parts of source sentence rather than the aligned word is necessary for translating verbs and that attention does not necessarily have to follow alignments. However, the higher correla- tion for nouns means that consistency of attention with alignments is more desirable. This could, in a way, explain the mixed result reported for training attention using alignments Liu et al., 2016; Alkhouli et al., 2016) . Especially the results by in which large improvements are achieved for the e-commerce domain which contains many OOV product names and placeholders, but no or very weak improvements were achieved over common domains.", |
|
"cite_spans": [ |
|
{ |
|
"start": 430, |
|
"end": 447, |
|
"text": "Liu et al., 2016;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 448, |
|
"end": 470, |
|
"text": "Alkhouli et al., 2016)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Alignment Quality Impact on Translation", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "In word alignment, most target words are aligned to one source word. The average number of source words aligned to nouns and verbs is 1.1 and 1.2 respectively. To investigate to what extent this also holds for attention we measure the attention concentration by computing the entropy of the attention distribution, see Equation 11. Figure 5a shows the average entropy of attention based on POS tags. As shown, nouns have one of the lowest entropies meaning that on average the attention for nouns tends to be concentrated. This also explains the closeness of the attention to alignments for nouns. In addition, the correlation between attention entropy and attention loss in case of nouns is high as shown in Figure 5b . This means that attention entropy can be used as a measure of closeness of attention to alignment in the case of nouns.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 332, |
|
"end": 341, |
|
"text": "Figure 5a", |
|
"ref_id": "FIGREF6" |
|
}, |
|
{ |
|
"start": 709, |
|
"end": 718, |
|
"text": "Figure 5b", |
|
"ref_id": "FIGREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Attention Concentration", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "The higher attention entropy for verbs, in Figure 5a, shows that the attention is more distributed compared to nouns. The low correlation between attention entropy and word prediction loss (see Figure 6 ) shows that attention concentration is not required when translating into verbs. This also confirms that the correct translation of verbs requires the systems to pay attention to different parts of the source sentence. Figure 6 : Correlation of attention entropy and word prediction loss for the input-feeding system.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 43, |
|
"end": 53, |
|
"text": "Figure 5a,", |
|
"ref_id": "FIGREF6" |
|
}, |
|
{ |
|
"start": 194, |
|
"end": 202, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 423, |
|
"end": 431, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Attention Concentration", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Another interesting observation here is the low correlation for pronouns (PRON) and particles (PRT), see Figure 6 . As can be seen in Figure 5a , these tags have more distributed attention comparing to nouns, for example. This could either mean that the attention model does not know where to focus or it deliberately pays attention to multiple, somehow relevant, places to be able to produce a better translation. The latter is supported by the relatively low word prediction losses, shown in the Figure 3b . All members in a coordination 3 Table 7 : The most attended dependency roles with their received attention percentage from the attention probability mass paid to the words other than the alignment points. Here, we focus on the POS tags discussed earlier.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 105, |
|
"end": 113, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 134, |
|
"end": 143, |
|
"text": "Figure 5a", |
|
"ref_id": "FIGREF6" |
|
}, |
|
{ |
|
"start": 498, |
|
"end": 507, |
|
"text": "Figure 3b", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 542, |
|
"end": 549, |
|
"text": "Table 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Attention Concentration", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "To further understand under which conditions attention is paid to words other than the aligned words, we study the distribution of attention over the source words. First, we measure how much attention is paid to the aligned words for each POS tag, on average. To this end, we compute the percentage of the probability mass that the attention model has assigned to aligned words for each POS tag, see Table 8 . Table 8 : Distribution of attention probability mass (in %) over alignment points and the rest of the words for each POS tag.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 400, |
|
"end": 407, |
|
"text": "Table 8", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 410, |
|
"end": 417, |
|
"text": "Table 8", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Attention Distribution", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "One can notice that less than half of the attention is paid to alignment points for most of the POS tags. To examine how the rest of attention in each case has been distributed over the source sentence we measure the attention distribution over dependency roles in the source side. We first parse the source side of RWTH data using the ParZu parser (Sennrich et al., 2013 ). Then we compute how the attention probability mass given to the words other than the alignment points, is distributed over dependency roles. Table 7 gives the most attended roles for each POS tag. Here, we focus on POS tags discussed earlier. One can see that the most attended roles when translating to nouns include adjectives and determiners and in the case of translating to verbs, it includes auxiliary verbs, adverbs (including negation), subjects, and objects.", |
|
"cite_spans": [ |
|
{ |
|
"start": 349, |
|
"end": 371, |
|
"text": "(Sennrich et al., 2013", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 516, |
|
"end": 523, |
|
"text": "Table 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Attention Distribution", |
|
"sec_num": "5.4" |
|
}, |
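
The percentages in Table 8 can be computed by summing, for each target word, the attention mass that falls on its alignment points and averaging per POS tag; a short sketch with assumed data structures follows (this is our illustration, not the authors' code).

```python
from collections import defaultdict

def attention_mass_on_alignment(tokens):
    """tokens: iterable of (pos_tag, attention_weights, aligned_source_indices)
    per target word. Returns the average percentage of attention probability
    mass that falls on alignment points, per POS tag."""
    mass_sum = defaultdict(float)
    count = defaultdict(int)
    for pos, attention, aligned in tokens:
        mass_sum[pos] += sum(attention[i] for i in aligned)
        count[pos] += 1
    return {pos: 100.0 * mass_sum[pos] / count[pos] for pos in mass_sum}
```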
|
{ |
|
"text": "In this paper, we have studied attention in neural machine translation and provided an analysis of the relation between attention and word alignment. We have shown that attention agrees with traditional alignment to a certain extent. However, this differs substantially by attention mechanism and the type of the word being generated. We have shown that attention has different patterns based on the POS tag of the target word. The concentrated pattern of attention and the relatively high correlations for nouns show that training the attention with explicit alignment labels is useful for generating nouns. However, this is not the case for verbs, since the large portion of attention being paid to words other than alignment points, is already capturing other relevant information. Training attention with alignments in this case will force the attention model to forget these useful information. This explains the mixed results reported when guiding attention to comply with alignments Liu et al., 2016; Alkhouli et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 990, |
|
"end": 1007, |
|
"text": "Liu et al., 2016;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 1008, |
|
"end": 1030, |
|
"text": "Alkhouli et al., 2016)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Punctuations have the role \"root\" in the parse generated using ParZu. However, we use the pos tag to discriminate them from tokens having the role \"root\".2 Attention mass for all different objects are summed up.3 Includes all different types of conjunctions and conjoined elements.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This research was funded in part by the Netherlands Organization for Scientific Research (NWO) under project numbers 639.022.213 and 612.001.218.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Alignment-based neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Tamer", |
|
"middle": [], |
|
"last": "Alkhouli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gabriel", |
|
"middle": [], |
|
"last": "Bretschner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan-Thorsten", |
|
"middle": [], |
|
"last": "Peter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammed", |
|
"middle": [], |
|
"last": "Hethnawi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Guta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the First Conference on Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "54--65", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tamer Alkhouli, Gabriel Bretschner, Jan-Thorsten Pe- ter, Mohammed Hethnawi, Andreas Guta, and Her- mann Ney. 2016. Alignment-based neural machine translation. In Proceedings of the First Conference on Machine Translation, pages 54-65, Berlin, Ger- many. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Neural machine translation by jointly learning to align and translate", |
|
"authors": [ |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly learning to align and translate. In International Con- ference on Learning Representations, San Diego, California, USA.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "What do neural machine translation models learn about morphology?", |
|
"authors": [ |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Belinkov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nadir", |
|
"middle": [], |
|
"last": "Durrani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fahim", |
|
"middle": [], |
|
"last": "Dalvi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hassan", |
|
"middle": [], |
|
"last": "Sajjad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Glass", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1704.03471" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yonatan Belinkov, Nadir Durrani, Fahim Dalvi, Hassan Sajjad, and James Glass. 2017. What do neural ma- chine translation models learn about morphology? arXiv preprint arXiv:1704.03471.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Guided alignment training for topic-aware neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Wenhu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evgeny", |
|
"middle": [], |
|
"last": "Matusov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shahram", |
|
"middle": [], |
|
"last": "Khadivi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan-Thorsten", |
|
"middle": [], |
|
"last": "Peter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "AMTA", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wenhu Chen, Evgeny Matusov, Shahram Khadivi, and Jan-Thorsten Peter. 2016. Guided alignment training for topic-aware neural machine translation. AMTA 2016, Vol., page 121.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Incorporating structural alignment biases into an attentional neural translation model", |
|
"authors": [ |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Cohn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cong Duy Vu", |
|
"middle": [], |
|
"last": "Hoang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ekaterina", |
|
"middle": [], |
|
"last": "Vymolova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kaisheng", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gholamreza", |
|
"middle": [], |
|
"last": "Haffari", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "876--885", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Trevor Cohn, Cong Duy Vu Hoang, Ekaterina Vy- molova, Kaisheng Yao, Chris Dyer, and Gholamreza Haffari. 2016. Incorporating structural alignment biases into an attentional neural translation model. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, pages 876-885, San Diego, California. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "A simple, fast, and effective reparameterization of ibm model 2", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Chahuneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "644--648", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Dyer, Victor Chahuneau, and Noah A. Smith. 2013. A simple, fast, and effective reparameteriza- tion of ibm model 2. In Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 644-648, Atlanta, Georgia. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "On using very large target vocabulary for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "S\u00e9bastien", |
|
"middle": [], |
|
"last": "Jean", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roland", |
|
"middle": [], |
|
"last": "Memisevic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1--10", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S\u00e9bastien Jean, Kyunghyun Cho, Roland Memisevic, and Yoshua Bengio. 2015. On using very large target vocabulary for neural machine translation. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Lan- guage Processing (Volume 1: Long Papers), pages 1-10, Beijing, China. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Six challenges for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Knowles", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1706.03872" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn and Rebecca Knowles. 2017. Six challenges for neural machine translation. arXiv preprint arXiv:1706.03872.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Neural machine translation with supervised attention", |
|
"authors": [ |
|
{ |
|
"first": "Lemao", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Masao", |
|
"middle": [], |
|
"last": "Utiyama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Finch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eiichiro", |
|
"middle": [], |
|
"last": "Sumita", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of COL-ING 2016, the 26th International Conference on Computational Linguistics: Technical Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3093--3102", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lemao Liu, Masao Utiyama, Andrew Finch, and Ei- ichiro Sumita. 2016. Neural machine translation with supervised attention. In Proceedings of COL- ING 2016, the 26th International Conference on Computational Linguistics: Technical Papers, pages 3093-3102, Osaka, Japan. The COLING 2016 Or- ganizing Committee.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Effective approaches to attentionbased neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1412--1421", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thang Luong, Hieu Pham, and Christopher D. Man- ning. 2015a. Effective approaches to attention- based neural machine translation. pages 1412-1421.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Addressing the rare word problem in neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Zaremba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "11--19", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thang Luong, Ilya Sutskever, Quoc Le, Oriol Vinyals, and Wojciech Zaremba. 2015b. Addressing the rare word problem in neural machine translation. In Pro- ceedings of the 53rd Annual Meeting of the Associ- ation for Computational Linguistics and the 7th In- ternational Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 11-19, Beijing, China. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Improved statistical alignment models", |
|
"authors": [ |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Franz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Och", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "38th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Franz Josef Och and Hermann Ney. 2000. Improved statistical alignment models. In 38th Annual Meet- ing of the Association for Computational Linguis- tics, Hong Kong, China, October 1-8, 2000.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "A systematic comparison of various statistical alignment models", |
|
"authors": [ |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Franz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Och", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Computational Linguistics", |
|
"volume": "29", |
|
"issue": "1", |
|
"pages": "19--51", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Franz Josef Och and Hermann Ney. 2003. A systematic comparison of various statistical alignment models. Computational Linguistics, 29(1):19-51.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "BLEU: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. BLEU: a method for automatic evaluation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pages 311-318.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "A universal part-of-speech tagset", |
|
"authors": [ |
|
{ |
|
"first": "Slav", |
|
"middle": [], |
|
"last": "Petrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dipanjan", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Slav Petrov, Dipanjan Das, and Ryan Mcdonald. 2012. A universal part-of-speech tagset. In Proceedings of the Language Resources and Evaluation Confer- ence.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Neural machine translation of rare words with subword units", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1715--1725", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural machine translation of rare words with subword units. In Proceedings of the 54th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1715- 1725. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Exploiting synergies between open resources for german dependency parsing, pos-tagging, and morphological analysis", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Volk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerold", |
|
"middle": [], |
|
"last": "Schneider", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the International Conference Recent Advances in Natural Language Processing RANLP 2013", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "601--609", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Martin Volk, and Gerold Schneider. 2013. Exploiting synergies between open resources for german dependency parsing, pos-tagging, and morphological analysis. In Proceedings of the In- ternational Conference Recent Advances in Natural Language Processing RANLP 2013, pages 601-609, Hissar, Bulgaria. INCOMA Ltd. Shoumen, BUL- GARIA.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Does string-based neural mt learn source syntax?", |
|
"authors": [ |
|
{ |
|
"first": "Xing", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Inkit", |
|
"middle": [], |
|
"last": "Padhi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1526--1534", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xing Shi, Inkit Padhi, and Kevin Knight. 2016. Does string-based neural mt learn source syntax? In Pro- ceedings of the 2016 Conference on Empirical Meth- ods in Natural Language Processing, pages 1526- 1534, Austin, Texas. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Sequence to sequence learning with neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc V", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Advances in neural information processing systems (NIPS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3104--3112", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V Le. 2014. Sequence to sequence learning with neural net- works. In Advances in neural information process- ing systems (NIPS), pages 3104-3112.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Google's neural machine translation system: Bridging the gap between human and machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Yonghui", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhifeng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc V", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Norouzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wolfgang", |
|
"middle": [], |
|
"last": "Macherey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maxim", |
|
"middle": [], |
|
"last": "Krikun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuan", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qin", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus", |
|
"middle": [], |
|
"last": "Macherey", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1609.08144" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, et al. 2016. Google's neural ma- chine translation system: Bridging the gap between human and machine translation. arXiv preprint arXiv:1609.08144.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "The mixed results reported by Chen et al. (2016); Alkhouli et al. (2016); Liu et al. (2016) on optimizing attention with respect to alignments motivates a more thorough analysis of attention models in NMT.", |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "An example of inconsistent attention and alignment. The outlined cells show the manual alignments from the RWTH dataset (see", |
|
"uris": null |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "(a) Average attention loss based on the POS tags of the target side. (b) Average word prediction loss based on the POS tags of the target side.", |
|
"uris": null |
|
}, |
|
"FIGREF3": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Average attention losses and word prediction losses from the input-feeding system.", |
|
"uris": null |
|
}, |
|
"FIGREF4": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Correlation between word prediction loss and attention loss for the input-feeding model.", |
|
"uris": null |
|
}, |
|
"FIGREF5": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "(a) Average attention entropy based on the POS tags. (b) Correlation between attention entropy and attention loss.", |
|
"uris": null |
|
}, |
|
"FIGREF6": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Attention entropy and its correlation with attention loss for the input-feeding system.", |
|
"uris": null |
|
}, |
|
"TABREF0": { |
|
"num": null, |
|
"text": "Statistics of manual alignments provided by RWTH German-English data.", |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF4": { |
|
"num": null, |
|
"text": "List of the universal POS tags used in our analysis.", |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |