|
{ |
|
"paper_id": "N19-1044", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:02:29.740497Z" |
|
}, |
|
"title": "Code-Switching for Enhancing NMT with Pre-Specified Translation", |
|
"authors": [ |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Soochow University", |
|
"location": { |
|
"settlement": "Suzhou", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "songkai.sk@alibaba-inc.com" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Westlake University", |
|
"location": { |
|
"settlement": "Hangzhou", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "zhangyue@wias.org.cn" |
|
}, |
|
{ |
|
"first": "Heng", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Alibaba DAMO Academy", |
|
"location": { |
|
"settlement": "Hangzhou", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Weihua", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Alibaba DAMO Academy", |
|
"location": { |
|
"settlement": "Hangzhou", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "weihua.luowh@alibaba-inc.com" |
|
}, |
|
{ |
|
"first": "Kun", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Soochow University", |
|
"location": { |
|
"settlement": "Suzhou", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Soochow University", |
|
"location": { |
|
"settlement": "Suzhou", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "minzhang@suda.edu.cn" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Leveraging user-provided translation to constrain NMT has practical significance. Existing methods can be classified into two main categories, namely the use of placeholder tags for lexicon words and the use of hard constraints during decoding. Both methods can hurt translation fidelity for various reasons. We investigate a data augmentation method, making code-switched training data by replacing source phrases with their target translations. Our method does not change the NMT model or decoding algorithm, allowing the model to learn lexicon translations by copying source-side target words. Extensive experiments show that our method achieves consistent improvements over existing approaches, improving translation of constrained words without hurting unconstrained words.", |
|
"pdf_parse": { |
|
"paper_id": "N19-1044", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Leveraging user-provided translation to constrain NMT has practical significance. Existing methods can be classified into two main categories, namely the use of placeholder tags for lexicon words and the use of hard constraints during decoding. Both methods can hurt translation fidelity for various reasons. We investigate a data augmentation method, making code-switched training data by replacing source phrases with their target translations. Our method does not change the NMT model or decoding algorithm, allowing the model to learn lexicon translations by copying source-side target words. Extensive experiments show that our method achieves consistent improvements over existing approaches, improving translation of constrained words without hurting unconstrained words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "One important research question in domainspecific machine translation (Luong and Manning, 2015) is how to impose translation constraints (Crego et al., 2016; Hokamp and Liu, 2017; Post and Vilar, 2018) . As shown in Figure 1 (a), the word \"breadboard\" can be translated into \"\u5207\u9762 \u5305\u677f (a wooden board that is used to cut bread on)\" in the food domain, but \"\u7535 \u8def \u677f (a construction base for prototyping of electronics)\" in the electronic domain. To enhance translation quality, a lexicon can be leveraged for domainspecific or user-provided words (Arthur et al., 2016; Hasler et al., 2018) . We investigate the method of leveraging pre-specified translation for NMT using such a lexicon.", |
|
"cite_spans": [ |
|
{ |
|
"start": 70, |
|
"end": 95, |
|
"text": "(Luong and Manning, 2015)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 137, |
|
"end": 157, |
|
"text": "(Crego et al., 2016;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 158, |
|
"end": 179, |
|
"text": "Hokamp and Liu, 2017;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 180, |
|
"end": 201, |
|
"text": "Post and Vilar, 2018)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 541, |
|
"end": 562, |
|
"text": "(Arthur et al., 2016;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 563, |
|
"end": 583, |
|
"text": "Hasler et al., 2018)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 216, |
|
"end": 224, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "For leveraging pre-specified translation, one existing approach uses placeholder tags to substitute named entities (Crego et al., 2016; Wang et al., 2017b) or rare words (Luong et al., 2014) on both the source and target sides during training, so that a model can translate such words by learning to translate placeholder tags. For example, the i-th named entity in the source sentence is replaced with \"tag i \", as well as its corresponding translation in the target side. Placeholder tags in the output are replaced with pre-specified translation as a post-processing step. One disadvantage of this approach, however, is that the meaning of the original words in the pre-specified translation is not fully retained, which can be harmful to both adequacy and fluency of the output. Another approach (Hokamp and Liu, 2017; Post and Vilar, 2018) imposes pre-specified translation via lexical constraints, making sure such constraints are satisfied by modifying NMT decoding. This method ensures that pre-specified translations appear in the output. A problem of this method is that it does not explicitly explore the correlation between pre-specified translations and their corresponding source words during decoding, and thus can hurt translation fidelity (Hasler et al., 2018) . There is not a mechanism that allows the model to learn constraint translations during training, which the placeholder method allows.", |
|
"cite_spans": [ |
|
{ |
|
"start": 115, |
|
"end": 135, |
|
"text": "(Crego et al., 2016;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 136, |
|
"end": 155, |
|
"text": "Wang et al., 2017b)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 170, |
|
"end": 184, |
|
"text": "(Luong et al.,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 800, |
|
"end": 822, |
|
"text": "(Hokamp and Liu, 2017;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 823, |
|
"end": 844, |
|
"text": "Post and Vilar, 2018)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 1256, |
|
"end": 1277, |
|
"text": "(Hasler et al., 2018)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We investigate a novel method based on data augmentation, which combines the advantages of both methods above. The idea is to construct synthetic parallel sentences from the original paral-lel training data. The synthetic sentence pairs resemble code-switched source sentences and their translations, where certain source words are replaced with their corresponding target translations. The motivation is to make the model learn to \"translate\" embedded pre-specified translations by copying them from the modified source. During decoding, the source is similarly modified as a preprocessing step. As shown in Figure 1 (b) , translation is executed over the code-switched source, without further constraints or post-processing.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 609, |
|
"end": 621, |
|
"text": "Figure 1 (b)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In contrast to the placeholder method, our method keeps lexical semantic information (i.e. target words v.s. placeholder tags) in the source, which can lead to more adequate translations. Compared with the lexical constraint method, prespecified translation is learned because such information is available both in training and decoding. As a data augmentation method, it can be used on any NMT architecture. In addition, our method enables the model to translate code-switched source sentences, and preserve its strength in translating un-replaced sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To further strengthen copying, we propose two model-level adjustments: First, we share targetside embeddings with source-side target words, so that target vocabulary words have a unique embedding in the NMT system. Second, we integrate pointer network (Vinyals et al., 2015; Gulcehre et al., 2016; Gu et al., 2016; See et al., 2017) into the decoder. The copy mechanism was firstly proposed to copy source words. In our method, it is further used to copy source-side target words.", |
|
"cite_spans": [ |
|
{ |
|
"start": 252, |
|
"end": 274, |
|
"text": "(Vinyals et al., 2015;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 275, |
|
"end": 297, |
|
"text": "Gulcehre et al., 2016;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 298, |
|
"end": 314, |
|
"text": "Gu et al., 2016;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 315, |
|
"end": 332, |
|
"text": "See et al., 2017)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Results on large scale English-to-Russian (En-Ru) and Chinese-to-English (Ch-En) tasks show that our method outperforms both placeholder and lexical constraint methods over a state-of-the-art Transformer (Vaswani et al., 2017 ) model on various test sets across different domains. We also show that shared embedding and pointer network can lead to more successful applications of the copying mechanism. We release four high-quality En-Ru e-commerce test sets translated by Russian language experts, totalling 7169 sentences with an average length of 21 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 204, |
|
"end": 225, |
|
"text": "(Vaswani et al., 2017", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Using placeholders. Luong et al. (2014) use annotated unk tags to present the unk symbols in training corpora, where the correspondence between source and target unk symbols are obtained from word alignment (Brown et al., 1993) . Output unk tags are replaced through a post-processing stage by looking up a pre-specified dictionary or copying the corresponding source word. Crego et al. (2016) extended unk tags symbol to specific symbols that can present name entities. Wang et al. (2017b) and use a similar method. This method is limited when constrain NMT with pre-specified translations consisting of more general words, due to the loss of word meaning when representing them with placeholder tags. In contrast to their work, word meaning is fully kept in modified source in our work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 20, |
|
"end": 39, |
|
"text": "Luong et al. (2014)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 207, |
|
"end": 227, |
|
"text": "(Brown et al., 1993)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 374, |
|
"end": 393, |
|
"text": "Crego et al. (2016)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 471, |
|
"end": 490, |
|
"text": "Wang et al. (2017b)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Lexical constraints. Hokamp and Liu (2017) propose an altered beam search algorithm, namely grid beam search, which takes target-side prespecified translations as lexical constraints during beam search. A potential problem of this method is that translation fidelity is not specifically considered, since there is no indication of a matching source of each pre-specific translation. In addition, decoding speed is significantly reduced (Post and Vilar, 2018) . Hasler et al. (2018) use alignment to gain target-side constraints' corresponding source words, simultaneously use finitestate machines and multi-stack (Anderson et al., 2016) decoding to guide beam search. Post and Vilar (2018) give a fast version of Hokamp and Liu (2017) , which limits the decoding complexity linearly by altering the beam search algorithm through dynamic beam allocation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 42, |
|
"text": "Hokamp and Liu (2017)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 436, |
|
"end": 458, |
|
"text": "(Post and Vilar, 2018)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 461, |
|
"end": 481, |
|
"text": "Hasler et al. (2018)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 613, |
|
"end": 636, |
|
"text": "(Anderson et al., 2016)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 668, |
|
"end": 689, |
|
"text": "Post and Vilar (2018)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 713, |
|
"end": 734, |
|
"text": "Hokamp and Liu (2017)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In contrast to their methods, our method does not make changes to the decoder, and therefore decoding speed remains unchanged. Translation fidelity of pre-specified source words is achieved through a combination of training and decoding procedure, where replaced source-side words still contain their target-side meaning. As a soft method of inserting pre-specified translation, our method does not guarantee that all lexical constraints are satisfied during decoding, but has better overall translation quality compared to their method.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Using probabilistic lexicons. Aiming at making use of one-to-many phrasal translations, the following work is remotely related to our work. Tang et al. (2016) use a phrase memory to provide extra information for their NMT encoder, dynamically switching between word generation and phrase generation during decoding. Wang et al. (2017a) use SMT to recommend prediction for NMT, which contains not only translation operations of a SMT phrase table, but also alignment information and coverage information. Arthur et al. (2016) incorporate discrete lexicons by converting lexicon probabilities into predictive probabilities and linearly interpolating them with NMT probability distributions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 140, |
|
"end": 158, |
|
"text": "Tang et al. (2016)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 316, |
|
"end": 335, |
|
"text": "Wang et al. (2017a)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 504, |
|
"end": 524, |
|
"text": "Arthur et al. (2016)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Our method is similar in the sense that external translations of source phrases are leveraged. However, their tasks are different. In particular, these methods regard one-to-many translation lexicons as a suggestion. In contrast, our task aims to constrain NMT translation through one-to-one prespecified translations. Lexical translations can be used to generate code-switched source sentences during training, but we do not modify NMT models by integrating translation lexicons. In addition, our data augmentation method is more flexible, because it is model-free.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Alkhouli et al. (2018) simulate a dictionaryguided translation task to evaluate NMT's alignment extraction. A one-to-one word translation dictionary is used to guide NMT decoding. In their method, a dictionary entry is limited to only one word on both the source and target sides. In addition, a pre-specified translation can come into effect only if the corresponding source-side word is successfully aligned during decoding.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "On translating named entities, Currey et al. (2017) augment the training data by copying target-side sentences to the source-side, resulting in augmented training corpora where the source and the target sides contain identical sentences. The augmented data is shown to improve translation performance, especially for proper nouns and other words that are identical in the source and target languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Our method is based on data augmentation. During training, augmented data are generated by replacing source words or phrases directly with their corresponding target translations. The motivation is to sample as many code-switched translation pairs as possible. During decoding, given prespecified translations, the source sentence is modified by replacing phrases with their pre-specified translations, so that the trained model can directly copy embedded target translations in the output.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data augmentation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Given a bilingual training corpus, we sample augmented sentence pairs by leveraging a SMT phrase table, which can be trained over the same bilingual corpus or a different large corpus. We extract source-target phrase pairs 2 from the phrase table, replacing source-side phrases of source sentences using the following sampling steps:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "1. Indexing between source-target phrase pairs and training sentences: (a) For each sourcetarget phrase pair, we record all the matching bilingual sentences that contain both the source and target. Word alignment can be used to ensure the phrase pairs that are mutual translation. (b) We also sample bilingual sentences that match two source-target phrase pairs. In particular, given a combination of two phrase pairs, we index bilingual sentences that match both simultaneously.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "2. Sampling: (a) For each source-target phrase pair, we keep at most k 1 randomly selected matching sentences. The source-side phrase is replaced with its target-side translation. (b) For each combination of two source-target phrase pairs, we randomly sample at most k 2 matching sentences. Both source-side matching phrases are replaced with their target translations. 3", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The sampled training data is added to the original training data to form a final set of training sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "3.1" |
|
}, |
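
{

"text": "The following is a minimal, illustrative Python sketch of this sampling procedure (not the released implementation; the names bitext and phrase_table and the simple token matching are assumptions for illustration, and the two-phrase-pair case with k_2 follows the same pattern):\n\nimport random\n\ndef contains(sent, phrase):\n    # sent, phrase: token lists\n    return any(sent[i:i + len(phrase)] == phrase for i in range(len(sent) - len(phrase) + 1))\n\ndef replace(sent, phrase, translation):\n    # replace the first occurrence of phrase in sent with its target translation\n    for i in range(len(sent) - len(phrase) + 1):\n        if sent[i:i + len(phrase)] == phrase:\n            return sent[:i] + translation + sent[i + len(phrase):]\n    return sent\n\ndef augment(bitext, phrase_table, k1=100):\n    # bitext: list of (source, target) token lists; phrase_table: {source phrase: target phrase}\n    augmented = []\n    for src_phrase, tgt_phrase in phrase_table.items():\n        src, tgt = src_phrase.split(), tgt_phrase.split()\n        # step 1: index sentence pairs that contain both sides of the phrase pair\n        matches = [(s, t) for s, t in bitext if contains(s, src) and contains(t, tgt)]\n        # step 2: keep at most k1 of them and code-switch the source side\n        for s, t in random.sample(matches, min(k1, len(matches))):\n            augmented.append((replace(s, src, tgt), t))\n    return augmented",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Training",

"sec_num": "3.1"

},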
|
{ |
|
"text": "We impose target-side pre-specified translations to the source by replacing source phrases with their translations. Lexicons are defined in the form of one-to-one source-target phrase pairs. Different from training, the number of replaced phrases in a source sentence is not necessarily restricted to one or two, which will be discussed in Section 5.5. In practice, pre-specified translations can be provided by customers or through user feedback, which contains one identified translation for specified source segment.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decoding", |
|
"sec_num": "3.2" |
|
}, |
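
{

"text": "At decoding time the same replacement can be applied as a pre-processing step. A hedged usage sketch, reusing the replace helper from the previous sketch and assuming a user-provided lexicon of one-to-one phrase translations:\n\ndef preprocess(source, lexicon):\n    # source: token list; lexicon: {source phrase: pre-specified target translation}\n    for src_phrase, tgt_phrase in lexicon.items():\n        source = replace(source, src_phrase.split(), tgt_phrase.split())\n    return source\n\n# e.g. preprocess('i want a breadboard'.split(), {'breadboard': '\u7535\u8def\u677f'})\n# yields the code-switched input ['i', 'want', 'a', '\u7535\u8def\u677f'], which the trained model\n# translates directly, copying the embedded target phrase into the output.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Decoding",

"sec_num": "3.2"

},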
|
{ |
|
"text": "Transformer (Vaswani et al., 2017) uses selfattention network for both encoding and decod-ing. The encoder is composed of n stacked neural layers. For time step i in layer j, the hidden state h i,j is calculated by employing self-attention over the hidden states in layer j \u2212 1, which are {h 1,j\u22121 , h 2,j\u22121 , ..., h m,j\u22121 }, where m is the number of source-side words.", |
|
"cite_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 34, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In particular, h i,j is calculated as follows: First, a self-attention sub-layer is employed to encode the context. Then attention weights are computed as scaled dot product between the current query h i,j\u22121 and all keys {h 1,j\u22121 , h 2,j\u22121 , ..., h m,j\u22121 }, normalized with a softmax function.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "After that, the context vector is represented as weighted sum of the values projected from hidden states in the previous layer, which are {h 1,j\u22121 , h 2,j\u22121 , ..., h m,j\u22121 }. The hidden state in the previous layer and the context vector are then connected by residual connection, followed by a layer normalization function (Ba et al., 2016) , to produce a candidate hidden state h \u2032 i,j . Finally, another sub-layer including a feed-forward network (FFN) layer, followed by another residual connection and layer normalization, are used to obtain the hidden state h i,j .", |
|
"cite_spans": [ |
|
{ |
|
"start": 323, |
|
"end": 340, |
|
"text": "(Ba et al., 2016)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "4" |
|
}, |
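
{

"text": "To make the per-layer computation concrete, the following is a simplified single-head NumPy sketch of one encoder layer (an illustration, not the actual implementation; multi-head attention and positional encoding are omitted, and ReLU is used in the FFN for brevity although the experiments use Swish):\n\nimport numpy as np\n\ndef layer_norm(x, eps=1e-6):\n    return (x - x.mean(-1, keepdims=True)) / (x.std(-1, keepdims=True) + eps)\n\ndef encoder_layer(H, Wq, Wk, Wv, W1, W2):\n    # H: (m, d) hidden states of layer j-1\n    Q, K, V = H @ Wq, H @ Wk, H @ Wv\n    scores = Q @ K.T / np.sqrt(H.shape[-1])                     # scaled dot product\n    scores = scores - scores.max(-1, keepdims=True)\n    A = np.exp(scores) / np.exp(scores).sum(-1, keepdims=True)  # softmax attention weights\n    context = A @ V                                             # weighted sum of values\n    H_cand = layer_norm(H + context)                            # residual + layer norm -> candidate state\n    ffn = np.maximum(0.0, H_cand @ W1) @ W2                     # feed-forward sub-layer\n    return layer_norm(H_cand + ffn)                             # residual + layer norm -> h_{i,j}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Model",

"sec_num": "4"

},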
|
{ |
|
"text": "In consideration of translation quality, multihead attention is used instead of single-head attention as mentioned above, positional encoding is also used to compensate the missing of position information in this model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The decoder is also composed of n stacked layers. For time step t in layer j, a selfattention sub-layer of hidden state s t,j is calculated by employing self-attention mechanism over hidden states in previous target layer, which are", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "{s 1,j\u22121 , s 2,j\u22121 , ..., s t\u22121,j\u22121 }, resulting in candi- date hidden state s \u2032 t,j .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Then, a second target-tosource sub-layer of hidden state s t,j is inserted above the target self-attention sub-layer. In particular, the queries(Q) are projected from s \u2032 t,j , and the keys(K) and values(V ) are projected from the source hidden states in the last layer of encoder, which are {h 1,n , h 2,n , ..., h m,n }. The output state is another candidate hidden state s \u2032\u2032 t,j . Finally, a last feed-forward sub-layer of hidden state s t,j is calculated by employing self-attention over s \u2032\u2032 t,j . A softmax layer based on decoder's last layer s t,n is used to gain a probability distribution P predict over target-side vocabulary.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "p(y t |y 1 , ..., y t\u22121 , x) = softmax(s t,n * W), (1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "where W is the weight matrix which is learned, x ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "4" |
|
}, |
|
|
{ |
|
"text": "Shared target embeddings enforces the correspondence between source-side and target-side expressions on the embedding level. As shown in Figure 2, during encoding, source-side target word embeddings are identical to their embeddings in the target-side vocabulary embedding matrix. This makes it easier for the model to copy source-side target words to the output.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 137, |
|
"end": 143, |
|
"text": "Figure", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Shared Target Embeddings", |
|
"sec_num": "4.1" |
|
}, |
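
{

"text": "A minimal sketch of how this sharing can be realized at embedding-lookup time (an illustrative assumption about the implementation, not the authors' code): source-side tokens that belong to the target vocabulary are looked up in the decoder's target embedding matrix, and all other tokens in the source embedding matrix.\n\nimport numpy as np\n\ndef embed_source(tokens, src_vocab, tgt_vocab, src_emb, tgt_emb):\n    # src_emb: (|Vs|, d); tgt_emb: (|Vt|, d), the same matrix used on the target side\n    vectors = []\n    for tok in tokens:\n        if tok in tgt_vocab:                       # source-side target word: reuse its target embedding\n            vectors.append(tgt_emb[tgt_vocab[tok]])\n        else:\n            vectors.append(src_emb[src_vocab[tok]])\n    return np.stack(vectors)                       # (m, d) input to the encoder",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Shared Target Embeddings",

"sec_num": "4.1"

},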
|
{ |
|
"text": "To strengthen copying through locating sourceside target words, we integrate pointer network (Gulcehre et al., 2016) into the decoder, as shown in Figure 2 . At each decoding time step t, the target-to-source attention weights \u03b1 t,1 , ..., \u03b1 t,m are utilized as a probability distribution P copy , which models the probability of copying a word from the i-th source-side position. The i-th source-side position may represent a source-side word or a source-side target word. P copy is added to P predict , the probability distribution over targetside vocabulary, to gain a new distribution over both the source and the target side vocabulary 4 :", |
|
"cite_spans": [ |
|
{ |
|
"start": 93, |
|
"end": 116, |
|
"text": "(Gulcehre et al., 2016)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 147, |
|
"end": 155, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Pointer Network", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P = (1 \u2212 g pred ) * P copy + g pred * P predict ,", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Pointer Network", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "where g pred is used to control the contribution of two probability distributions. For time step t, g pred is calculated from the context vector c t and the current hidden state of the decoder's last layer s t,n :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pointer Network", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "g pred = \u03c3(c t * W p + s t,n * W q + b r ), (3)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pointer Network", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "where W p , W q , and b r are parameters trained and \u03c3 is the sigmoid function. In addition, the context vector c t is calculated as c t =", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pointer Network", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "m i=1 \u03b1 t,i * h i,n , where \u03b1 t,i is attention weight mentioned earlier. {h 1,n , h 2,n , .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pointer Network", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": ".., h m,n } are the source-side hidden states of the encoder's last layer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pointer Network", |
|
"sec_num": "4.2" |
|
}, |
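
{

"text": "Putting Equations (2) and (3) together, a minimal NumPy sketch of the output distribution at one decoding step is given below (shapes, variable names and the use of a single merged vocabulary for both distributions are assumptions for illustration):\n\nimport numpy as np\n\ndef sigmoid(x):\n    return 1.0 / (1.0 + np.exp(-x))\n\ndef softmax(x):\n    e = np.exp(x - x.max())\n    return e / e.sum()\n\ndef output_distribution(s_tn, h_n, alpha_t, W, W_p, W_q, b_r, src_token_ids, vocab_size):\n    # s_tn: (d,) decoder state; h_n: (m, d) encoder states; alpha_t: (m,) attention weights\n    p_predict = softmax(s_tn @ W)                   # Eq. (1): distribution from the softmax layer\n    c_t = alpha_t @ h_n                             # context vector c_t\n    g_pred = sigmoid(c_t @ W_p + s_tn @ W_q + b_r)  # Eq. (3): gate between copying and generating\n    p_copy = np.zeros(vocab_size)\n    for i, tok in enumerate(src_token_ids):         # scatter attention mass onto source positions\n        p_copy[tok] += alpha_t[i]\n    return (1.0 - g_pred) * p_copy + g_pred * p_predict  # Eq. (2)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Pointer Network",

"sec_num": "4.2"

},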
|
{ |
|
"text": "We compare our method with strong baselines on large-scale En-Ru and Ch-En tasks on various test sets across different domains, using a strongly optimized Transformer (Vaswani et al., 2017) . BLEU (Papineni et al., 2002) is used for evaluation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 167, |
|
"end": 189, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 197, |
|
"end": 220, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Our training corpora are taken from the WMT2018 news translation task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "En-Ru. We use 13.88M sentences as baseline training data, containing both a real bilingual corpus and a synthetic back-translation corpus (Sennrich et al., 2015a) . The synthetic corpus is translated from \"NewsCommonCrawl\", which can be obtained from the WMT task. The news domain contains four different test sets published by WMT2018 over the recent years, namely \"news2015\", \"news2016\", \"news2017\", and \"news2018\", respectively, each having one reference. The e-commerce domain contains four files totalling 7169 sentences, namely \"sub-ject17\", \"desc17\", \"subject18\", and \"desc18\", respectively, each having one reference. The sentences are extracted from e-commerce websites, in which \"subject\"s are the goods names shown on a listing page. \"desc\"s refer to information in a commodity's description page. \"subject17\" and \"desc17\" are released 5 . Our development set is \"news2015\".", |
|
"cite_spans": [ |
|
{ |
|
"start": 138, |
|
"end": 162, |
|
"text": "(Sennrich et al., 2015a)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Ch-En. We use 7.42M sentences as our baseline training data, containing both real bilingual corpus and synthetic back-translation corpus (Sennrich et al., 2015a). We use seven public development and test data sets, four in the news domain, namely \"NIST02\", \"NIST03\", \"NIST04\", \"NIST05\", respectively, each with four references, and three in the spoken language domain, namely \"CSTAR03\", \"IWSLT2004\", \"IWLST2005\", respectively, each with 16 references. \"NIST03\" is used for development.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We use six self-attention layers for both the encoder and the decoder. The embedding size and the hidden size are set to 512. Eight heads are used for self-attention. A feed-forward layer with 2048 cells and Swish (Ramachandran et al., 2018) is used as the activation function. Adam (Kingma and Ba, 2014) is used for training; warmup step is 16000; the learning rate is 0.0003. We use label smoothing (Junczys-Dowmunt et al., 2016) with a confidence score of 0.9, and all the drop-out (Gal and Ghahramani, 2016) probabilities are set to 0.1.", |
|
"cite_spans": [ |
|
{ |
|
"start": 214, |
|
"end": 241, |
|
"text": "(Ramachandran et al., 2018)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 485, |
|
"end": 511, |
|
"text": "(Gal and Ghahramani, 2016)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Settings", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We extract a SMT phrase table on the bilingual training corpus by using moses (Koehn et al., 2007) with default setting, which is used for matching sentence pairs to generate augmented training data. We apply count-based pruning (Zens et al., 2012) to the phrase table, the threshold is set to 10.", |
|
"cite_spans": [ |
|
{ |
|
"start": 78, |
|
"end": 98, |
|
"text": "(Koehn et al., 2007)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 229, |
|
"end": 248, |
|
"text": "(Zens et al., 2012)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Settings", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "During decoding, similar to Hasler et al. 2018, Alkhouli et al. (2018) and Post and Vilar (2018), we make use of references to obtain gold constraints. Following previous work, prespecified translations for each source sentence are sampled from references and used by all systems for fair comparison.", |
|
"cite_spans": [ |
|
{ |
|
"start": 48, |
|
"end": 70, |
|
"text": "Alkhouli et al. (2018)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Settings", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "In all the baseline systems, the vocabulary size is set to 50K on both sides. For \"Data augmentation\", to allow the source-side dictionary to cover target-side words, the target-and source-side vocabularies are merged for a new source vocabulary. For \"Shared embeddings\", the source vocabulary remains the same as the baselines, where the source-side target words use embeddings from target-side vocabulary.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Settings", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We use an in-house reimplementation of Transformer, similar to Google's Tensor2Tensor. For the baselines, we reimplement Crego et al. (2016) , as well as Post and Vilar (2018) . BPE (Sennrich et al., 2015b) is used for all experiments, the operation is set to 50K. Our test sets cover news and ecommerce domains on En-Ru, and news and spoken language domains on Ch-En.", |
|
"cite_spans": [ |
|
{ |
|
"start": 121, |
|
"end": 140, |
|
"text": "Crego et al. (2016)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 154, |
|
"end": 175, |
|
"text": "Post and Vilar (2018)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 182, |
|
"end": 206, |
|
"text": "(Sennrich et al., 2015b)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Configurations", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Baseline 1: Using Placeholder. We combine Luong et al. (2014) and Crego et al. (2016) generating placeholder tags during training, following Crego et al. (2016) , we use a named entity translation dictionary which is extracted from Wikidata 6 . The dictionary is released together with e-commerce test sets, which is mentioned before. For Ch-En, the dictionary contains 285K person names, 746K location names and 1.6K organization names. For En-Ru, the dictionary contains 471K person names, 254K location names and 1.5K organization names. Additionally, we manually corrected a dictionary which contains 142K brand names and product names translation for En-Ru. By further leveraging word alignment in the same way as Luong et al. (2014) , the placeholder tags are annotated with indices. We use FastAlign (Dyer et al., 2013) to generate word alignment. The amount of sentences containing placeholder tags is controlled to a ratio of 5% of the corpus. During decoding, pre-specified translations described in Section 5.2 are used. Baseline 2: Lexical Constraints. We reimplement Post and Vilar (2018) , integrating their algorithm into our Transformer. Target-side words or phrases of pre-specified translations mentioned in Section 5.2 are used as lexical constraints.", |
|
"cite_spans": [ |
|
{ |
|
"start": 42, |
|
"end": 61, |
|
"text": "Luong et al. (2014)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 66, |
|
"end": 85, |
|
"text": "Crego et al. (2016)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 141, |
|
"end": 160, |
|
"text": "Crego et al. (2016)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 719, |
|
"end": 738, |
|
"text": "Luong et al. (2014)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 807, |
|
"end": 826, |
|
"text": "(Dyer et al., 2013)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 1080, |
|
"end": 1101, |
|
"text": "Post and Vilar (2018)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Configurations", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Our System. During training, we use the method described in Section 3.1 to obtain the augmented training data. The SMT phrase table mentioned in Section 5.2 is used for \"Indexing\" and \"Sampling\". During decoding, pre-specified translations mentioned in Section 5.2 are used. The augmented data contain sampled sentences with one or two replacements on the source side. By applying the two sampling steps described in Section 3.1, about 10M and 6M augmented Ch-En and En-Ru sentences are generated, respectively. The final training corpora consists of both the augmented training data and the original training data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Configurations", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Comparison with Baselines. Our Transformer implementation can give comparable performance with state-of-the-art NMT (Junczys-Dowmunt et al., 2018), see \"Transformer\" and \"Marian\" in Table 1 , which also shows a comparison of different methods on En-Ru. The lexical constraint method gives improvements on both the news and the e-commerce domains, compared with the Transformer baseline. The placeholder method also gives an improvement on the e-commerce domain. The average improvement is calculated over all the test set results in each domain. In the news domain, the average improvement of our method is 3.48 BLEU higher compared with placeholder, and 2.94 over lexical constraints. In the e-commerce domain, the average improvement of our method is 1.34 BLEU compared with placeholder, and 2.63 with lexical constraints. Both shared embedding and pointer network are effective. Table 2 shows the same comparison on Ch-En. In the spoken language domain, the average improvement is 1.35 BLEU compared with placeholder, and 0.42 with lexical constraints. In the news domain, the average improvement is 1.38 BLEU compared with placeholder, and 0.74 with lexical constraints.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 182, |
|
"end": 189, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 882, |
|
"end": 889, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "We find that the placeholder method can only bring improvements on the En-Ru e-commerce test sets, since the pre-specified translations of the four e-commerce test sets are mostly entities, such as brand names or product names. Using placeholder tags to represent these entities leads to relatively little loss of word meaning. But on many of the other test sets, pre-specified translations are mostly vocabulary words. The placeholder tags fail to keep their word meaning during translation, leading to lower results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "The speed contrast between unconstrained NMT, lexical constraint and our method is shown in Table 3 . The decoding speed of our method is equal to unconstrained NMT, and faster than the lexical constraint method, which confirms our in- Figure 3 gives a comparison of different system's translations. Given a Chinese source sentence, the baseline system fails to translate \"\u8ba1\u5212\u751f\u80b2\" adequately, as \"family planning\" is not a correct translation of \"\u8ba1\u5212\u751f\u80b2\". In the pre-specified methods, the correct translation (\"\u8ba1\u5212\u751f\u80b2\" to \"planned parenthood\") is achieved through different ways.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 92, |
|
"end": 99, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 236, |
|
"end": 244, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "For the placeholder method, the source phrase \"\u8ba1\u5212\u751f\u80b2\" is replaced with the placeholder tag \"tag 1 \" during pre-processing. After translation, output \"tag 1 \" is replaced with \"planned parenthood\" as a post-processing step. However, the underlined word \"program\" is generated before \"planned parenthood\", which has no relationship with any source-side word. The source-side word \"\u534f\u4f1a\", which means \"association\", is omitted in translation. Through deeper analysis, the specific phrase \"program tag 1 \" occurs frequently in the training data. During decoding, using the hard tag leads to the loss of the source phrase's original meaning. As a result, the word \"program\" is incorrectly generated along with \"tag 1 \".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "The lexical constraints method regards the tar- get side of the pre-specified translation as a lexical constraint. Here the altered beam search algorithm fails to predict the constraint \"planned parenthood\" during previous decoding steps. Although the constraint finally comes into effect, over translation occurs, which is highlighted by the underlined words. This is because the method enforces hard constraints, preventing decoding to stop until all constraints are met.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "Our method makes use of pre-specified translation by replacing the source-side phrase \"\u8ba1\u5212\u751f \u80b2\" with the target-side translation \"planned parenthood\", copying the desired phrase to the output along with the decoding procedure. The translation \"association of planned parenthood from providing\" is the exact translation of the sourceside phrase \"\u8ba1\u5212(planned) \u751f\u80b2(parenthood) \u534f \u4f1a(association) \u63d0\u4f9b(providing)\", and agrees with the reference, \"planned parenthood to provide\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "Effect of Using More Pre-specified Translations. Even though the augmented training data have only one or two replacements on the source side, the model can translate a source sentence with up to five replacements. Figure 4 shows that compared with unconstrained Transformer, the translation quality of our method keeps increasing when the number of replacements increases, since more pre-specified translations are used.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 215, |
|
"end": 223, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Analysis", |
|
"sec_num": "5.5" |
|
}, |
|
{ |
|
"text": "We additionally measure the effect on the Ch-En WMT test sets, namely \"newsdev2017\", \"new-stest2017\", \"newstest2018\", respectively, each having only one reference instead of four. The baseline BLEU scores on these three test sets are 18.49, 20.01 and 19.05, respectively. Our method gives BLEU scores of 20.56, 22.3, 21.08, respectively, when using one or two pre-specified translations for each sentence. The increased BLEU when utilizing different number of pre-specified translations is shown in Figure 4 . We found that the improvements on WMT test sets are more significant than on NIST, since pre-specified translations are sampled from one reference only, enforcing the output to match this reference. The placeholder method does not give consistent improvements on news test sets, due to the same reason as mentioned earlier.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 499, |
|
"end": 507, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Analysis", |
|
"sec_num": "5.5" |
|
}, |
|
{ |
|
"text": "As shown in Figure 5 , the copy success rate of our method does not decrease significantly when the number of replacements grows. Here, a copy success refers a pre-specified target translation that can occur in the output. The placeholder method achieves a higher copy success rate than ours when the number of replacements is 1, but the copy success rate decreases when using more pre-specified translations. The copy success rate of the lexical constraint method is always 100%, since it imposes hard constraints rather than soft constraints. However, as discussed earlier, overall translation quality can be harmed as a cost of satisfying decoding constraints by their method.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 20, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Analysis", |
|
"sec_num": "5.5" |
|
}, |
|
{ |
|
"text": "In the presented experiment results, the highest copy success rate of our method is 90.54%, which means a number of source-side target words or phrases are not successfully copied to the translation output. This may be caused by the lack of training samples for certain target-side words or phrases. In En-Ru, we additionally train a model with augmented data that is obtained by matching NIST02 NIST03 NIST04 NIST05 Data Aug. 83.89% 85.71% 86.71% 87.45% +Share&Point 87.72% 88.31% 89.18% 90.54% an SMT phrase table without any pruning strategy. The copy success rate can reach 98%, even without using \"shared embedding\" and \"pointer network\" methods. Effect of Shared Embeddings and Pointer Network. The gains of shared embeddings and pointer network are reflected in both the copy success rate and translation quality. As shown in Table 4, when using one pre-specified translation for each source sentence, the copy success rate improves on various test sets by integrating shared embeddings and pointer network, demonstrating that more pre-specified translations come into effect. Table 1 and Table 2 earlier show the improvement of translation quality.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1084, |
|
"end": 1103, |
|
"text": "Table 1 and Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Analysis", |
|
"sec_num": "5.5" |
|
}, |
|
{ |
|
"text": "Translating non Code-Switched Sentences. Our method preserves its strength on translating non code-switched sentences. As shown in Table 5, the model trained on the augmented corpus has comparable strength on translating unreplaced sentences as the model trained on the original corpus. In addition, on some test sets, our method is slightly better than the baseline when translating non code-switched source sentences. This can be explained from two aspects: First, the augmented data make the model more robust to perturbed inputs; Second, the pointer network makes the model better by copying certain sourceside words (Gulcehre et al., 2016) , such as nontransliterated named entities.", |
|
"cite_spans": [ |
|
{ |
|
"start": 621, |
|
"end": 644, |
|
"text": "(Gulcehre et al., 2016)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analysis", |
|
"sec_num": "5.5" |
|
}, |
|
{ |
|
"text": "We investigated a data augmentation method for constraining NMT with pre-specified translations, utilizing code-switched source sentences and their translations as augmented training data. Our method allows the model to learn to translate source-side target phrases by \"copying\" them to the output, achieving consistent improvements over previous lexical constraint methods on large NMT test sets. To the best of our knowledge, we are the first to leverage code switching for NMT with pre-specified translations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In the future, we will study how the copy success rate and the BLEU scores interact when different sampling strategies are taken to obtain augmented training corpus and when the amount of augmented data grows. Another direction is to validate the performance when applying this approach to language pairs that contain a number of identical letters in their alphabets, such as English to French and English to Italian.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Future Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "To best of our knowledge, this is the first public ecommerce test set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Source-side phrase is at most trigram.3 We set k1 = 100, k2 = 30 empirically.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For the words which belong to the source-side vocabulary but are not appeared in the source-side sentence, the probabilities are set to 0.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/batman2013/ e-commerce_test_sets", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.wikidata.org", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We thank the anonymous reviewers for their detailed and constructed comments. Yue Zhang is the corresponding author. The research work is supported by the National Natural Science Foundation of China (61525205). Thanks for Shaohui Kuang, Qian Cao, Zhongqiang Huang and Fei Huang for their useful discussion.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "On the alignment problem in multi-head attention-based neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Tamer", |
|
"middle": [], |
|
"last": "Alkhouli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gabriel", |
|
"middle": [], |
|
"last": "Bretschner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1809.03985" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tamer Alkhouli, Gabriel Bretschner, and Hermann Ney. 2018. On the alignment problem in multi-head attention-based neural machine translation. arXiv preprint arXiv:1809.03985.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Guided open vocabulary image captioning with constrained beam search", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Anderson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Basura", |
|
"middle": [], |
|
"last": "Fernando", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Gould", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter Anderson, Basura Fernando, Mark Johnson, and Stephen Gould. 2016. Guided open vocabulary image captioning with constrained beam search. CoRR, abs/1612.00576.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Incorporating discrete translation lexicons into neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Arthur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graham", |
|
"middle": [], |
|
"last": "Neubig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Satoshi", |
|
"middle": [], |
|
"last": "Nakamura", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1606.02006" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philip Arthur, Graham Neubig, and Satoshi Naka- mura. 2016. Incorporating discrete translation lexi- cons into neural machine translation. arXiv preprint arXiv:1606.02006.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "The mathematics of statistical machine translation: Parameter estimation", |
|
"authors": [ |
|
{ |
|
"first": "Vincent J Della", |
|
"middle": [], |
|
"last": "Peter F Brown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen A Della", |
|
"middle": [], |
|
"last": "Pietra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert L", |
|
"middle": [], |
|
"last": "Pietra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mercer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "Computational linguistics", |
|
"volume": "19", |
|
"issue": "2", |
|
"pages": "263--311", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter F Brown, Vincent J Della Pietra, Stephen A Della Pietra, and Robert L Mercer. 1993. The mathemat- ics of statistical machine translation: Parameter esti- mation. Computational linguistics, 19(2):263-311.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Systran's pure neural machine translation systems", |
|
"authors": [ |
|
{ |
|
"first": "Yongchao", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1610.05540" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yongchao Deng, et al. 2016. Systran's pure neu- ral machine translation systems. arXiv preprint arXiv:1610.05540.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Copied monolingual data improves low-resource neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Currey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Valerio Miceli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenneth", |
|
"middle": [], |
|
"last": "Barone", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Heafield", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Second Conference on Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "148--156", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anna Currey, Antonio Valerio Miceli Barone, and Ken- neth Heafield. 2017. Copied monolingual data im- proves low-resource neural machine translation. In Proceedings of the Second Conference on Machine Translation, pages 148-156.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "A simple, fast, and effective reparameterization of ibm model 2", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Chahuneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah A", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "644--648", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Dyer, Victor Chahuneau, and Noah A Smith. 2013. A simple, fast, and effective reparameteriza- tion of ibm model 2. In Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 644-648.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "A theoretically grounded application of dropout in recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Yarin", |
|
"middle": [], |
|
"last": "Gal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zoubin", |
|
"middle": [], |
|
"last": "Ghahramani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1019--1027", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yarin Gal and Zoubin Ghahramani. 2016. A theoret- ically grounded application of dropout in recurrent neural networks. In Advances in neural information processing systems, pages 1019-1027.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Incorporating copying mechanism in sequence-to-sequence learning", |
|
"authors": [ |
|
{ |
|
"first": "Jiatao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhengdong", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [ |
|
"O", |
|
"K" |
|
], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1603.06393" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiatao Gu, Zhengdong Lu, Hang Li, and Victor OK Li. 2016. Incorporating copying mechanism in sequence-to-sequence learning. arXiv preprint arXiv:1603.06393.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Pointing the unknown words", |
|
"authors": [ |
|
{ |
|
"first": "Caglar", |
|
"middle": [], |
|
"last": "Gulcehre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sungjin", |
|
"middle": [], |
|
"last": "Ahn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ramesh", |
|
"middle": [], |
|
"last": "Nallapati", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bowen", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1603.08148" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Caglar Gulcehre, Sungjin Ahn, Ramesh Nallap- ati, Bowen Zhou, and Yoshua Bengio. 2016. Pointing the unknown words. arXiv preprint arXiv:1603.08148.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Neural machine translation decoding with terminology constraints", |
|
"authors": [ |
|
{ |
|
"first": "Eva", |
|
"middle": [], |
|
"last": "Hasler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adri\u00e0", |
|
"middle": [], |
|
"last": "De Gispert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gonzalo", |
|
"middle": [], |
|
"last": "Iglesias", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bill", |
|
"middle": [], |
|
"last": "Byrne", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1805.03750" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eva Hasler, Adri\u00e0 De Gispert, Gonzalo Iglesias, and Bill Byrne. 2018. Neural machine translation de- coding with terminology constraints. arXiv preprint arXiv:1805.03750.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Lexically constrained decoding for sequence generation using grid beam search", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Hokamp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1704.07138" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Hokamp and Qun Liu. 2017. Lexically con- strained decoding for sequence generation using grid beam search. arXiv preprint arXiv:1704.07138.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "The amu-uedin submission to the wmt16 news translation task: Attention-based nmt models as feature functions in phrase-based smt", |
|
"authors": [ |
|
{ |
|
"first": "Marcin", |
|
"middle": [], |
|
"last": "Junczys-Dowmunt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomasz", |
|
"middle": [], |
|
"last": "Dwojak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1605.04809" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcin Junczys-Dowmunt, Tomasz Dwojak, and Rico Sennrich. 2016. The amu-uedin submission to the wmt16 news translation task: Attention-based nmt models as feature functions in phrase-based smt. arXiv preprint arXiv:1605.04809.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Marian: Fast neural machine translation in C++", |
|
"authors": [ |
|
{ |
|
"first": "Marcin", |
|
"middle": [], |
|
"last": "Junczys-Dowmunt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roman", |
|
"middle": [], |
|
"last": "Grundkiewicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomasz", |
|
"middle": [], |
|
"last": "Dwojak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Hoang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenneth", |
|
"middle": [], |
|
"last": "Heafield", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Neckermann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frank", |
|
"middle": [], |
|
"last": "Seide", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ulrich", |
|
"middle": [], |
|
"last": "Germann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alham", |
|
"middle": [], |
|
"last": "Fikri Aji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikolay", |
|
"middle": [], |
|
"last": "Bogoychev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andr\u00e9 F. T.", |
|
"middle": [], |
|
"last": "Martins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of ACL 2018, System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "116--121", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcin Junczys-Dowmunt, Roman Grundkiewicz, Tomasz Dwojak, Hieu Hoang, Kenneth Heafield, Tom Neckermann, Frank Seide, Ulrich Germann, Alham Fikri Aji, Nikolay Bogoychev, Andr\u00e9 F. T. Martins, and Alexandra Birch. 2018. Marian: Fast neural machine translation in C++. In Proceedings of ACL 2018, System Demonstrations, pages 116- 121, Melbourne, Australia. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "Diederik P", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1412.6980" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Moses: Open source toolkit for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Hoang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcello", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicola", |
|
"middle": [], |
|
"last": "Bertoldi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brooke", |
|
"middle": [], |
|
"last": "Cowan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wade", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christine", |
|
"middle": [], |
|
"last": "Moran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Zens", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 45th annual meeting of the ACL on interactive poster and demonstration sessions", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "177--180", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn, Hieu Hoang, Alexandra Birch, Chris Callison-Burch, Marcello Federico, Nicola Bertoldi, Brooke Cowan, Wade Shen, Christine Moran, Richard Zens, et al. 2007. Moses: Open source toolkit for statistical machine translation. In Pro- ceedings of the 45th annual meeting of the ACL on interactive poster and demonstration sessions, pages 177-180. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Neural name translation improves neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Xiaoqing", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiajun", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chengqing", |
|
"middle": [], |
|
"last": "Zong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1607.01856" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaoqing Li, Jiajun Zhang, and Chengqing Zong. 2016. Neural name translation improves neural machine translation. arXiv preprint arXiv:1607.01856.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Stanford neural machine translation systems for spoken language domains", |
|
"authors": [ |
|
{ |
|
"first": "Minh-Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the International Workshop on Spoken Language Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "76--79", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minh-Thang Luong and Christopher D Manning. 2015. Stanford neural machine translation systems for spo- ken language domains. In Proceedings of the In- ternational Workshop on Spoken Language Transla- tion, pages 76-79.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Addressing the rare word problem in neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Minh-Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc V", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Zaremba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1410.8206" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minh-Thang Luong, Ilya Sutskever, Quoc V Le, Oriol Vinyals, and Wojciech Zaremba. 2014. Addressing the rare word problem in neural machine translation. arXiv preprint arXiv:1410.8206.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Bleu: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proc. ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proc. ACL, pages 311-318, Philadelphia, Pennsylvania, USA.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Fast lexically constrained decoding with dynamic beam allocation for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Post", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Vilar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1804.06609" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matt Post and David Vilar. 2018. Fast lexically constrained decoding with dynamic beam alloca- tion for neural machine translation. arXiv preprint arXiv:1804.06609.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Searching for activation functions", |
|
"authors": [ |
|
{ |
|
"first": "Prajit", |
|
"middle": [], |
|
"last": "Ramachandran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barret", |
|
"middle": [], |
|
"last": "Zoph", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc V", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Prajit Ramachandran, Barret Zoph, and Quoc V Le. 2018. Searching for activation functions.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Get to the point: Summarization with pointer-generator networks", |
|
"authors": [ |
|
{ |
|
"first": "Abigail", |
|
"middle": [], |
|
"last": "See", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter J", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1704.04368" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abigail See, Peter J Liu, and Christopher D Man- ning. 2017. Get to the point: Summarization with pointer-generator networks. arXiv preprint arXiv:1704.04368.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Improving neural machine translation models with monolingual data", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Computer Science", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2015a. Improving neural machine translation mod- els with monolingual data. Computer Science.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Neural machine translation of rare words with subword units", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1508.07909" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2015b. Neural machine translation of rare words with subword units. arXiv preprint arXiv:1508.07909.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Neural machine translation with external phrase memory", |
|
"authors": [ |
|
{ |
|
"first": "Yaohua", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fandong", |
|
"middle": [], |
|
"last": "Meng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhengdong", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip Lh", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1606.01792" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yaohua Tang, Fandong Meng, Zhengdong Lu, Hang Li, and Philip LH Yu. 2016. Neural machine transla- tion with external phrase memory. arXiv preprint arXiv:1606.01792.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Pointer networks", |
|
"authors": [ |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Meire", |
|
"middle": [], |
|
"last": "Fortunato", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Navdeep", |
|
"middle": [], |
|
"last": "Jaitly", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2692--2700", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oriol Vinyals, Meire Fortunato, and Navdeep Jaitly. 2015. Pointer networks. In Advances in Neural In- formation Processing Systems, pages 2692-2700.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Neural machine translation advised by statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Xing", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhengdong", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhaopeng", |
|
"middle": [], |
|
"last": "Tu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deyi", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3330--3336", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xing Wang, Zhengdong Lu, Zhaopeng Tu, Hang Li, Deyi Xiong, and Min Zhang. 2017a. Neural machine translation advised by statistical machine translation. In AAAI, pages 3330-3336.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Sogou neural machine translation systems for wmt17", |
|
"authors": [ |
|
{ |
|
"first": "Yuguang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shanbo", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liyang", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiajun", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Muze", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lin", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanfeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongtao", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Second Conference on Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "410--415", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuguang Wang, Shanbo Cheng, Liyang Jiang, Jia- jun Yang, Wei Chen, Muze Li, Lin Shi, Yanfeng Wang, and Hongtao Yang. 2017b. Sogou neural ma- chine translation systems for wmt17. In Proceed- ings of the Second Conference on Machine Transla- tion, pages 410-415.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "A systematic comparison of phrase table pruning techniques", |
|
"authors": [ |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Zens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daisy", |
|
"middle": [], |
|
"last": "Stanton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "972--983", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Richard Zens, Daisy Stanton, and Peng Xu. 2012. A systematic comparison of phrase table pruning tech- niques. In Proceedings of the 2012 Joint Confer- ence on Empirical Methods in Natural Language Processing and Computational Natural Language Learning, pages 972-983. Association for Compu- tational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"text": "Figure 1: Constrained NMT", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"text": "Shared embeddings and pointer network represent the source sentence, {y 1 , y 2 , ..., y t } represent target words.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"text": "Sample outputs.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF3": { |
|
"type_str": "figure", |
|
"text": "Increased BLEU on Ch-En test sets.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF4": { |
|
"type_str": "figure", |
|
"text": "Copy success rate on Ch-En test sets.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>. For</td></tr></table>", |
|
"text": "Placeholder 33.14 32.07 36.24 32.03 -0.15 9.81 24.04 13.84 29.34 +1.27 + Lexi. Cons. 33.50 32.62 36.65 32.88 +0.39 9.24 34.37 39.02 34.44 +2.26 10.82 25.84 15.20 30.97 +2.72 + Share&Point 36.44 35.31 40.23 35.43 +3.33 11.58 26.53 16.08 32.17 +3.61Results on En-Ru, one or two source phrases of each sentence have pre-specified translation. \"Transformer\" is our in-house vanilla Transformer baseline. \"Marian\" is the implementation of Transformer byJunczys- Dowmunt et al. (2018), which is used as a reference of our Transformer implementation.", |
|
"num": null |
|
}, |
|
"TABREF2": { |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"text": "", |
|
"num": null |
|
}, |
|
"TABREF4": { |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>tuition introduced earlier.</td></tr><tr><td>Sample Outputs.</td></tr></table>", |
|
"text": "Decoding speed (words/sec), Ch-En dev set.", |
|
"num": null |
|
}, |
|
"TABREF5": { |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td colspan=\"4\">news15 news16 news17 news18</td></tr><tr><td colspan=\"2\">Baseline 33.29</td><td>31.95</td><td>36.57</td><td>32.27</td></tr><tr><td>Ours</td><td>33.53</td><td>32.29</td><td>36.54</td><td>32.47</td></tr></table>", |
|
"text": "Copy success rate on Ch-En test sets.", |
|
"num": null |
|
}, |
|
"TABREF6": { |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"text": "BLEU scores of non code-switched (original) input on En-Ru test sets.", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |