|
{ |
|
"paper_id": "I17-1003", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:40:02.859350Z" |
|
}, |
|
"title": "Improving Sequence to Sequence Neural Machine Translation by Utilizing Syntactic Dependency Information", |
|
"authors": [ |
|
{ |
|
"first": "An", |
|
"middle": [ |
|
"Nguyen" |
|
], |
|
"last": "Le", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ander", |
|
"middle": [], |
|
"last": "Martinez", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "ander.martinez.zy4@is.naist.jp" |
|
}, |
|
{ |
|
"first": "Akifumi", |
|
"middle": [], |
|
"last": "Yoshimoto", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "This author's present affiliation is CyberAgent, Inc", |
|
"institution": "", |
|
"location": { |
|
"settlement": "Tokyo", |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "akifumi-y@is.naist.jp" |
|
}, |
|
{ |
|
"first": "Yuji", |
|
"middle": [], |
|
"last": "Matsumoto", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Sequence to Sequence Neural Machine Translation has achieved significant performance in recent years. Yet, there are some existing issues that Neural Machine Translation still does not solve completely. Two of them are translation of long sentences and \"over-translation\". To address these two problems, we propose an approach that utilize more grammatical information such as syntactic dependencies, so that the output can be generated based on more abundant information. In addition, the output of the model is presented not as a simple sequence of tokens but as a linearized tree construction. Experiments on the Europarl-v7 dataset of French-to-English translation demonstrate that our proposed method improves BLEU scores by 1.57 and 2.40 on datasets consisting of sentences with up to 50 and 80 tokens, respectively. Furthermore, the proposed method also solved the two existing problems, ineffective translation of long sentences and over-translation in Neural Machine Translation.", |
|
"pdf_parse": { |
|
"paper_id": "I17-1003", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Sequence to Sequence Neural Machine Translation has achieved significant performance in recent years. Yet, there are some existing issues that Neural Machine Translation still does not solve completely. Two of them are translation of long sentences and \"over-translation\". To address these two problems, we propose an approach that utilize more grammatical information such as syntactic dependencies, so that the output can be generated based on more abundant information. In addition, the output of the model is presented not as a simple sequence of tokens but as a linearized tree construction. Experiments on the Europarl-v7 dataset of French-to-English translation demonstrate that our proposed method improves BLEU scores by 1.57 and 2.40 on datasets consisting of sentences with up to 50 and 80 tokens, respectively. Furthermore, the proposed method also solved the two existing problems, ineffective translation of long sentences and over-translation in Neural Machine Translation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Our task is to construct a model which learns input in sequence form and decodes output as a linearized dependency tree. In this work, we propose an approach in which dependency labels are incorporated into the model to represent more grammatical information in the output sequence. As we know, the Sequence to Sequence (Seq2Seq) Learning model Aharoni et al., 2016) is extremely effective on a va-riety of tasks that require a mapping between a sequence to sequence. Therefore, it is used to solve many tasks in natural language processing. The Seq2Seq model consists of an encoder-decoder neural network which encodes a variable-length input sequence into a vector and decodes it into a variable-length output. Since the model uses the information of the source representation and the previously generated words to produce the next-word token, this distributed representation allows the Seq2Seq model to generate appropriate mapping between the input and the output (Li et al., 2016) . For specific tasks, Neural Machine Translation (NMT) model, which is based on the Seq2Seq learning, has achieved excellent translation performance in recent years Bahdanau et al., 2015; Luong et al., 2015; Firat et al., 2016) . In particular, the NMT model which is built upon an encoder-decoder framework with attention mechanism (Bahdanau et al., 2015) can also pay attention and its decoder knows which part of the input is relevant for the word that is currently being translated. Therefore, it has shown competitive results and outperformed conventional statistical methods (Bentivogli et al., 2016) . Despite of these advantages, NMT model still has a couple particular issues to be solved such as dealing with fixed vocabulary, not applicable to small-data, additional phrases, wrong lexical choice errors, long sentence translation, over and under translation, etc. In this paper, we touch upon the following two major problems:", |
|
"cite_spans": [ |
|
{ |
|
"start": 345, |
|
"end": 366, |
|
"text": "Aharoni et al., 2016)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 968, |
|
"end": 985, |
|
"text": "(Li et al., 2016)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1151, |
|
"end": 1173, |
|
"text": "Bahdanau et al., 2015;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1174, |
|
"end": 1193, |
|
"text": "Luong et al., 2015;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 1194, |
|
"end": 1213, |
|
"text": "Firat et al., 2016)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 1319, |
|
"end": 1342, |
|
"text": "(Bahdanau et al., 2015)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1567, |
|
"end": 1592, |
|
"text": "(Bentivogli et al., 2016)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Translation of long sentences", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Over-translation Since the decoder of the Seq2Seq model produces the target language word by word simply based on the previous target words and the sourceside representation vector until it reaches the spe-cial end token, it is incapable in capturing longdistance dependencies in history, so ineffective for long sentences translation Toral and S\u00e1nchez-Cartagena, 2017) . Even with an attention mechanism, the Seq2Seq model just pays attention to the current alignment information between the inputs and the output at the current position but ignores past alignments information. Therefore, it cannot keep track of the attention history when it updates information at each current time step, leading to the over-production (Tu et al., 2016a,c; Mi et al., 2016; Tu et al., 2016b) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 337, |
|
"end": 371, |
|
"text": "Toral and S\u00e1nchez-Cartagena, 2017)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 725, |
|
"end": 745, |
|
"text": "(Tu et al., 2016a,c;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 746, |
|
"end": 762, |
|
"text": "Mi et al., 2016;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 763, |
|
"end": 780, |
|
"text": "Tu et al., 2016b)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In order to address the above two issues, it is worth considering that using syntactic dependency information and representing the output as a tree structure would be effective. This approach allows the next tokens to be output based on not only the previous tokens but also the syntactic dependencies so far, thereby conditioning them on more abundant information so it has the ability to make smarter predictions. Basically, in this paper, we train the model with an encoder-decoder neural network and using dependencies in which the input of the source language is in sequence form and the output of the target language will be generated in a linearized dependency-based tree structure. That is, instead of predicting only words at each time step, the model trains the network to predict both words and their grammatical dependencies as a dependency tree at each time step. Therefore, it is hoped that the accuracy of output will be improved.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The major contributions of this work are as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "1. To utilize the information of both \"head\" words and syntactic dependencies between them to produce better output.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "2. To settle the problems in the NMT task. In this paper, we desire to solve two tasks. First is the ineffective translation for long sentences. Second is the over-translation in NMT task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Empirically, to assess the performance of the proposed method, we used Conditional Gated Recurrent Unit with Attention mechanism model of Bahdanau (2015) on the French-English portions of the Europarl-v7 dataset. As a result, the BLEU score is improved by 1.57 and 2.40 points for sentences of length up to 50 and 80 tokens, respec-tively. Also, we compare and analyze the results of attention-based Seq2Seq model and the proposed approach.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In fact, the effectiveness of using dependency information of words has been reported in some previous NLP tasks, for example, in dependencybased word embeddings, relation classification and sentence classification tasks (Liu et al., 2015; Socher et al., 2014; Levy and Goldberg, 2014; Komnios, 2016; Ono and Hatano, 2014) . It has been shown that the combination of words and their dependency information can boost performance. Besides, in the work of Vinyals et al. , they also represent output as a linearized tree structure, but their work showed that generic sequence-to-sequence approaches can achieve excellent results on syntactic constituency parsing. At a glance, our proposed method is a little similar to the works of Dyer et al., Aharoni et al., Eriguchi et al., Wu et al. (Dyer et al., 2016; Aharoni and Goldberg, 2017; Eriguchi et al., 2017; Wu et al., 2017) in use of parse tree and generation. However, Dyer et al. and Aharoni et al.'s works concern predicting constituent trees. Eriguchi et al.'s model employs syntactic dependency parsing but their model is hybridized the decoder of NMT and the Recurrent Neural Network Grammars, and the target sentences are parsed in transition-based parsing. Wu et al.'s model also employs dependency parsing but their model separately predicts the target translation sequence and parsing action sequence which maps to translation. On the other hand, our proposed model's decoder directly predicts the linearized dependency tree itself in a single neural network in Depth-first preorder order so that the next-word token is generated based on syntactic relations and tree construction itself. In other words, our model is able to learn and produce a tree of words and their dependency relations by itself.", |
|
"cite_spans": [ |
|
{ |
|
"start": 221, |
|
"end": 239, |
|
"text": "(Liu et al., 2015;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 240, |
|
"end": 260, |
|
"text": "Socher et al., 2014;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 261, |
|
"end": 285, |
|
"text": "Levy and Goldberg, 2014;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 286, |
|
"end": 300, |
|
"text": "Komnios, 2016;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 301, |
|
"end": 322, |
|
"text": "Ono and Hatano, 2014)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 743, |
|
"end": 805, |
|
"text": "Aharoni et al., Eriguchi et al., Wu et al. (Dyer et al., 2016;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 806, |
|
"end": 833, |
|
"text": "Aharoni and Goldberg, 2017;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 834, |
|
"end": 856, |
|
"text": "Eriguchi et al., 2017;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 857, |
|
"end": 873, |
|
"text": "Wu et al., 2017)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In our proposed approach, the neural network model is trained to map the target-side output in a linearized dependency tree construction from the source-side input in a sequence. Thus, we call this model Sequence-to-Dependency (Seq2Dep) model. The problem is defined as follows: Given a source sequence", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sequence-to-Dependency Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "X = (x 1 , x 2 , . . . , x N ) of length N", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sequence-to-Dependency Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": ", we want the model to encode the input sequence X and decode it to a tree structure with both words and dependency information conditioned on the encoded vector. Therefore, the output will be represented in the form (LY ) = (ly 1 , ly 2 , . . . , ly M ). The conditional probability p(ly|x) is decomposed as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sequence-to-Dependency Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(ly|x) = \u221e i=1 p(ly i |ly <i , x),", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Sequence-to-Dependency Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "in which (ly 1 , ly 2 , ..., ly M ) are words or dependency labels. Therefore, the hidden state s j at time step j is computed as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sequence-to-Dependency Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "s j = cGRU att s j\u22121 , ly j\u22121 , C j ,", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Sequence-to-Dependency Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "and the next token ly j , which may be a word or dependency label, will be generated as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sequence-to-Dependency Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "ly j = f s j , ly j\u22121 , C j ,", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Sequence-to-Dependency Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In this paper, dependencies are defined as the dependency labels which are achieved from the Stanford Dependency Parser (Chen and Manning, 2014) . The decoder will decode the next output based on relations between governors and dependents in a linearized tree structure. In regards to the order of generating the dependency labels and the words, the decoder will produce these symbols in a manner called Depth-first pre-order traversal.", |
|
"cite_spans": [ |
|
{ |
|
"start": 120, |
|
"end": 144, |
|
"text": "(Chen and Manning, 2014)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sequence-to-Dependency Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In this section, we will describe the model stepby-step as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sequence-to-Dependency Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Since there is no parallel corpus in which the source-side is represented in sequence and targetside is represented in linearized dependency tree, we have to prepare data for training by doing dependency parsing for the target-side language.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Processing Data", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "In this paper, we do experiments on a French-English language pair so we use the Stanford Dependency Parser to obtain dependency parsing results for English. The Stanford Dependency Parser produces results in the form of a tree structure in which each word of the sentence is the dependent of exactly one token, either another word in the sentence or the distinguished \"ROOT-0\" token. The parsing result is represented in the format \"abbreviated relation name(governor, depen-dent)\" in which a governor is a head word and dependency is a syntactic relation between a governor and a dependent. The governor and the dependent are words in the sentence. This dependency parsing result will be transformed in another step for traversing the tree, which will be described in the next section to create a dependency tree. The dependency tree represents the target language as an ordered tree structure which is necessary for training. The reason we chose the Stanford Dependency Parser for the parsing portion of this method is because it can represent the order of words in sentence. This information of the order is useful to traverse tree in the following step.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dependency Parsing", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "In this section, we describe the Tree Transform and Tree Traversal process in which output in a linearized dependency tree form is created from the Stanford Dependency Parsing tree. For example, given a sentence \"She ate an apple today .\", after obtaining dependency parsing tree from the above dependency parsing phase, we move the rooted \"ate\" and \"apple\" headwords to the same layers of their dependents which are directly connected to the headwords. We also concurrently make consideration to their positions in order while shifting headwords. The headwords are shifted in such a manner that the word order of sentence can be preserved, so we can evaluate the translated output afterwards. Next, the tree structure obtained in the fist step will be transformed into another tree structure for the next tree traversal step. Then we traverse this tree in a Depthfirst pre-order traversal, which is the search tree in which tree is traversed from its left subtree to right subtree recursively until current node is empty, to create output with a linearized tree structure to train the model. That is, for each rooted subtree, governors and dependency labels of the sentence are predicted first, and their information will be used to predict the next dependent words. In other words, the model can capture the dependency information between label-word and wordword pairs to predict the next tokens. This means that the model is capable of modeling grammatical dependencies in the output symbols. Also, in Seq2Dep model, we define the Nonterminal \"{DEPENDENCY LABEL\", and Node-closing \"}\" tokens. Nonterminal indicates subtree (Dong and Lapata, 2016) , which means open subtree to visit its children nodes. Node-closing indicates end-of- INSERT label's children subtree, that means finishing subtree traversal and returning to the upper layer to continue the next subtree traversal. And these defined tokens do not appear in original source and target datasets. Algorithms 1 and 2 show the definition of transformation and tree traversal in more detail respectively. The purpose of using Depth-first pre-order traversal is as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 1626, |
|
"end": 1649, |
|
"text": "(Dong and Lapata, 2016)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transformation and Tree Traversal", |
|
"sec_num": "3.1.2" |
|
}, |
|
{ |
|
"text": "1. To keep the words of the target language sequence in order when they are generated. With this generating order, the word order of the sentence is preserved, thus, we do not have to do any post-processing subsequently.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transformation and Tree Traversal", |
|
"sec_num": "3.1.2" |
|
}, |
|
{ |
|
"text": "2. To utilize both information of the words and the dependency labels generated in the previous rooted subtree to predict the tokens of the next rooted subtree. Figures 1, 2 and 3 show the Stanford dependency parsing tree, tree structure after the positions of \"head\" words are shifted and Depth-first pre-order Tree Traversal.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 161, |
|
"end": 179, |
|
"text": "Figures 1, 2 and 3", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Transformation and Tree Traversal", |
|
"sec_num": "3.1.2" |
|
}, |
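
{

"text": "The following is a minimal Python sketch (an illustrative assumption, not the authors' code) of the Depth-first pre-order linearization described above. It assumes a hypothetical Node class in which a Nonterminal node carries a dependency label, a leaf node carries a word, and children are stored left-to-right; the token conventions (\"{LABEL\" for Node-opening and \"}\" for Node-closing) follow the definitions in this section.\ndef linearize(node, out):\n    # Nonterminal node: open the subtree with its dependency label,\n    # visit the children left-to-right (pre-order), then close the subtree.\n    if node.label is not None:\n        out.append(\"{\" + node.label)  # Node-opening token, e.g. \"{nsubj\"\n        for child in node.children:\n            linearize(child, out)\n        out.append(\"}\")  # Node-closing token\n    else:\n        out.append(node.word)  # leaf node: output the word itself\n    return out",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Transformation and Tree Traversal",

"sec_num": "3.1.2"

},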
|
{ |
|
"text": "The proposed (Seq2Dep) model consists of an encoder which is a bidirectional GRU layer as in Bahdanau's model (2015) 1 . The input embeddings of the source sentences are shared by the forward and backward GRU, and the hidden states of the corresponding forward and backward GRU are added to obtain the hidden representation for that time step. The decoder of the model will decode the output as words and dependency labels in a linearized dependency tree structure in 1 https://github.com/nyu-dl/dl4mt-tutorial if Node.children.size! = 0 then 9:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sequence-to-Dependency Model", |
|
"sec_num": "3.2" |
|
}, |
|
{

"text": "Algorithm 2 (Depth-first pre-order tree traversal): if Node.children.size != 0, then recursively call Traverse(T, N) on the children in pre-order; else, if Node is Nonterminal, then OUTPUT Node-opening, VISIT children, OUTPUT Node-closing; else OUTPUT Node.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Sequence-to-Dependency Model",

"sec_num": "3.2"

},
|
{ |
|
"text": "OUTPUT Node a Depth-first pre-order traversal. Figure 4 shows the decoder which generates both dependency labels and words in the Seq2Dep model. In Figure 4 , the previous token and context vector feeding are omitted for simplicity.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 47, |
|
"end": 55, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 148, |
|
"end": 157, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sequence-to-Dependency Model", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In our experiment, the proposed model was trained on the French-English parallel corpus of the Europarl-v7 dataset. We used newstest2011 and newstest2012 of WMT16 as development and test data respectively. To confirm translation for long sentences, the whole test set was used without removing any sentences with a maximum length of 50 or 80. We performed experiment on the following two datasets:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 Europarl-v7 dataset consisting of sentences with a maximum length of 50.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 Europarl-v7 dataset consisting of sentences with a maximum length of 80.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "For preprocessing data, we filtered out sentences which were longer than the above maximum lengths and cleaned the special symbols or characters which were not strings. We also omitted sentences which had multiple sentences in one line. The reason is that the parsing results obtained from the Stanford Dependency Parser in parsing step would contain multi \"{ROOT\" tokens for sentences which have multiple sentences in one line, while it is necessary to generate the next child nodes starting from just one top {ROOT of a tree. Next, we tokenize and lowercase this dataset and perform dependency parsing. After that, we traverse the tree in a Depth-first pre-order to create the parallel corpus for the training model in which the source language, French is in sequence form, and the target language, English is in a linearized dependency tree structure form. The longer sentences are(particularly sentences with a maximum length of 80 tokens), the more CPU's memory and time cost for this processing data step. In addition, we built a dictionary of the target language (English) that consists of both words and dependency labels. In this dictionary, we define 74 dependency labels based on the current representation of grammatical relations of the Stanford Dependency Parser.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "4.1" |
|
}, |
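
{

"text": "As an illustration of the multi-sentence filter described above, the following is a minimal Python sketch (an assumption about the implementation, not the authors' code) that keeps only examples whose linearized parse contains exactly one top \"{ROOT\" token:\ndef has_single_root(linearized_parse):\n    # keep a training example only if its parse opens exactly one {ROOT subtree\n    return linearized_parse.split().count(\"{ROOT\") == 1",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Dataset",

"sec_num": "4.1"

},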
|
{ |
|
"text": "In order to evaluate the performance of the proposed method, we set the same hyperparameters as the attention-based cGRU model in DL4MT-Tutorial and compare the obtained results of both Seq2Seq and Seq2Dep models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Settings", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The recurrent transformation weights for gates and hidden state proposal matrices were initialized as random orthogonal matrices. Weights were optimized using the Adadelta algorithm and were updated with a mini-batch size of 32 sentences. The vocabulary sizes of both source and target languages were set at 30k words, the beam size was set to 5, dropout was not applied and the gradients were clipped at 1.0. Morever, because the generated tokens are not only words but also dependency labels in Seq2Dep model, the maxlen parameter was set up so that dependency labels are not counted, therefore long sentences will not be removed in training.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Settings", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In the experiments, we trained the following 2 models on 1.65M sentences with a maximum length of 50 and 1.89M sentences with a maximum length of 80 from the Europarl-v7 French-English bitext.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Training", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "This model is a Seq2Seq model with attention mechanism as in Firat (2016) that consists of an encoder that encodes the source language input in sequence form and a decoder that decodes target language output in sequence form.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The proposed method. In this model, the model architecture is the same as the attention-based Seq2Seq model but the input is in sequence form and the output is in linearized dependency tree structure.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Seq2Dep Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In the Seq2Dep model, because the output consists of both words and dependency labels, we evaluated the result with post-processing, which is the process that removes the dependency labels from the translated result. From this section onwards, we will refer to the Seq2Seq and Seq2Dep models with sentences of maximum length 50 and 80 tokens as Seq2Seq-50, Seq2Dep-50, Seq2Seq-80 and Seq2Dep-80. As a result, the BLEU score of Seq2Dep-50 with post-processing was 20.88, which is higher than the BLEU score of 19.31 obtained by the attention-based Seq2Seq-50 model with a gain of up to 1.57 points. Similarly, the BLEU score improved by 2.40 points for datasets with maximum sentence lengths of 80. Table 1 shows BLEU and METEOR scores and TER error of the attention-based Seq2Seq and Seq2Dep models. Figure 5 shows the relation between BLEU score and the length of sentence. Moreover, when we made a trial to evaluate the translation results without post-processing, the BLEU scores without post-processing were 42.76 and 43.41 for both datasets. From these scores, it is thought that the model can predict not only word-based tokens but also dependency labels well.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 698, |
|
"end": 705, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 800, |
|
"end": 808, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
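
{

"text": "The following is a minimal Python sketch of the post-processing step, under the assumption that the hypothesis is a whitespace-separated token string mixing words with \"{LABEL\" Node-opening and \"}\" Node-closing tokens:\ndef strip_dependency_tokens(hypothesis):\n    # drop Node-opening tokens (which start with \"{\") and Node-closing \"}\" tokens,\n    # keeping only the translated words for BLEU/METEOR/TER evaluation\n    words = [t for t in hypothesis.split()\n             if t != \"}\" and not t.startswith(\"{\")]\n    return \" \".join(words)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Results",

"sec_num": "5"

},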
|
{ |
|
"text": "In order to verify the ability of the proposed approach to solve the repetition problem of NMT, over-translation, we measured the repetition of words in the translation results of attention-based Seq2Seq and Seq2Dep learnings in this section. The repetition rate is measured by the following formula:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additional Experiments", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "rep rat = T (y) i=1 1 + r( y i ) 1 + r(Y ) ,", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Additional Experiments", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "in which y i and Y i are the i th hypothesis sentence and i th reference sentence respectively, and r is the number of the repeated words and is computed by:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additional Experiments", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "r(X) = len(X) \u2212 len(set(X))", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Additional Experiments", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "in which len(X) is the length of the sentence X and len(set(X)) is the number of words that are not repeated in sentence X. For example, given the sentence X=\"The big fish ate the smaller fish\", in this case, set(X)={The, big, fish, ate, smaller}, len(X)=7, len(set(X))=5. Figure 6 shows the comparison of repetition rate in both models in which the horizontal axis is the length of sentences, vertical axis is the repetition rate respectively. In Figure 6, the repetition rate in both Seq2Seq and Seq2Dep learnings decreases as the length of the sentences increases. From Figure 6 , we can see that the more tokens the model learns, the more the repetition rate decreases. Also, the repetition rate is reduced in the Seq2Dep model compared to the attention-based Seq2Seq model. In figure 5, except the span in which the sentence length is between 41 and 51 words, the BLEU score of the Seq2Dep model goes up gradually and almost overcomes that of the attention-based Seq2Seq model. The BLEU score falls from 19.31 to 16.97 with a 2.34 points difference for the attention-based Seq2Seq model while the point difference is 1.51 in the Seq2Dep model. From the experiments, we confirm that by using the syntactic dependency information, the Seq2Dep model can learn well and reduce the drop in BLEU score compared to the baseline model even if the sentence is very long. Besides, we can see the BLEU score is low for short sentences which have a length of 10 words or less. This is because of the brevity penalty on short sentences in BLEU (Papineni et al., 2002) . With regards to the BLEU score without post-processing, we see that the score of the Seq2Dep-80 model is higher than that of the Seq2Dep-50 model. The reason could be: The longer the sentences are, the more syntactic de-pendencies the models require for generating better outputs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1536, |
|
"end": 1559, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 273, |
|
"end": 281, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 448, |
|
"end": 454, |
|
"text": "Figure", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 573, |
|
"end": 581, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Additional Experiments", |
|
"sec_num": "6" |
|
}, |
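
{

"text": "The following is a minimal Python sketch of Equations (4) and (5), under the assumption that the repetition rate is accumulated as the sum of per-sentence ratios over the test set:\ndef repeated(tokens):\n    # r(X) = len(X) - len(set(X)): number of repeated word occurrences in X\n    return len(tokens) - len(set(tokens))\n\ndef repetition_rate(hypotheses, references):\n    # Equation (4): sum over sentence pairs of (1 + r(hyp)) / (1 + r(ref))\n    return sum((1 + repeated(h.split())) / (1 + repeated(r.split()))\n               for h, r in zip(hypotheses, references))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Additional Experiments",

"sec_num": "6"

},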
|
{ |
|
"text": "Also, in terms of the over-translation problem, Figure 6 shows that the repetition rates of the two models decrease gradually with respect to the length of the sentences and the Seq2Dep model has a lower repetition rate. When we checked the translation results, we saw that Node-closing token \"}\" was almost generated after each subtree. Moreover, we saw that there were some very long sentences which the over-generation of \"UNK\"s occurred in the translation result of Seq2Seq model while that did not occur in translation results of Seq2Deq model. Our assumption is that after generating subtree, the Seq2Dep model can learn that it should generate the Node-closing token \"}\" next, instead of a chain of words. In other words, as mentioned in Kuncoro et al.'s work (Kuncoro et al., 2016) in which modeling of composition can achieve better performance, the Seq2Dep model which learns about the syntactic dependencies and tree structure performance is probably able to learn the blocks of the form \"Nonterminal word }\" like a phrase-structure in sen- Figure 6 : Comparison of the repetition rate of the baseline and Seq2Dep models tences, so it is unlikely to generate the same word repeatedly. Therefore, it is possible to prevent the long repeated words in long sentences. Usually, because the block of the form \"Nonterminal word }\" is seen as a phrase in sentence or a subtree in tree structure, and it is rare for a phrase to occur repeatedly in sentence or for a subtree to repeat in a tree structure, so it is assumed that repetition of the blocks of form \"Nonterminal word }\" are also rare.", |
|
"cite_spans": [ |
|
{ |
|
"start": 745, |
|
"end": 789, |
|
"text": "Kuncoro et al.'s work (Kuncoro et al., 2016)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 48, |
|
"end": 56, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1052, |
|
"end": 1060, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Additional Experiments", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In this work, we proposed a method in which the Seq2Dep NMT model is trained by utilizing syntactic dependencies to provide the model more abundant information. In other words, Seq2Dep model learns the potential internal relative connections among tokens and their long term syntactic dependencies to predict the next-word tokens. Furthermore, the Seq2Dep model can also generate output as a linearized dependency tree structure in a Depth-first pre-order tree traversal over words and dependencies. The purpose of this work is to alleviate issues of translating long sentences and repetitive translation. We conduct experiments on the French-English parallel corpus of the Europarl-v7 dataset to compare the performance of the proposed method with the attention-based Seq2Seq model. The results demonstrated that the proposed model achieved a 1.57 and 2.40 points BLEU score improvement for sentences of length at most 50 and 80 tokens re-spectively. Moreover, experiments verify that the proposed model also reduces the over-translation, particularly long sentences with over-generation of \"UNK\"s.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "\u2022 Confirm how accurate the Seq2Dep model generates the dependency labels and the whole tree structure as well.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Future work", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "\u2022 In this paper, to compare performance of the proposed method with the baseline model, we set the same hyperparameters as the attention-based cGRU model in dl4mttutorial and trained the Seq2Dep model on only Europarl-v7 dataset. Since experiments were done on small vocabulary size and dataset, we plan to train the model on larger vocabulary and datasets with subword units segmentation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Future work", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "\u2022 For future work, we plan to train models on datasets which consist of only long sentences with more than 50 or 80 tokens to compare the performance of long-sentences translation of the approach and baseline model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Future work", |
|
"sec_num": "9" |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We thank Assistant Professor Shindo Hiroyuki, Ouchi Hiroki, Michael Wentao Li of the NAIST Computational Linguistics Laboratory, and the reviewers for their valuable and constructive comments. Part of this work was supported by JSPS KAKENHI Grant Number JP17H06101.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Towards string-to-tree neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Roee", |
|
"middle": [], |
|
"last": "Aharoni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roee Aharoni and Yoav Goldberg. 2017. Towards string-to-tree neural machine translation. CoRR, abs/1704.04743.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Improving sequence to sequence learning for morphological inflection generation: The biumit systems for the sigmorphon 2016 shared task for morphological reinflection", |
|
"authors": [ |
|
{ |
|
"first": "Roee", |
|
"middle": [], |
|
"last": "Aharoni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Belinkov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 14th Annual SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "41--48", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roee Aharoni, Yoav Goldberg, and Yonatan Belinkov. 2016. Improving sequence to sequence learning for morphological inflection generation: The biu- mit systems for the sigmorphon 2016 shared task for morphological reinflection. In Proceedings of the 14th Annual SIGMORPHON Workshop on Com- putational Research in Phonetics, Phonology, and Morphology, pages 41-48.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Neural machine translation by jointly learning to align and translate", |
|
"authors": [ |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly learning to align and translate. In Proceedings of ICLR 2015.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Neural versus phrasebased machine translation quality: a case study", |
|
"authors": [ |
|
{ |
|
"first": "Luisa", |
|
"middle": [], |
|
"last": "Bentivogli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arianna", |
|
"middle": [], |
|
"last": "Bisazza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mauro", |
|
"middle": [], |
|
"last": "Cettolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcello", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luisa Bentivogli, Arianna Bisazza, Mauro Cettolo, and Marcello Federico. 2016. Neural versus phrase- based machine translation quality: a case study. CoRR, abs/1608.04631.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "A fast and accurate dependency parser using neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christoher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Danqi Chen and Christoher D. Manning. 2014. A fast and accurate dependency parser using neural net- works. In Proceedings of EMNLP 2014.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Language to logical form with neural attention", |
|
"authors": [ |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of ACL 2016", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "33--43", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Li Dong and Mirella Lapata. 2016. Language to logi- cal form with neural attention. Proceedings of ACL 2016, pages 33-43.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Learning to parse and translate improves neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Akiko", |
|
"middle": [], |
|
"last": "Eriguchi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshimasa", |
|
"middle": [], |
|
"last": "Tsuruoka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Akiko Eriguchi, Yoshimasa Tsuruoka, and Kyunghyun Cho. 2017. Learning to parse and translate improves neural machine translation. CoRR, abs/1702.03525.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Multi-way, multilingual neural machine translation with a shared attention mechanism", |
|
"authors": [ |
|
{ |
|
"first": "Orhan", |
|
"middle": [], |
|
"last": "Firat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Orhan Firat, Kyunghyun Cho, and Yoshua Ben- gio. 2016. Multi-way, multilingual neural ma- chine translation with a shared attention mechanism. arXiv.org1601.01073.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Dependency based embeddings for sentence classification tasks", |
|
"authors": [ |
|
{ |
|
"first": "Alexandros", |
|
"middle": [], |
|
"last": "Komnios", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of NAACL-HLT 2016", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1490--1500", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexandros Komnios. 2016. Dependency based em- beddings for sentence classification tasks. In Pro- ceedings of NAACL-HLT 2016, pages 1490-1500.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "What do recurrent neural network grammars learn about syntax? CoRR", |
|
"authors": [ |
|
{ |
|
"first": "Adhiguna", |
|
"middle": [], |
|
"last": "Kuncoro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miguel", |
|
"middle": [], |
|
"last": "Ballesteros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lingpeng", |
|
"middle": [], |
|
"last": "Kong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graham", |
|
"middle": [], |
|
"last": "Neubig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adhiguna Kuncoro, Miguel Ballesteros, Lingpeng Kong, Chris Dyer, Graham Neubig, and Noah A. Smith. 2016. What do recurrent neural net- work grammars learn about syntax? CoRR, abs/1611.05774.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Dependencybased word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of ACL 2014", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "302--308", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Omer Levy and Yoav Goldberg. 2014. Dependency- based word embeddings. In Proceedings of ACL 2014, pages 302-308.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Towards zero unknown word in neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Xiaoqing", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiajun", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chengqing", |
|
"middle": [], |
|
"last": "Zong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of IJCAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaoqing Li, Jiajun Zhang, and Chengqing Zong. 2016. Towards zero unknown word in neural machine translation. In Proceedings of IJCAI 2016.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "A dependency-based neural network for relation classification", |
|
"authors": [ |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Furu", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sujian", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Houfeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of ACL 2015", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "285--290", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang Liu, Furu Wei, Sujian Li, Heng Ji, Ming Zhou, and Houfeng Wang. 2015. A dependency-based neural network for relation classification. In Pro- ceedings of ACL 2015, pages 285-290.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Effective approaches to attentionbased neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of EMNLP 2015", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1412--1421", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thang Luong, Hieu Pham, and Christopher D. Man- ning. 2015. Effective approaches to attention- based neural machine translation. In Proceedings of EMNLP 2015, pages 1412-1421.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Coverage embedding models for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Haitao", |
|
"middle": [], |
|
"last": "Mi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiguo", |
|
"middle": [], |
|
"last": "Baskaran Sankaran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abe", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ittycheriah", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of EMNLP 2016", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "955--960", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haitao Mi, Baskaran Sankaran, Zhiguo Wang, and Abe Ittycheriah. 2016. Coverage embedding mod- els for neural machine translation. In Proceedings of EMNLP 2016, pages 955-960.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Dependency parsing and its application using hierarchical structure in japanese language", |
|
"authors": [ |
|
{ |
|
"first": "Kazuki", |
|
"middle": [], |
|
"last": "Ono", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenji", |
|
"middle": [], |
|
"last": "Hatano", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "International Journal on Advances in Internet Technology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kazuki Ono and Kenji Hatano. 2014. Dependency parsing and its application using hierarchical struc- ture in japanese language. International Journal on Advances in Internet Technology, vol 7 no 3, 4.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Bleu: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of ACL 2002", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. Proceedings of ACL 2002, pages 311-318.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Grounded compositional semantics for finding and describing images with sentences", |
|
"authors": [ |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrej", |
|
"middle": [], |
|
"last": "Karpathy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "207--218", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Richard Socher, Andrej Karpathy, Quoc V. Le, Christo- pher D. Manning, and Andrew Y. Ng. 2014. Grounded compositional semantics for finding and describing images with sentences. In Transactions of the Association for Computational Linguistics, pages 2: 207-218.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Sequence to sequence learning with neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Quoc", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V. Le. 2014. Sequence to sequence learning with neural net- works. Proceedings of NIPS 2014.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "A multifaceted evaluation of neural versus phrasebased machine translation for 9 language directions", |
|
"authors": [ |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Toral", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "V\u00edctor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "S\u00e1nchez-Cartagena", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Antonio Toral and V\u00edctor M. S\u00e1nchez-Cartagena. 2017. A multifaceted evaluation of neural versus phrase- based machine translation for 9 language directions. CoRR, abs/1701.02901.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Neural machine translation with reconstruction", |
|
"authors": [ |
|
{ |
|
"first": "Zhaopeng", |
|
"middle": [], |
|
"last": "Tu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lifeng", |
|
"middle": [], |
|
"last": "Shang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaohua", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of Association for the Advancement of Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhaopeng Tu, Yang Liu, Lifeng Shang, Xiaohua Liu, and Hang Li. 2016a. Neural machine translation with reconstruction. In Proceedings of Association for the Advancement of Artificial Intelligence 2016.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Coverage-based neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Zhaopeng", |
|
"middle": [], |
|
"last": "Tu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhengdong", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaohua", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhaopeng Tu, Zhengdong Lu, Yang Liu, Xiaohua Liu, and Hang Li. 2016b. Coverage-based neural ma- chine translation. CoRR, abs/1601.04811.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Modeling coverage for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Zhaopeng", |
|
"middle": [], |
|
"last": "Tu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhengdong", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaohua", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of ACL 2016", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "76--85", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhaopeng Tu, Zhengdong Lu, Yang Liu, Xiaohua Liu, and Hang Li. 2016c. Modeling coverage for neural machine translation. In Proceedings of ACL 2016, pages 76-85.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Grammar as a foreign language", |
|
"authors": [ |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Terry", |
|
"middle": [], |
|
"last": "Koo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Slav", |
|
"middle": [], |
|
"last": "Petrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oriol Vinyals, Lukasz Kaiser, Terry Koo, Slav Petrov, Ilya Sutskever, and Geoffrey E. Hinton. 2014. Grammar as a foreign language. CoRR, abs/1412.7449.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Sequence-to-dependency neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Shuangzhi", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongdong", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mu", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of ACL 2017", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "698--707", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shuangzhi Wu, Dongdong Zhang, Nan Yang, Mu Li, and Ming Zhou. 2017. Sequence-to-dependency neural machine translation. Proceedings of ACL 2017, pages 698-707.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Recurrent neural machine translation. CoRR", |
|
"authors": [ |
|
{ |
|
"first": "Biao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deyi", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jinsong", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Biao Zhang, Deyi Xiong, and Jinsong Su. 2016. Recurrent neural machine translation. CoRR, abs/1607.08725.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"text": "Stanford Dependency Parsing Tree", |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"text": "Dependency tree after shifting the positions of \"head\" words Figure 3: Depth-first pre-order Tree Traversal Encoder and decoder of Seq2Dep model", |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"text": "Comparison of BLEU score with respect to the length of sentences 7 Analysis and Discussion", |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"TABREF2": { |
|
"html": null, |
|
"type_str": "table", |
|
"text": "Translation quality as measured by different metrics.", |
|
"num": null, |
|
"content": "<table><tr><td>Model</td><td colspan=\"2\">Post-processing BLEU METEOR</td><td>TER</td></tr><tr><td colspan=\"2\">Seq2Seq-50 19.31</td><td>26.3</td><td>66.1</td></tr><tr><td colspan=\"2\">Seq2Dep-50 20.88</td><td>27.0</td><td>62.5</td></tr><tr><td colspan=\"2\">Seq2Seq-80 16.97</td><td>25.5</td><td>78.5</td></tr><tr><td colspan=\"2\">Seq2Dep-80 19.37</td><td>25.6</td><td>65.6</td></tr></table>" |
|
} |
|
} |
|
} |
|
} |