|
{ |
|
"paper_id": "N19-1014", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:00:33.217863Z" |
|
}, |
|
"title": "Improving Grammatical Error Correction via Pre-Training a Copy-Augmented Architecture with Unlabeled Data", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Yuanfudao Research", |
|
"location": { |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "zhaowei01@fenbi.com" |
|
}, |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Yuanfudao Research", |
|
"location": { |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "wangliang01@fenbi.com" |
|
}, |
|
{ |
|
"first": "Kewei", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Yuanfudao Research", |
|
"location": { |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "shenkw@fenbi.com" |
|
}, |
|
{ |
|
"first": "Ruoyu", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Yuanfudao Research", |
|
"location": { |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jingming", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Yuanfudao Research", |
|
"location": { |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Neural machine translation systems have become state-of-the-art approaches for Grammatical Error Correction (GEC) task. In this paper, we propose a copy-augmented architecture for the GEC task by copying the unchanged words from the source sentence to the target sentence. Since the GEC suffers from not having enough labeled training data to achieve high accuracy, we pre-train the copy-augmented architecture with a denoising auto-encoder using the unlabeled One Billion Benchmark and make comparisons between the fully pre-trained model and a partially pretrained model. It is the first time copying words from the source context and fully pretraining a sequence to sequence model are experimented on the GEC task. Moreover, we add token-level and sentence-level multi-task learning for the GEC task. The evaluation results on the CoNLL-2014 test set show that our approach outperforms all recently published state-of-the-art results by a large margin. The code and pre-trained models are released at https://github.com/zhawe01/fairseq-gec.",
|
"pdf_parse": { |
|
"paper_id": "N19-1014", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Neural machine translation systems have become state-of-the-art approaches for Grammatical Error Correction (GEC) task. In this paper, we propose a copy-augmented architecture for the GEC task by copying the unchanged words from the source sentence to the target sentence. Since the GEC suffers from not having enough labeled training data to achieve high accuracy, we pre-train the copy-augmented architecture with a denoising auto-encoder using the unlabeled One Billion Benchmark and make comparisons between the fully pre-trained model and a partially pretrained model. It is the first time copying words from the source context and fully pretraining a sequence to sequence model are experimented on the GEC task. Moreover, we add token-level and sentence-level multi-task learning for the GEC task. The evaluation results on the CoNLL-2014 test set show that our approach outperforms all recently published state-of-the-art results by a large margin. The code and pre-trained models are released at https://github.com/zhawe01/fairseq-gec.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Grammatical Error Correction (GEC) is a task of detecting and correcting grammatical errors in text. Due to the growing number of language learners of English, there has been increasing attention to the English GEC, in the past decade.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The following sentence is an example of the GEC task, where the word in bold needs to be corrected to its adverb form.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Nothing is [absolute \u2192 absolutely] right or wrong.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Although machine translation systems have become state-of-the-art approaches for GEC, GEC is different from translation since it only changes several words of the source sentence. In Table 1 Table 1 : The ratio of unchanged words in the target sentence to the source sentence. \"Sent.\" means the sentence number. \"Tok.\" means the token number of the target sentence. \"Same %\" means the same word percentage.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 183, |
|
"end": 190, |
|
"text": "Table 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 191, |
|
"end": 198, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "we list the ratio of unchanged words of the target sentence to the source sentence in three different datasets. We can observe that more than 80% of the words can be copied from the source sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Considering the percentage of unchanged words is high in the GEC task, a more proper neural architecture is needed for it. We enhance the current neural architecture by enabling it to copy the unchanged words and the out-of-vocabulary words directly from the source sentence, just as what humans do when they correct sentences. To our knowledge, this is the first time that neural copying mechanism is used on GEC.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Progresses have been made thanks to largescale training corpus, including NUS Corpus of Learner English (NUCLE) (Dahlmeier et al., 2013) and the large-scale Lang-8 corpus (Tajiri et al., 2012) . However, even with millions of labeled sentences, automatic GEC is challenging due to the lack of enough labeled training data to achieve high accuracy.", |
|
"cite_spans": [ |
|
{ |
|
"start": 112, |
|
"end": 136, |
|
"text": "(Dahlmeier et al., 2013)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 171, |
|
"end": 192, |
|
"text": "(Tajiri et al., 2012)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To alleviate the problem of insufficient labeled data, we propose a method to leverage the unlabeled data. The concrete way is to pre-train our copy-augmented model with the unlabeled One Billion Benchmark (Chelba et al., 2013) by leveraging denoising auto-encoders.", |
|
"cite_spans": [ |
|
{ |
|
"start": 206, |
|
"end": 227, |
|
"text": "(Chelba et al., 2013)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We also add two multi-tasks for the copyaugmented architecture, including a token-level labeling task and a sentence-level copying task, to further improve the performance of the GEC task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The copying mechanism is for the first time used on the GEC task, which was used on text summarization tasks. On the GEC task, copying mechanism enables training a model with a small vocabulary since it can straightly copy the unchanged and out-of-vocabulary words from the source input tokens. Besides, by separating the constant part of the work from the GEC task, copying makes the generating portion of the architecture more powerful. In the experiment section of this paper, we show that copying does more than just solving the \"UNK problem\", and it can also recall more edits for the GEC problem.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The copy-augmented architecture outperforms all the other architectures on the GEC task, by achieving a 56.42 F 0.5 score on the CoNLL 2014 test data set. Combined with denoising auto-encoders and multi-tasks, our architecture achieves 61.15 F 0.5 on the CoNLL-2014 test data set, improving +4.9 F 0.5 score than state-of-the-art systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In summary, our main contributions are as follows. (1) We propose a more proper neural architecture for the GEC problem, which enables copying the unchanged words and out-of-vocabulary words directly from the source input tokens. (2) We pre-train the copy-augmented model with large-scale unlabeled data using denoising auto-encoders, alleviating the problem of the insufficient labeled training corpus. (3) We evaluate the architecture on the CoNLL-2014 test set, which shows that our approach outperforms all recently published state-of-the-art approaches by a large margin.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Neural machine translation systems have become the state-of-the-art approaches for Grammatical Error Correction (GEC), by treating the sentence written by the second language learners as the source sentence and the grammatically corrected one as the target sentence. Translation models learn the mapping from the source sentence to the target sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Base Architecture", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "We use the attention based Transformer (Vaswani et al., 2017) architecture as our baseline. The Transformer encodes the source sentence with a stack of L identical blocks, and each of them applies a multi-head self-attention over the source tokens followed by position-wise feedforward layers to produce its context-aware hidden state. The decoder has the same architecture as the encoder, stacking L identical blocks of multi-head attention with feed-forward networks for the target hidden states. However, the decoder block has an extra attention layer over the encoder's hidden states.", |
|
"cite_spans": [ |
|
{ |
|
"start": 39, |
|
"end": 61, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Base Architecture", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "The goal is to predict the next word indexed by t in a sequence of word tokens (y 1 , ..., y T ), given the source word tokens (x 1 , ..., x N ), as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Base Architecture", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "h src 1...N = encoder(L src x 1...N ) (1) h t = decoder(L trg y t\u22121...1 , h src 1...N ) (2) P t (w) = sof tmax(L trg h t )", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Base Architecture", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "The matrix L \u2208 R dx\u00d7|V | is the word embedding matrix, where d x is the word embedding dimension and |V | is the size of the vocabulary. h src", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Base Architecture", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "1...N", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Base Architecture", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "is the encoder's hidden states and h t is the target hidden state for the next word. Applying softmax operation on the inner product between the target hidden state and the embedding matrix, we get the generation probability distribution of the next word.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Base Architecture", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "l ce = \u2212 T t=1 log(p t (y t ))", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Base Architecture", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "The loss l ce of each training example is an accumulation of the cross-entropy loss of each position during decoding.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Base Architecture", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Copying mechanism was proved effective on text summarization tasks (See et al., 2017; Gu et al., 2016) and semantic parsing tasks (Jia and Liang, 2016) . In this paper, we apply the copying mechanism on GEC task, for the first time, enabling the model to copy tokens from the source sentence.", |
|
"cite_spans": [ |
|
{ |
|
"start": 67, |
|
"end": 85, |
|
"text": "(See et al., 2017;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 86, |
|
"end": 102, |
|
"text": "Gu et al., 2016)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 130, |
|
"end": 151, |
|
"text": "(Jia and Liang, 2016)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Copying Mechanism", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "As illustrated in Figure 1 , besides generating words from a fixed vocabulary, our copyaugmented network allows copying words from the source input tokens. Defined in Equation 5, the final probability distribution P t is a mix of the generation distribution P gen t and the copy distribution P copy t . As a result, the fixed vocabulary is extended by all the words appearing in the source sentence. The balance between the copying and generating is controlled by a balancing factor", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 18, |
|
"end": 26, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Copying Mechanism", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03b1 copy t \u2208 [0, 1] at each time step t. p t (w) = (1\u2212\u03b1 copy t ) * p gen t (w)+(\u03b1 copy t ) * p copy t (w)", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Copying Mechanism", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "The new architecture outputs the generation probability distribution as the base model, by generating the target hidden state. The copying score over the source input tokens is calculated with a new attention distribution between the decoder's current hidden state h trg and the encoder's hidden states H src (same as h src 1...N ). The copy attention is calculated the same as the encoder-decoder attentions, listed in Equation 6, 7, 8 :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Copying Mechanism", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "q t , K, V = h trg t W T q , H src W T k , H src W T v (6) A t = q T t K (7) P copy t (w) = sof tmax(A t )", |
|
"eq_num": "(8)" |
|
} |
|
], |
|
"section": "Copying Mechanism", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "The q t , K and V are the query, key, and value that needed to calculate the attention distribution and the copy hidden state. We use the normalized attention distribution as the copy scores and use the copy hidden states to estimate the balancing factor \u03b1 copy t .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Copying Mechanism", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u03b1 copy t = sigmoid(W T (A T t \u2022 V )) (9)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Copying Mechanism", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "The loss function is as described in Equation 4, but with respect to our mixed probability distribution y t given in Equation 5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Copying Mechanism", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Pre-training is shown to be useful in many tasks when lacking vast amounts of training data. In this section, we propose denoising auto-encoders, which enables pre-training our models with largescale unlabeled corpus. We also introduce a partially pre-training method to make a comparison with the denoising auto-encoder.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-training", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Denoising auto-encoders (Vincent et al., 2008) are commonly used for model initialization to extract and select features from inputs. BERT (Devlin et al., 2018 ) used a pre-trained bi-directional transformer model and outperformed existing systems by a wide margin on many NLP tasks. In contrast to denoising auto-encoders, BERT only predicts the 15% masked words rather than reconstructing the entire input. BERT denoises the 15% of the tokens at random by replacing 80% of them with [MASK], 10% of them with a random word and 10% of them unchanged.",
|
"cite_spans": [ |
|
{ |
|
"start": 24, |
|
"end": 46, |
|
"text": "(Vincent et al., 2008)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 139, |
|
"end": 159, |
|
"text": "(Devlin et al., 2018", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Denoising Auto-encoder", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Inspired by BERT and denoising auto-encoders, we pre-train our copy-augmented sequence to sequence model by noising the One Billion Word Benchmark (Chelba et al., 2013) , which is a large sentence-level English corpus. In our experiments, the corrupted sentence pairs are generated by the following procedures.",

"cite_spans": [

{

"start": 147,

"end": 168,
|
"text": "(Chelba et al., 2013)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Denoising Auto-encoder", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "\u2022 Delete a token with a probability of 10%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Denoising Auto-encoder", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "\u2022 Add a token with a probability of 10%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Denoising Auto-encoder", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "\u2022 Replace a word with a randomly picked word from the vocabulary with a probability of 10%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Denoising Auto-encoder", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "\u2022 Shuffle the words by adding a normal distribution bias to the positions of the words and re-sort the words by the rectified positions with a standard deviation 0.5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Denoising Auto-encoder", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "With a large amount of the artificial training data, the sequence to sequence model learns to reconstruct the input sentence, by trusting most of the input tokens but not always. A sentence pair generated by the corruption process is a GEC sentence pair to some degree, since both of them are translating a not \"perfect\" sentence to a \"perfect\" sentence by deleting, adding, replacing or shuffling some tokens.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Denoising Auto-encoder", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "In natural language processing (NLP), pre-training part of the model also improves many tasks' performance. Word2Vec and GloVe (Pennington et al., 2014; pre-trained word embeddings. CoVe (McCann et al., 2017) pretrained an encoder. ELMo (Peters et al., 2018) pretrained a deep bidirectional architecture, etc. All of them are shown to be effective in many NLP tasks.",

"cite_spans": [

{

"start": 127,

"end": 152,
|
"text": "(Pennington et al., 2014;", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-training Decoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Following (Ramachandran et al., 2016; , we experiment with pre-training the decoder of the copyaugmented sequence-to-sequence architecture as a typical language model. We initialize the decoder of the GEC model with the pre-trained parameters, while initializing the other parameters randomly. Since we use the tied word embeddings between encoder and decoder, most parameters of the model are pre-trained, except for those of the encoder, the encoder-decoder's attention and the copy attention.", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 37, |
|
"text": "(Ramachandran et al., 2016;", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-training Decoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The Multi-Task Learning (MTL) solves problems by jointly training multiple related tasks, and has shown its advantages in many tasks, ranging from computer vision (Zhang et al., 2014; Dai et al., 2016) to NLP (Collobert and Weston, 2008; S\u00f8gaard and Goldberg, 2016) . In this paper, we explore two different tasks for GEC to improve the performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 163, |
|
"end": 183, |
|
"text": "(Zhang et al., 2014;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 184, |
|
"end": 201, |
|
"text": "Dai et al., 2016)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 209, |
|
"end": 237, |
|
"text": "(Collobert and Weston, 2008;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 238, |
|
"end": 265, |
|
"text": "S\u00f8gaard and Goldberg, 2016)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-Task Learning", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We propose a token-level labeling task for the source sentence, and assign each token in the source sentence a label indicating whether this token is right/wrong.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Token-level Labeling Task", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Assuming that each source token x i can be aligned with a target token y j , we define that the source token is right if x i = y j , and wrong otherwise. Each token's label is predicted by passing the final state h src i of the encoder through a softmax after an affine transformation, as shown in Equation 10.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Token-level Labeling Task", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "p(label i |x 1...N ) = sof tmax(W T h src i ) (10)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Token-level Labeling Task", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "This token-level labeling task explicitly augment the input tokens' correctness to the encoder, which can later be used by the decoder.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Token-level Labeling Task", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The primary motivation behind the sentence-level copying task is to make the model do more copying when the input sentence looks entirely correct.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence-level Copying Task", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "During training, we send equal number of sampled correct sentence pairs and the edited sentence pairs to the model. When inputting the right sentences, we remove the decoder's attention over the outputs of the encoder. Without the encoderdecoder attention, the generating work gets hard. As a result, the copying part of the model will be boosted for the correct sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence-level Copying Task", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "As previous studies, we use the public NUCLE (Dahlmeier et al., 2013) , Lang-8 (Tajiri et al., 2012) and FCE (Yannakoudakis et al., 2011) corpus as our parrallel training data. The unlabeled dataset we use is the well-known One Billion Word Benchmark (Chelba et al., 2013) . We choose the test set of CoNLL-2014 shared task as our test set and CoNLL-2013 test data set (Dahlmeier et al., 2013) as our development benchmark. For the CoNLL data sets, the Max-Match (M 2 ) scores (Dahlmeier and Ng, 2012) were reported, and for the JFLEG (Napoles et (Sakaguchi et al., 2016) were reported.", |
|
"cite_spans": [ |
|
{ |
|
"start": 45, |
|
"end": 69, |
|
"text": "(Dahlmeier et al., 2013)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 79, |
|
"end": 100, |
|
"text": "(Tajiri et al., 2012)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 109, |
|
"end": 137, |
|
"text": "(Yannakoudakis et al., 2011)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 251, |
|
"end": 272, |
|
"text": "(Chelba et al., 2013)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 369, |
|
"end": 393, |
|
"text": "(Dahlmeier et al., 2013)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 477, |
|
"end": 501, |
|
"text": "(Dahlmeier and Ng, 2012)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 535, |
|
"end": 546, |
|
"text": "(Napoles et", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 547, |
|
"end": 571, |
|
"text": "(Sakaguchi et al., 2016)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "To make our results comparable to state-of-theart results in the field of GEC, we limit our training data strictly to public resources. Table 2 and Table 3 list all the data sets that we use in this paper.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 136, |
|
"end": 156, |
|
"text": "Table 2 and Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We build a statistical-based spell error correction system and correct the spell errors in our training data. Following (Ge et al., 2018; Chollampatt and Ng, 2018) and etc., we apply spell correction before evaluation for our dev/test datasets. A 50,000-word dictionary is extracted from the spell-corrected Lang-8 data corpus. Like previous works, we remove the unchanged sentence pairs in the Lang-8 corpus before training.", |
|
"cite_spans": [ |
|
{ |
|
"start": 120, |
|
"end": 137, |
|
"text": "(Ge et al., 2018;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 138, |
|
"end": 163, |
|
"text": "Chollampatt and Ng, 2018)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "In this paper, we use the Transformer implementation in the public FAIR Sequence-to-Sequence Toolkit 1 (Gehring et al., 2017) codebase.", |
|
"cite_spans": [ |
|
{ |
|
"start": 103, |
|
"end": 125, |
|
"text": "(Gehring et al., 2017)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model and Training Settings", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "For the transformer model, we use token embeddings and hidden size of dimension 512, and the encoder and decoder have 6 layers and 8 attention heads. For the inner layer in the positionwise feed-forward network, we use 4096. Similar to previous models, we set the dropout to 0.2. A 50,000-word vocabulary for the input and output tokens is collected from the training data. In total, this model has 97M parameters.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model and Training Settings", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Models are optimized with Nesterovs Accelerated Gradient (Nesterov, 1983) . We set the learning rate with 0.002, the weight decay 0.5, the patience 0, the momentum 0.99 and minimum learning rate 1e-4. During training, we evaluate the performance on the development set for every epoch.",
|
"cite_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 73, |
|
"text": "(Nesterov, 1983)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model and Training Settings", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We also use edit-weighted MLE objective as , by scaling the loss of the changed words with a balancing factor \u039b.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model and Training Settings", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Almost the same architecture and hyperparameters are used when pre-training using unlabeled data, except the \u039b parameter for editweighted loss. We set \u039b = 3 when we train the denoising auto-encoder, and set \u039b \u2208 [1, 1.8] when we train GEC models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model and Training Settings", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "During decoding, we use a beam-size of 12 and normalize model scores by length. We do not use reranking when evaluating the CoNLL-2014 data sets. But we rerank the top 12 hypotheses using the language model trained on Common Crawl (Junczys-Dowmunt and Grundkiewicz, 2016) for the JFLEG test sets.",

"cite_spans": [

{

"start": 231,

"end": 271,
|
"text": "(Junczys-Dowmunt and Grundkiewicz, 2016)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model and Training Settings", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We compare our results with the well-known GEC systems, as shown in Table 4 . Rule, classification, statistical machine translation (SMT), and neural machine translation (NMT) based systems were built for the GEC task. We list the well-known models on the top section of Table 4 and our results in the middle. Almost all the previous systems reranked their top 12 results using a big language model and some of them used partially pretrained parameters, which improve their results by 1.5 to 5 F 0.5 score. Our copy-augmented architecture achieves a 56.42 F 0.5 score on the CoNLL-2014 dataset and outperforms all the previous architectures even without reranking or pre-training.",
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 68, |
|
"end": 75, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
}, |
|
{ |
|
"start": 271, |
|
"end": 278, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Combined with denoising auto-encoders and multi-tasks, our model achieves a 61.15 F 0.5 score on the CoNLL-2014 data set. This result exceeds the previous state-of-the-art system +4.9 F 0.5 points.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "In the bottom section of Table 4 , we list the results of (Ge et al., 2018) . No direct comparison can be made between us, because they used the non-public Cambridge Learner Corpus (CLC) (Nicholls, 2003) and their own collected non-public Lang-8 corpus, making their labeled training data set 3.6 times larger than ours. Even so, our results on the CoNLL 2014 test data set and JFLEG test data set are very close to theirs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 58, |
|
"end": 75, |
|
"text": "(Ge et al., 2018)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 187, |
|
"end": 203, |
|
"text": "(Nicholls, 2003)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 25, |
|
"end": 32, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "In Table 4 , \"SMT (with LM)\" refers to (Junczys-Dowmunt and Grundkiewicz, 2014 \"SMT Rule-Based Hybrid\" refers to (Felice et al., 2014) ; \"SMT Classification Hybrid\" refers to (Rozovskaya and Roth, 2016); \"Neural Hybrid MT\" refers to (Ji et al., 2017) ; \"CNN + EO\" refers to (Chollampatt and Ng, 2018) and \"EO\" means rerank with edit-operation features; \"Transformer + MIMs\" refers to and \"MIMs\" means model indepent methods; \"NMT SMT Hybrid\" refers to ; \"CNN + FB Learning\" refers to (Ge et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 39, |
|
"end": 78, |
|
"text": "(Junczys-Dowmunt and Grundkiewicz, 2014", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 113, |
|
"end": 134, |
|
"text": "(Felice et al., 2014)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 233, |
|
"end": 250, |
|
"text": "(Ji et al., 2017)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 274, |
|
"end": 300, |
|
"text": "(Chollampatt and Ng, 2018)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 484, |
|
"end": 501, |
|
"text": "(Ge et al., 2018)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "In this section, we compare the Transformer architecture's results with and without copying mechanism on the GEC task. As illustrated in Table 5 , copy-augmented model increases the F 0.5 score from 48.07 to 54.67, with a +6.6 absolute increase. Most of the improvements come from the words that are out of the fixed vocabulary, which will be predicted as a UNK word in the base model but will be copied as the word itself in the copyaugmented model. Copying is generally known as good at handling the UNK words. To verify if copying is more than copying UNK words, we do experiments by ignoring all UNK edits. From Table 5 , we can see that even ignoring the UNK benefits, the copyaugmented model is still 1.62 F 0.5 points higher than the baseline model, and most of the benefit comes from the increased recall.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 137, |
|
"end": 145, |
|
"text": "Table 5", |
|
"ref_id": "TABREF8" |
|
}, |
|
{ |
|
"start": 617, |
|
"end": 624, |
|
"text": "Table 5", |
|
"ref_id": "TABREF8" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Copying Ablation Results", |
|
"sec_num": "5.4.1" |
|
}, |
|
{ |
|
"text": "From Table 5 , we can observe that by partially pretraining the decoder, the F 0.5 score is improved from 54.67 to 57.21 (+2.54). It is an evident improvement compared to the un-pre-trained ones. However, the denoising auto-encoder improves the single model from 54.67 to 58.8 (+4.13). We can also see that both the precision and recall are improved after pre-training.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 5, |
|
"end": 12, |
|
"text": "Table 5", |
|
"ref_id": "TABREF8" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Pre-training Ablation Results", |
|
"sec_num": "5.4.2" |
|
}, |
|
{ |
|
"text": "To further investigate how good the pre-trained parameters are, we show the results of the early stage with and without the denoising autoencoder's pre-trained parameters in Table 6 . The results show, if we finetune the model for 1 epoch with the labeled training data, the pre-trained model beats the un-pretrained one with a big gap (48.89 vs 17.19). Even without finetune, the pretrained model can get a F 0.5 score of 31.33. This proves that pre-training gives the models much better initial parameters than the randomly picked ones.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 174, |
|
"end": 181, |
|
"text": "Table 6", |
|
"ref_id": "TABREF9" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Pre-training Ablation Results", |
|
"sec_num": "5.4.2" |
|
}, |
|
{ |
|
"text": "We add the sentence-level copying task to encourage the model to output no edits when we input a correct sentence. To verify this, we create a correct sentence set by sampling 500 sentences from Wikipedia. Also, we generate an error sentence set by sampling 500 sentences from CoNLL-2013 test data set, which is an error-annotated dataset. Then we calculate the average value of the balance factor \u03b1 copy of the two sets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence-level Copying Task Ablation Results", |
|
"sec_num": "5.4.3" |
|
}, |
|
{ |
|
"text": "Before we add the sentence-level copying task, the \u03b1 copy is 0.44/0.45 for the correct and error sentence sets. After adding the sentence-level copying task, the value changed to 0.81/0.57. This means that 81% of the final score comes from copying on the correct sentence set, while only 57% on the error sentence set. By adding the sentence-level copying task, models learn to distinguish correct sentences and error sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence-level Copying Task Ablation Results", |
|
"sec_num": "5.4.3" |
|
}, |
|
{ |
|
"text": "To analyze how copying and generating divide their work. We visualized the copying attention alignment and the encoder-decoder attention alignment in Figure 2 . In Figure 2(a) , copying focus their weights on the next word in good order, while in Figure 2 (b), generating moves its attention more on the other words, e.g., the nearby words, and the end of the sentence. As explained in (Raganato et al., 2018), the generating part tries to find long dependencies and attend more on global information. By separating the copying work from the generation work, the generation part of the model can focus more on the \"creative\" works.", |
|
"cite_spans": [ |
|
{ |
|
"start": 386, |
|
"end": 409, |
|
"text": "(Raganato et al., 2018)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 150, |
|
"end": 158, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 164, |
|
"end": 175, |
|
"text": "Figure 2(a)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 247, |
|
"end": 255, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Attention Visualization", |
|
"sec_num": "5.5" |
|
}, |
|
{ |
|
"text": "Automatic grammatical error correction is a complicated task since there are different kinds of errors and various correction ways. In this section, we analyze our systems' performance on different grammatical error types. (Ng et al., 2014) labeled CoNLL-2014 test set with 28 error types, and we list the recall percentage on the top 9 error types. We summarize the other 19 types in the last line of the table.", |
|
"cite_spans": [ |
|
{ |
|
"start": 223, |
|
"end": 240, |
|
"text": "(Ng et al., 2014)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Recall on Different Error Types", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "Our approach recalls 72.65% errors on the \"Noun number\" type and 61.79% on the \"Subject- Figure 2 : An example of the different behaviors between the copy and encoder-decoder attention. In each figure, the above line is the source sentence, where the error words are in italic. The bottom line is the corrected sentence, where the corrected words are in bold italic. The arrow means which source token the copy and encoder-decoder attention mainly focus on, when predicting the current word. \" bos \" refers to the begin of the sentence and \" eos \" refers to the end of the sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 89, |
|
"end": 97, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Recall on Different Error Types", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "Verb Agreement\" type. However, only 10.38% errors are recalled on the \"Wrong Collocation/Idiom\" type.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Recall on Different Error Types", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "Computers are good at the definite and mechanical errors, but still have a big gap with humans on the error types that are subjective and with cultural characteristics.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Recall on Different Error Types", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "Early published works in GEC develop specific classifiers for different error types and then use them to build hybrid systems. Later, leveraging the progress of statistical machine translation(SMT) and large-scale error corrected data, GEC systems are further improved treated as a translation problem. SMT systems can remember phrase-based correction pairs, but they are hard to generalize beyond what was seen in training. The CoNLL-14 shared task overview paper (Ng et al., 2014) provides a comparative evaluation of approaches. (Rozovskaya and Roth, 2016) detailed classification and machine translation approaches to grammatical error correction problems, and combined the strengths for both methods.", |
|
"cite_spans": [ |
|
{ |
|
"start": 532, |
|
"end": 559, |
|
"text": "(Rozovskaya and Roth, 2016)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Recently, neural machine translation approaches have been shown to be very powerful. (Yannakoudakis et al., 2017 ) developed a neural sequence-labeling model for error detection to calculate the probability of each token in a sentence as being correct or incorrect, and then use the error detecting model's result as a feature to re-rank the N best hypotheses. (Ji et al., 2017) proposed a hybrid neural model incorporating both the word and character-level information. (Chollampatt and Ng, 2018) used a multilayer convolutional encoder-decoder neural network and outperforms all prior neural and statistical based systems on this task. tried deep RNN (Barone et al., 2017) and transformer (Vaswani et al., 2017) encoderdecoder models and got a higher result by using transformer and a set of model-independent methods for neural GEC.", |
|
"cite_spans": [ |
|
{ |
|
"start": 85, |
|
"end": 112, |
|
"text": "(Yannakoudakis et al., 2017", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 361, |
|
"end": 378, |
|
"text": "(Ji et al., 2017)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 471, |
|
"end": 497, |
|
"text": "(Chollampatt and Ng, 2018)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 649, |
|
"end": 674, |
|
"text": "RNN (Barone et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 691, |
|
"end": 713, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The state-of-the-art system on GEC task is achieved by (Ge et al., 2018) , which are based on the sequence-to-sequence framework and fluency boost learning and inference mechanism. However, the usage of the non-public CLC corpus (Nicholls, 2003) and self-collected non-public error-corrected sentence pairs from Lang-8 made their training data 3.6 times larger than the others and their results hard to compare.", |
|
"cite_spans": [ |
|
{ |
|
"start": 55, |
|
"end": 72, |
|
"text": "(Ge et al., 2018)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 229, |
|
"end": 245, |
|
"text": "(Nicholls, 2003)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We present a copy-augmented architecture for GEC, by considering the characteristics of this problem.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Firstly, we propose an enhanced copy-augmented architecture, which improves the sequence-to-sequence model's ability by directly copying the unchanged words and outof-vocabulary words from the source input tokens. Secondly, we fully pre-train the copyaugmented architecture using large-scale unlabeled data, leveraging denoising auto-encoders.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Thirdly, we introduce two auxiliary tasks for multi-task learning. Finally, we outperform the state-of-the-art automatic grammatical error correction system by a large margin. However, due to the complexity of the GEC problem, there is still a long way to go to make the automatic GEC systems as reliable as humans.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "https://github.com/pytorch/fairseq", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Deep architectures for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Valerio Miceli Barone", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jind\u0159ich", |
|
"middle": [], |
|
"last": "Helcl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1707.07631" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Antonio Valerio Miceli Barone, Jind\u0159ich Helcl, Rico Sennrich, Barry Haddow, and Alexandra Birch. 2017. Deep architectures for neural machine trans- lation. arXiv preprint arXiv:1707.07631.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "One billion word benchmark for measuring progress in statistical language modeling", |
|
"authors": [ |
|
{ |
|
"first": "Ciprian", |
|
"middle": [], |
|
"last": "Chelba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qi", |
|
"middle": [], |
|
"last": "Ge", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1312.3005" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ciprian Chelba, Tomas Mikolov, Mike Schuster, Qi Ge, Thorsten Brants, Phillipp Koehn, and Tony Robin- son. 2013. One billion word benchmark for measur- ing progress in statistical language modeling. arXiv preprint arXiv:1312.3005.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "A multilayer convolutional encoder-decoder neural network for grammatical error correction", |
|
"authors": [ |
|
{ |
|
"first": "Shamil", |
|
"middle": [], |
|
"last": "Chollampatt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hwee Tou", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1801.08831" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shamil Chollampatt and Hwee Tou Ng. 2018. A multi- layer convolutional encoder-decoder neural network for grammatical error correction. arXiv preprint arXiv:1801.08831.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "A unified architecture for natural language processing: Deep neural networks with multitask learning", |
|
"authors": [ |
|
{ |
|
"first": "Ronan", |
|
"middle": [], |
|
"last": "Collobert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 25th international conference on Machine learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "160--167", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ronan Collobert and Jason Weston. 2008. A unified architecture for natural language processing: Deep neural networks with multitask learning. In Pro- ceedings of the 25th international conference on Machine learning, pages 160-167. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Better evaluation for grammatical error correction", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Dahlmeier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hwee Tou", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 2012 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "568--572", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Dahlmeier and Hwee Tou Ng. 2012. Better evaluation for grammatical error correction. In Pro- ceedings of the 2012 Conference of the North Amer- ican Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 568-572. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Building a large annotated corpus of learner english: The nus corpus of learner english", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Dahlmeier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siew Mei", |
|
"middle": [], |
|
"last": "Hwee Tou Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the eighth workshop on innovative use of NLP for building educational applications", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "22--31", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Dahlmeier, Hwee Tou Ng, and Siew Mei Wu. 2013. Building a large annotated corpus of learner english: The nus corpus of learner english. In Pro- ceedings of the eighth workshop on innovative use of NLP for building educational applications, pages 22-31.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Instanceaware semantic segmentation via multi-task network cascades", |
|
"authors": [ |
|
{ |
|
"first": "Jifeng", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kaiming", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3150--3158", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jifeng Dai, Kaiming He, and Jian Sun. 2016. Instance- aware semantic segmentation via multi-task network cascades. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3150-3158.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Grammatical error correction using hybrid systems and type filtering", |
|
"authors": [ |
|
{ |
|
"first": "Mariano", |
|
"middle": [], |
|
"last": "Felice", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zheng", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "\u00d8istein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Helen", |
|
"middle": [], |
|
"last": "Andersen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ekaterina", |
|
"middle": [], |
|
"last": "Yannakoudakis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kochmar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Eighteenth Conference on Computational Natural Language Learning: Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "15--24", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mariano Felice, Zheng Yuan, \u00d8istein E Andersen, He- len Yannakoudakis, and Ekaterina Kochmar. 2014. Grammatical error correction using hybrid systems and type filtering. In Proceedings of the Eigh- teenth Conference on Computational Natural Lan- guage Learning: Shared Task, pages 15-24.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Reaching human-level performance in automatic grammatical error correction: An empirical study", |
|
"authors": [ |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Ge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Furu", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1807.01270" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tao Ge, Furu Wei, and Ming Zhou. 2018. Reaching human-level performance in automatic grammatical error correction: An empirical study. arXiv preprint arXiv:1807.01270.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Convolutional sequence to sequence learning", |
|
"authors": [ |
|
{ |
|
"first": "Jonas", |
|
"middle": [], |
|
"last": "Gehring", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Auli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Grangier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Denis", |
|
"middle": [], |
|
"last": "Yarats", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yann N", |
|
"middle": [], |
|
"last": "Dauphin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1705.03122" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jonas Gehring, Michael Auli, David Grangier, De- nis Yarats, and Yann N Dauphin. 2017. Convolu- tional sequence to sequence learning. arXiv preprint arXiv:1705.03122.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Near human-level performance in grammatical error correction with hybrid machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Roman", |
|
"middle": [], |
|
"last": "Grundkiewicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcin", |
|
"middle": [], |
|
"last": "Junczys-Dowmunt", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1804.05945" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roman Grundkiewicz and Marcin Junczys-Dowmunt. 2018. Near human-level performance in grammati- cal error correction with hybrid machine translation. arXiv preprint arXiv:1804.05945.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Incorporating copying mechanism in sequence-to-sequence learning", |
|
"authors": [ |
|
{ |
|
"first": "Jiatao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhengdong", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Victor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1603.06393" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiatao Gu, Zhengdong Lu, Hang Li, and Victor OK Li. 2016. Incorporating copying mechanism in sequence-to-sequence learning. arXiv preprint arXiv:1603.06393.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "A nested attention neural hybrid model for grammatical error correction", |
|
"authors": [ |
|
{ |
|
"first": "Jianshu", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qinlong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yongen", |
|
"middle": [], |
|
"last": "Gong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Truong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1707.02026" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jianshu Ji, Qinlong Wang, Kristina Toutanova, Yon- gen Gong, Steven Truong, and Jianfeng Gao. 2017. A nested attention neural hybrid model for grammatical error correction. arXiv preprint arXiv:1707.02026.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Data recombination for neural semantic parsing", |
|
"authors": [ |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1606.03622" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robin Jia and Percy Liang. 2016. Data recombina- tion for neural semantic parsing. arXiv preprint arXiv:1606.03622.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "The amu system in the conll-2014 shared task: Grammatical error correction by data-intensive and feature-rich statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Marcin", |
|
"middle": [], |
|
"last": "Junczys", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "-", |
|
"middle": [], |
|
"last": "Dowmunt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roman", |
|
"middle": [], |
|
"last": "Grundkiewicz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Eighteenth Conference on Computational Natural Language Learning: Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "25--33", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcin Junczys-Dowmunt and Roman Grundkiewicz. 2014. The amu system in the conll-2014 shared task: Grammatical error correction by data-intensive and feature-rich statistical machine translation. In Proceedings of the Eighteenth Conference on Com- putational Natural Language Learning: Shared Task, pages 25-33.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Phrase-based machine translation is state-ofthe-art for automatic grammatical error correction", |
|
"authors": [ |
|
{ |
|
"first": "Marcin", |
|
"middle": [], |
|
"last": "Junczys", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "-", |
|
"middle": [], |
|
"last": "Dowmunt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roman", |
|
"middle": [], |
|
"last": "Grundkiewicz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1605.06353" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcin Junczys-Dowmunt and Roman Grundkiewicz. 2016. Phrase-based machine translation is state-of- the-art for automatic grammatical error correction. arXiv preprint arXiv:1605.06353.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Approaching neural grammatical error correction as a low-resource machine translation task", |
|
"authors": [ |
|
{ |
|
"first": "Marcin", |
|
"middle": [], |
|
"last": "Junczys-Dowmunt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roman", |
|
"middle": [], |
|
"last": "Grundkiewicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shubha", |
|
"middle": [], |
|
"last": "Guha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenneth", |
|
"middle": [], |
|
"last": "Heafield", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1804.05940" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcin Junczys-Dowmunt, Roman Grundkiewicz, Shubha Guha, and Kenneth Heafield. 2018. Ap- proaching neural grammatical error correction as a low-resource machine translation task. arXiv preprint arXiv:1804.05940.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Learned in translation: Contextualized word vectors", |
|
"authors": [ |
|
{ |
|
"first": "Bryan", |
|
"middle": [], |
|
"last": "Mccann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Bradbury", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6294--6305", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bryan McCann, James Bradbury, Caiming Xiong, and Richard Socher. 2017. Learned in translation: Con- textualized word vectors. In Advances in Neural In- formation Processing Systems, pages 6294-6305.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Distributed representations of words and phrases and their compositionality", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3111--3119", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Cor- rado, and Jeff Dean. 2013. Distributed representa- tions of words and phrases and their compositional- ity. In Advances in neural information processing systems, pages 3111-3119.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Jfleg: A fluency corpus and benchmark for grammatical error correction", |
|
"authors": [ |
|
{ |
|
"first": "Courtney", |
|
"middle": [], |
|
"last": "Napoles", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Keisuke", |
|
"middle": [], |
|
"last": "Sakaguchi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joel", |
|
"middle": [], |
|
"last": "Tetreault", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1702.04066" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Courtney Napoles, Keisuke Sakaguchi, and Joel Tetreault. 2017. Jfleg: A fluency corpus and benchmark for grammatical error correction. arXiv preprint arXiv:1702.04066.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "A method for solving the convex programming problem with convergence rate o (1/k\u02c62)", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Yurii", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Nesterov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1983, |
|
"venue": "Dokl. Akad. Nauk SSSR", |
|
"volume": "269", |
|
"issue": "", |
|
"pages": "543--547", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yurii E Nesterov. 1983. A method for solving the con- vex programming problem with convergence rate o (1/k\u02c62). In Dokl. Akad. Nauk SSSR, volume 269, pages 543-547.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "The conll-2014 shared task on grammatical error correction", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Hwee Tou Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mei", |
|
"middle": [], |
|
"last": "Siew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ted", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Briscoe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raymond", |
|
"middle": [ |
|
"Hendy" |
|
], |
|
"last": "Hadiwinoto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Susanto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bryant", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Eighteenth Conference on Computational Natural Language Learning: Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--14", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hwee Tou Ng, Siew Mei Wu, Ted Briscoe, Christian Hadiwinoto, Raymond Hendy Susanto, and Christo- pher Bryant. 2014. The conll-2014 shared task on grammatical error correction. In Proceedings of the Eighteenth Conference on Computational Natural Language Learning: Shared Task, pages 1-14.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "The cambridge learner corpus: Error coding and analysis for lexicography and elt", |
|
"authors": [ |
|
{ |
|
"first": "Diane", |
|
"middle": [], |
|
"last": "Nicholls", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the Corpus Linguistics", |
|
"volume": "16", |
|
"issue": "", |
|
"pages": "572--581", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diane Nicholls. 2003. The cambridge learner corpus: Error coding and analysis for lexicography and elt. In Proceedings of the Corpus Linguistics 2003 con- ference, volume 16, pages 572-581.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. Glove: Global vectors for word representation. In Proceedings of the 2014 confer- ence on empirical methods in natural language pro- cessing (EMNLP), pages 1532-1543.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Deep contextualized word representations", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Matthew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1802.05365" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew E Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word rep- resentations. arXiv preprint arXiv:1802.05365.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "An analysis of encoder representations in transformerbased machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Raganato", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00f6rg", |
|
"middle": [], |
|
"last": "Tiedemann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alessandro Raganato, J\u00f6rg Tiedemann, et al. 2018. An analysis of encoder representations in transformer- based machine translation. In Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Unsupervised pretraining for sequence to sequence learning", |
|
"authors": [ |
|
{ |
|
"first": "Prajit", |
|
"middle": [], |
|
"last": "Ramachandran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Peter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc V", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1611.02683" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Prajit Ramachandran, Peter J Liu, and Quoc V Le. 2016. Unsupervised pretraining for sequence to se- quence learning. arXiv preprint arXiv:1611.02683.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Grammatical error correction: Machine translation and classifiers", |
|
"authors": [ |
|
{ |
|
"first": "Alla", |
|
"middle": [], |
|
"last": "Rozovskaya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "2205--2215", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alla Rozovskaya and Dan Roth. 2016. Grammatical error correction: Machine translation and classifiers. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), volume 1, pages 2205-2215.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Reassessing the goals of grammatical error correction: Fluency instead of grammaticality", |
|
"authors": [ |
|
{ |
|
"first": "Keisuke", |
|
"middle": [], |
|
"last": "Sakaguchi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Courtney", |
|
"middle": [], |
|
"last": "Napoles", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Post", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joel", |
|
"middle": [], |
|
"last": "Tetreault", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Transactions of the Association of Computational Linguistics", |
|
"volume": "4", |
|
"issue": "1", |
|
"pages": "169--182", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Keisuke Sakaguchi, Courtney Napoles, Matt Post, and Joel Tetreault. 2016. Reassessing the goals of gram- matical error correction: Fluency instead of gram- maticality. Transactions of the Association of Com- putational Linguistics, 4(1):169-182.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Get to the point: Summarization with pointer-generator networks", |
|
"authors": [ |
|
{ |
|
"first": "Abigail", |
|
"middle": [], |
|
"last": "See", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Peter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1704.04368" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abigail See, Peter J Liu, and Christopher D Man- ning. 2017. Get to the point: Summarization with pointer-generator networks. arXiv preprint arXiv:1704.04368.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Deep multi-task learning with low level tasks supervised at lower layers", |
|
"authors": [ |
|
{ |
|
"first": "Anders", |
|
"middle": [], |
|
"last": "S\u00f8gaard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "231--235", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anders S\u00f8gaard and Yoav Goldberg. 2016. Deep multi-task learning with low level tasks supervised at lower layers. In Proceedings of the 54th Annual Meeting of the Association for Computational Lin- guistics (Volume 2: Short Papers), volume 2, pages 231-235.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Tense and aspect error correction for esl learners using global context", |
|
"authors": [ |
|
{ |
|
"first": "Toshikazu", |
|
"middle": [], |
|
"last": "Tajiri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mamoru", |
|
"middle": [], |
|
"last": "Komachi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuji", |
|
"middle": [], |
|
"last": "Matsumoto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "198--202", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Toshikazu Tajiri, Mamoru Komachi, and Yuji Mat- sumoto. 2012. Tense and aspect error correction for esl learners using global context. In Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics: Short Papers-Volume 2, pages 198-202. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems, pages 5998-6008.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Extracting and composing robust features with denoising autoencoders", |
|
"authors": [ |
|
{ |
|
"first": "Pascal", |
|
"middle": [], |
|
"last": "Vincent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hugo", |
|
"middle": [], |
|
"last": "Larochelle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierre-Antoine", |
|
"middle": [], |
|
"last": "Manzagol", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 25th international conference on Machine learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1096--1103", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pascal Vincent, Hugo Larochelle, Yoshua Bengio, and Pierre-Antoine Manzagol. 2008. Extracting and composing robust features with denoising autoen- coders. In Proceedings of the 25th international conference on Machine learning, pages 1096-1103. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "A new dataset and method for automatically grading esol texts", |
|
"authors": [ |
|
{ |
|
"first": "Helen", |
|
"middle": [], |
|
"last": "Yannakoudakis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ted", |
|
"middle": [], |
|
"last": "Briscoe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Medlock", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "180--189", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Helen Yannakoudakis, Ted Briscoe, and Ben Medlock. 2011. A new dataset and method for automatically grading esol texts. In Proceedings of the 49th An- nual Meeting of the Association for Computational Linguistics: Human Language Technologies-Volume 1, pages 180-189. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Neural sequence-labelling models for grammatical error correction", |
|
"authors": [ |
|
{ |
|
"first": "Helen", |
|
"middle": [], |
|
"last": "Yannakoudakis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marek", |
|
"middle": [], |
|
"last": "Rei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "\u00d8istein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zheng", |
|
"middle": [], |
|
"last": "Andersen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2795--2806", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Helen Yannakoudakis, Marek Rei, \u00d8istein E Andersen, and Zheng Yuan. 2017. Neural sequence-labelling models for grammatical error correction. In Pro- ceedings of the 2017 Conference on Empirical Meth- ods in Natural Language Processing, pages 2795- 2806.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Facial landmark detection by deep multi-task learning", |
|
"authors": [ |
|
{ |
|
"first": "Zhanpeng", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ping", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chen", |
|
"middle": [ |
|
"Change" |
|
], |
|
"last": "Loy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoou", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "European Conference on Computer Vision", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "94--108", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhanpeng Zhang, Ping Luo, Chen Change Loy, and Xiaoou Tang. 2014. Facial landmark detection by deep multi-task learning. In European Conference on Computer Vision, pages 94-108. Springer.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "Copy-Augmented Architecture.", |
|
"uris": null |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"text": "Training Corpus", |
|
"type_str": "table", |
|
"content": "<table><tr><td>Corpus</td><td colspan=\"3\">Sent. Annot. Metric</td></tr><tr><td colspan=\"2\">CoNLL-2013 1,381</td><td>1</td><td>M 2</td></tr><tr><td colspan=\"2\">CoNLL-2014 1,312</td><td>2</td><td>M 2</td></tr><tr><td>JFLEG</td><td>747</td><td>4</td><td>GLEU</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF4": { |
|
"html": null, |
|
"text": "", |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"num": null |
|
}, |
|
"TABREF6": { |
|
"html": null, |
|
"text": "Comparison of GEC systems on CoNLL-2014 and JFLEG test set. The M 2 score for CoNLL-2014 test dataset and the GLEU for the JFLEG test set are reported. DA refers to the \"Denoising Auto-encoder\". (with LM) refers to the usage of an extra language model. (4 ens.) refers to the ensemble decoding of 4 independently trained models. We re-rank the results of the top 12 hypothesizes for the JFLEG test set with an extra language model and marked them with", |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"num": null |
|
}, |
|
"TABREF8": { |
|
"html": null, |
|
"text": "Single Model Ablation Study on CoNLL 2014 Test Data Set.", |
|
"type_str": "table", |
|
"content": "<table><tr><td>Finetune</td><td>Pre. Rec.</td><td>F 0.5</td></tr><tr><td colspan=\"2\">with the denoising auto-encoder</td><td/></tr><tr><td>no finetune</td><td colspan=\"2\">36.61 19.87 31.33</td></tr><tr><td colspan=\"3\">finetune 1 epoch 68.58 22.76 48.89</td></tr><tr><td colspan=\"3\">without the denoising auto-encoder</td></tr><tr><td colspan=\"3\">finetune 1 epoch 32.55 05.96 17.19</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF9": { |
|
"html": null, |
|
"text": "", |
|
"type_str": "table", |
|
"content": "<table><tr><td>:</td><td>Denoising Auto-encoder's Results on</td></tr><tr><td colspan=\"2\">CoNLL-2014 Test Data Set.</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF10": { |
|
"html": null, |
|
"text": ", this means that the gen-", |
|
"type_str": "table", |
|
"content": "<table><tr><td>Error Type</td><td>%</td><td>Recall</td></tr><tr><td>Article Or Determiner</td><td colspan=\"2\">14.31% 44.54%</td></tr><tr><td colspan=\"3\">Wrong Collocation/Idiom 12.75% 10.38%</td></tr><tr><td colspan=\"3\">Spelling, Punctuation, etc. 12.47% 45.66%</td></tr><tr><td>Preposition</td><td colspan=\"2\">10.38% 49.03%</td></tr><tr><td>Noun number</td><td colspan=\"2\">9.38% 72.65%</td></tr><tr><td>Verb Tense</td><td colspan=\"2\">5.41% 28.15%</td></tr><tr><td>Subject-Verb Agreement</td><td colspan=\"2\">4.93% 61.79%</td></tr><tr><td>Verb form</td><td colspan=\"2\">4.69% 57.26%</td></tr><tr><td>Redundancy</td><td colspan=\"2\">4.65% 25.86%</td></tr><tr><td>Others</td><td colspan=\"2\">20.99% 23.28%</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF11": { |
|
"html": null, |
|
"text": "Recall on Different Error Types. % is the percentage of this error type in the test data set. Recall is the percentage of the fixed errors in each error type.", |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |