|
{ |
|
"paper_id": "I17-1048", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:39:02.839895Z" |
|
}, |
|
"title": "A Neural Language Model for Dynamically Representing the Meanings of Unknown Words and Entities in a Discourse", |
|
"authors": [ |
|
{ |
|
"first": "Sosuke", |
|
"middle": [], |
|
"last": "Kobayashi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Preferred Networks, Inc", |
|
"location": { |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Naoaki", |
|
"middle": [], |
|
"last": "Okazaki", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Tokyo Institute of Technology", |
|
"location": { |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "okazaki@c.titech.ac.jp" |
|
}, |
|
{ |
|
"first": "Kentaro", |
|
"middle": [], |
|
"last": "Inui", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Tohoku University", |
|
"location": { |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "inui@ecei.tohoku.ac.jp" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This study addresses the problem of identifying the meaning of unknown words or entities in a discourse with respect to the word embedding approaches used in neural language models. We proposed a method for on-the-fly construction and exploitation of word embeddings in both the input and output layers of a neural model by tracking contexts. This extends the dynamic entity representation used in Kobayashi et al. (2016) and incorporates a copy mechanism proposed independently by Gu et al. (2016) and Gulcehre et al. (2016). In addition, we construct a new task and dataset called Anonymized Language Modeling for evaluating the ability to capture word meanings while reading. Experiments conducted using our novel dataset show that the proposed variant of RNN language model outperformed the baseline model. Furthermore, the experiments also demonstrate that dynamic updates of an output layer help a model predict reappearing entities, whereas those of an input layer are effective to predict words following reappearing entities.", |
|
"pdf_parse": { |
|
"paper_id": "I17-1048", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This study addresses the problem of identifying the meaning of unknown words or entities in a discourse with respect to the word embedding approaches used in neural language models. We proposed a method for on-the-fly construction and exploitation of word embeddings in both the input and output layers of a neural model by tracking contexts. This extends the dynamic entity representation used in Kobayashi et al. (2016) and incorporates a copy mechanism proposed independently by Gu et al. (2016) and Gulcehre et al. (2016). In addition, we construct a new task and dataset called Anonymized Language Modeling for evaluating the ability to capture word meanings while reading. Experiments conducted using our novel dataset show that the proposed variant of RNN language model outperformed the baseline model. Furthermore, the experiments also demonstrate that dynamic updates of an output layer help a model predict reappearing entities, whereas those of an input layer are effective to predict words following reappearing entities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Language models that use probability distributions over sequences of words are found in many natural language processing applications, including speech recognition, machine translation, text summarization, and dialogue utterance generation. Recent studies have demonstrated that language models trained using neural network (Bengio et al., 2003; Mikolov et al., 2010) such as recurrent neural network (RNN) (Jozefowicz et al., 2016) Figure 1 : Dynamic Neural Text Modeling: the embeddings of unknown words, denoted by coreference indexes \"[ k ]\" are dynamically computed and used in both the input and output layers (x [k] and y [k] ) of a RNN language model. These are constructed from contextual information (d [k] ,i ) preceding the current (i + 1)-th sentence.", |
|
"cite_spans": [ |
|
{ |
|
"start": 324, |
|
"end": 345, |
|
"text": "(Bengio et al., 2003;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 346, |
|
"end": 367, |
|
"text": "Mikolov et al., 2010)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 407, |
|
"end": 432, |
|
"text": "(Jozefowicz et al., 2016)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 619, |
|
"end": 622, |
|
"text": "[k]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 629, |
|
"end": 632, |
|
"text": "[k]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 710, |
|
"end": 716, |
|
"text": "(d [k]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 433, |
|
"end": 441, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "et al., 2016) achieve the best performance across a range of corpora (Mikolov et al., 2010; Chelba et al., 2014; Merity et al., 2017; Grave et al., 2017) . However, current neural language models have a major drawback: the language model works only when applied to a closed vocabulary of fixed size (usually comprising high-frequency words from the given training corpus). All occurrences of outof-vocabulary words are replaced with a single dummy token \"<unk>\", showing that the word is unknown. For example, the word sequence, Pikotaro sings PPAP on YouTube is treated as <unk> sings <unk> on <unk> assuming that the words Pikotaro, PPAP, and YouTube are out of the vocabulary. The model therefore assumes that these words have the same meaning, which is clearly incorrect. The derivation of meanings of unknown words remains a persistent and nontrivial challenge when using word embeddings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 69, |
|
"end": 91, |
|
"text": "(Mikolov et al., 2010;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 92, |
|
"end": 112, |
|
"text": "Chelba et al., 2014;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 113, |
|
"end": 133, |
|
"text": "Merity et al., 2017;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 134, |
|
"end": 153, |
|
"text": "Grave et al., 2017)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In addition, existing language models further assume that the meaning of a word is the same and universal across different documents. Neural language models also make this assumption and represent all occurrences of a word with a single word vector across all documents. However, the assumption of a universal meaning is also unlikely correct. For example, the name John is likely to refer to different individuals in different documents.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
|
{ |
|
"text": "In one story, John may be a pianist while another John denoted in a second story may be an infant. A model that represents all occurrences of John with the same vector fails to capture the very different behavior expected from John as a pianist and John as an infant.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this study, we address these issues and propose a novel neural language model that can build and dynamically change distributed representations of words based on the multi-sentential discourse. The idea of incorporating dynamic meaning representations into neural networks is not new. In the context of reading comprehension, Kobayashi et al. (2016) proposed a model that dynamically computes the representation of a named entity mention from the local context given by its prior occurrences in the text. In neural machine translation, the copy mechanism was proposed as a way of improving the handling of outof-vocabulary words (e.g., named entities) in a source sentence (Gu et al., 2016; Gulcehre et al., 2016) . We use a variant of recurrent neural language model (RNLM), that combines dynamic representation and the copy mechanism. The resulting novel model, Dynamic Neural Text Model, uses the dynamic word embeddings that are constructed from the context in the output and input layers of an RNLM, as shown in Figures 1 and 2.", |
|
"cite_spans": [ |
|
{ |
|
"start": 329, |
|
"end": 352, |
|
"text": "Kobayashi et al. (2016)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 676, |
|
"end": 693, |
|
"text": "(Gu et al., 2016;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 694, |
|
"end": 716, |
|
"text": "Gulcehre et al., 2016)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The contributions of this paper are three-fold. First, we propose a novel neural language model, which we named the Dynamic Neural Text Model. Second, we introduce a new evaluation task and dataset called Anonymized Language Modeling. This dataset can be used to evaluate the ability of a language model to capture word meanings from contextual information (Figure 3 ). This task involves a kind of one-shot learning tasks, in which the meanings of entities are inferred from their limited prior occurrences. Third, our experimental results indicate that the proposed model outperforms baseline models that use only global and static word embeddings in the input and/or output layers of an RNLM. Dynamic updates of the output layer helps the RNLM predict reappearing entities, whereas those of the input layer are effective to predict words following reappearing entities. A more detailed analysis showed that the method was able to successfully capture the meanings of words across large contexts, and to accumulate multiple context information.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 357, |
|
"end": 366, |
|
"text": "(Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Given a sequence of N tokens of a document D = (w 1 , w 2 , ..., w N ), an RNN language model computes the probability p(D) = N t=1 p(w t |w 1 , ..., w t\u22121 ). The computation of each factorized probability p(w t |w 1 , ..., w t\u22121 ) can also be viewed as the task of predicting a following word w t from the preceding words (w 1 , ..., w t\u22121 ). Typically, RNNs recurrently compute the probability of the following word w t by using a hidden state h t\u22121 at time step t \u2212 1,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "RNN Language Model", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(w t |w 1 , ..., w t\u22121 ) = exp( h t\u22121 y wt + b wt ) w\u2208V exp( h t\u22121 y w + b w ) ,", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "RNN Language Model", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "h t = \u2212 \u2212\u2212 \u2192 RNN(x wt , h t\u22121 ).", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "RNN Language Model", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Here, x wt and y wt denote the input and output word embeddings of w t respectively, V represents the set of words in the vocabulary, and b w is a bias value applied when predicting the word w. The function \u2212 \u2212\u2212 \u2192 RNN is often replaced with LSTM (Hochreiter and Schmidhuber, 1997) or GRU (Cho et al., 2014) to improve performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 246, |
|
"end": 280, |
|
"text": "(Hochreiter and Schmidhuber, 1997)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 288, |
|
"end": 306, |
|
"text": "(Cho et al., 2014)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "RNN Language Model", |
|
"sec_num": "2.1" |
|
}, |
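
{

"text": "A minimal illustrative sketch of Equations 1 and 2 in NumPy, assuming a plain tanh RNN cell and toy dimensions (all variable names here are hypothetical):\n\nimport numpy as np\n\nrng = np.random.default_rng(0)\nV, H = 5, 4                      # toy vocabulary and hidden sizes\nx = rng.normal(size=(V, H))      # input word embeddings x_w\ny = rng.normal(size=(V, H))      # output word embeddings y_w\nb = np.zeros(V)                  # output biases b_w\nW_in = rng.normal(size=(H, H)) * 0.1\nW_rec = rng.normal(size=(H, H)) * 0.1\n\ndef rnn_step(w, h):\n    # Eq. 2: h_t = RNN(x_{w_t}, h_{t-1}); here a plain tanh cell.\n    return np.tanh(x[w] @ W_in + h @ W_rec)\n\ndef next_word_probs(h):\n    # Eq. 1: softmax over h_{t-1} . y_w + b_w for every w in V.\n    logits = y @ h + b\n    e = np.exp(logits - logits.max())\n    return e / e.sum()\n\nh = np.zeros(H)\nfor w in [0, 3, 1]:              # a toy word-id sequence\n    p = next_word_probs(h)       # p(w_t | w_1, ..., w_{t-1})\n    h = rnn_step(w, h)\nprint(p.round(3))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "RNN Language Model",

"sec_num": "2.1"

},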
|
{ |
|
"text": "RNN-based models have been reported to achieve better results on the CNN QA reading comprehension dataset (Hermann et al., 2015; Kobayashi et al., 2016) . In the CNN QA dataset, every named entity in each document is anonymized. This is done to allow the ability to comprehend a document using neither prior nor external knowledge to be evaluated. To capture the meanings of such anonymized entities, Kobayashi et al. (2016) proposed a new model that they named dynamic entity representation. This encodes the local contexts of an entity and uses the resulting context vector as the word embedding of a subsequent occurrence of that entity in the input layer of the RNN. This model: (1) constructs context vectors d e,i from the local contexts of an entity e at the i-th sentence;", |
|
"cite_spans": [ |
|
{ |
|
"start": 106, |
|
"end": 128, |
|
"text": "(Hermann et al., 2015;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 129, |
|
"end": 152, |
|
"text": "Kobayashi et al., 2016)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 401, |
|
"end": 424, |
|
"text": "Kobayashi et al. (2016)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dynamic Entity Representation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "(2) merges multiple contexts of the entity e through max pooling and produces the dynamic representation d e,i ; and (3) replaces the embedding of the entity e in the (i + 1)-th sentence with the dynamic embedding x e,i+1 produced from d e,i . More formally,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dynamic Entity Representation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "x e,i+1 = W dc d e,i + b e ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dynamic Entity Representation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "d e,i = maxpooling(d e,i , d e,i\u22121 ),", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Dynamic Entity Representation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "d e,i = ContextEncoder(e, i).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dynamic Entity Representation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Here, b e denotes a bias vector, maxpooling is a function that yields the largest value from the elementwise inputs, and ContextEncoder is an encoding function. Figure 2 gives an example of the process of encoding and merging contexts from sentences. An arbitrary encoder can be used for ContextEncoder; Kobayashi et al. (2016) used bidirectional RNNs, encoding the words surrounding the entity e of a sentence in both directions. If the entity e fails to appear in the i-th sentence, the embedding is not updated, i.e., d e,i = d e,i\u22121 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 304, |
|
"end": 327, |
|
"text": "Kobayashi et al. (2016)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 161, |
|
"end": 169, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dynamic Entity Representation", |
|
"sec_num": "2.2" |
|
}, |
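
{

"text": "A minimal illustrative sketch of the merging step above (elementwise max pooling of context vectors, followed by the dynamic embedding x_{e,i+1}), assuming a random stand-in for ContextEncoder and toy dimensions (all variable names are hypothetical):\n\nimport numpy as np\n\nrng = np.random.default_rng(0)\nD, E = 4, 4                          # toy context and embedding sizes\nW_dc = rng.normal(size=(E, D)) * 0.1\nb_e = np.zeros(E)\n\ndef context_encoder(entity, i):\n    # Stand-in for ContextEncoder(e, i); the paper uses a bidirectional RNN.\n    return rng.normal(size=D)\n\nd_merged = np.zeros(D)               # merged dynamic representation of e\nfor i in range(3):                   # entity e appears in sentences 0..2\n    d_i = context_encoder('e', i)\n    d_merged = np.maximum(d_merged, d_i)   # elementwise max pooling\n    x_next = W_dc @ d_merged + b_e   # embedding of e for sentence i+1\nprint(x_next.round(3))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Dynamic Entity Representation",

"sec_num": "2.2"

},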
|
{ |
|
"text": "In this section, we introduce the extension of dynamic entity representation to language modeling. From Equations 1 and 2, RNLM uses a set of word embeddings in the input layer to encode the preceding contextual words, and another set of word embeddings in the output layer to predict a word from the encoded context. Therefore, we consider incorporating the idea of dynamic representation into the word embeddings in the output layer (y w in Equation 1) as well as in the input layer (x w in Equation 2; refer to Figure 1 ). The novel extension of dynamic representation to the output layer affects predictions made for entities that appear repeatedly, whereas that in the input layer is expected to affect the prediction of words that follow the entities. The procedure for constructing dynamic representations of e, d e,i is the same as that introduced in Section 2.2. Before reading the (i + 1)-th sentence, the model constructs the context vectors [d e,1 , ..., d e,i ] from the local contexts of e in every preceding sentence. Here, d e,j denotes the context vector of e in the j-th sentence. ContextEncoder in the model produces a context vector d e for e at the t-th position in a sentence, using a bidirectional RNN 1 as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 514, |
|
"end": 522, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Proposed Method: Dynamic Neural Text Modeling", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "d e = ReLU(W hd [ h t\u22121 , h t+1 ]+b d ),", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Proposed Method: Dynamic Neural Text Modeling", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "h t = \u2212 \u2212\u2212 \u2192 RNN(x wt , h t\u22121 ),", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Proposed Method: Dynamic Neural Text Modeling", |
|
"sec_num": "3" |
|
}, |
|
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "t = \u2190 \u2212\u2212 \u2212 RNN(x wt , h t+1 ).", |
|
"eq_num": "(8)" |
|
} |
|
], |
|
"section": "Proposed Method: Dynamic Neural Text Modeling", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Here, ReLU denotes the ReLU activation function (Nair and Hinton, 2010) , while W dc and W hd correspond to learnable matrices; b d is a bias vector. As in the RNN language model, h t\u22121 and h t+1 as well as their composition d e can capture information necessary to predict the features of the target e at the t-th word. Following context encoding, the model merges the multiple context vectors, [d e,1 , ..., d e,i ], into the dynamic representation d e,i using a merging function. A range of functions are abailable for merging multiple vectors, while Kobayashi et al. (2016) used only max pooling (Equation 4). In this study, we explored three further functions: GRU, GRU followed by ReLU (d e,i = ReLU(GRU(d e,i , d e,i\u22121 ))) and a function that selects only the latest context, i.e., d e,i = d e,i . This comparison clarifies the effect of the accumulation of contexts as the experiments proceeded 2 . the hottest gift [ 1 ] could be [ 2 ] , but good luck finding one . as [ 3 ] reports , many stores have sold out of [ 2 ] even \u2026", |
|
"cite_spans": [ |
|
{ |
|
"start": 48, |
|
"end": 71, |
|
"text": "(Nair and Hinton, 2010)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 554, |
|
"end": 577, |
|
"text": "Kobayashi et al. (2016)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 978, |
|
"end": 983, |
|
"text": "[ 3 ]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Method: Dynamic Neural Text Modeling", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The hottest gift this Christmas could be Sony's new PlayStation 2, but good luck finding one. As Greg Lefevre reports, many stores have sold out of the game even \u2026 Original Version Figure 3 : An example document for Anonymized Language Modeling. Token \"[ k ]\" is an anonymized token that appears k-th in the entities in a document. Language models predict the next word from the preceding words, and calculate probabilities for whole word sequences.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 181, |
|
"end": 189, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Anonymized Version", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The merging function produces the dynamic representation d e,i of e. In language modeling, to read the (i + 1)-th sentence, the model uses two dynamic word embeddings of e in the input and output layers. The input embedding x e , used to encode contexts (Equation 2), and the output embedding y e , used to predict the occurrence of e (Equation 1), are replaced with dynamic versions:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Anonymized Version", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "x e = W dx d e,i + b x e ,", |
|
"eq_num": "(9)" |
|
} |
|
], |
|
"section": "Anonymized Version", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "y e = W dy d e,i + b y e ,", |
|
"eq_num": "(10)" |
|
} |
|
], |
|
"section": "Anonymized Version", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where W dx and W dy denote learnable matrices, and b x e and b y e denote learnable vectors tied to e. We can observe that a conventional RNN language model is a variant that removes the dynamic terms (W dx d e,i and W dy d e,i ) using only the static terms (b x e and b y e ) to represent e. The initial dynamic representation d e,0 is defined as a zero vector, so that the initial word embeddings (x e and y e ) are identical to the static terms (b x e and b y e ) until the point at which the first context of the target word e is observed. All parameters in the end-to-end model are learned entirely by backpropagation, maximizing the log-likelihood in the same way as a conventional RNN language model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Anonymized Version", |
|
"sec_num": null |
|
}, |
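
{

"text": "A minimal illustrative sketch of Equations 9 and 10, assuming toy dimensions (all variable names are hypothetical); with the initial dynamic representation set to zero, the embeddings reduce to the static terms:\n\nimport numpy as np\n\nrng = np.random.default_rng(1)\nD, E = 4, 4                          # toy sizes\nW_dx = rng.normal(size=(E, D)) * 0.1\nW_dy = rng.normal(size=(E, D)) * 0.1\nb_x_e = rng.normal(size=E)           # static input-layer term tied to e\nb_y_e = rng.normal(size=E)           # static output-layer term tied to e\n\ndef dynamic_embeddings(d_merged):\n    x_e = W_dx @ d_merged + b_x_e    # Eq. 9: input-layer embedding of e\n    y_e = W_dy @ d_merged + b_y_e    # Eq. 10: output-layer embedding of e\n    return x_e, y_e\n\nx0, y0 = dynamic_embeddings(np.zeros(D))         # before any context\nx1, y1 = dynamic_embeddings(rng.normal(size=D))  # after observing a context\nprint(np.allclose(x0, b_x_e), np.allclose(y0, b_y_e))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Anonymized Version",

"sec_num": null

},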
|
{ |
|
"text": "We can view the approach in Kobayashi et al. (2016) as a variant on the proposed method, but using the dynamic terms only in the input layer (for x e ). We can also view the copy mechanism (Gu et al., 2016; Gulcehre et al., 2016) as a variant on the proposed method, in which specific embeddings in the output layer are replaced with special dynamic vectors.", |
|
"cite_spans": [ |
|
{ |
|
"start": 28, |
|
"end": 51, |
|
"text": "Kobayashi et al. (2016)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 189, |
|
"end": 206, |
|
"text": "(Gu et al., 2016;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 207, |
|
"end": 229, |
|
"text": "Gulcehre et al., 2016)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Anonymized Version", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "This study explores methods for on-the-fly capture and exploitation of the meanings of unknown words or entities in a discourse. To do this, we introduce a novel evaluation task and dataset that we called Anonymized Language Modeling. Figure 3 gives an example from the dataset. Briefly, the dataset anonymizes certain noun phrases, treating them as unknown words and retaining their coreference relations. This allows a language model to track the context of every noun phrase in the discourse. Other words are left unchanged, allowing the language model to preserve the context of the anonymized (unknown) words, and to infer their meanings from the known words. The process was inspired by Hermann et al. 2015, whose approach has been explored by the research on reading comprehension.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 235, |
|
"end": 243, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Anonymized Language Modeling", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "More precisely, we used the OntoNotes (Pradhan et al., 2012) corpus, which includes documents with coreferences and named entity tags manually annotated. We assigned an anonymous identifier to every coreference chain in the corpus 3 in order of first appearance 4 , and replaced mentions of a coreference chain with its identifier. In our experiments, each coreference chain was given a dynamic representation. Following Mikolov et al. (2010) , we limited the vocabulary to 10,000 words appearing frequently in the corpus. Finally, we inserted \"<bos>\" and \"<eos>\" tokens to mark the beginning and end of each sentence.", |
|
"cite_spans": [ |
|
{ |
|
"start": 421, |
|
"end": 442, |
|
"text": "Mikolov et al. (2010)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Anonymized Language Modeling", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "An important difference between this dataset and the one presented in Hermann et al. 2015is in the way that coreferences are treated. Hermann et al. (2015) used automatic resolusion of coreferences, whereas our study made use of the manual annotations in the OntoNotes. Thus, the process of Hermann et al. (2015) introduced (intentional and unintentional) errors into the dataset. Additionally, the dataset did not assign an entity iden- tifier to a pronoun. In contrast, as our dataset has access to the manual annotations of coreferences, we are able to investigate the ability of the language model to capture meanings from contexts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Anonymized Language Modeling", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Dynamic updating could be applied to words in all lexical categories, including verbs, adjectives, and nouns without requiring additional extensions. However, verbs and adjectives were excluded from targets of dynamic updates in the experiments, for two reasons. First, proper nouns and nouns accounted for the majority (70%) of the low-frequency (unknown) words, followed by verbs (10%) and adjectives (9%). Second, we assumed that the meaning of a verb or adjective would shift less over the course of a discourse than that of a noun. When semantic information of unknown verbs and adjectives is required, their embeddings may be extracted from ad-hoc training on a different larger corpus. This, however, was beyond the scope of this study.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Anonymized Language Modeling", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "An experiment was conducted to investigate the effect of Dynamic Neural Text Model on the Anonymized Language Modeling dataset. The split of dataset followed that of the original corpus (Pradhan et al., 2012). Table 1 summarizes the statistics of the dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 210, |
|
"end": 217, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Setting", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "The baseline model was a typical LSTM RNN language model with 512 units. We compared three variants of the proposed model, using different applications of dynamic embedding: in the input layer only (as in Kobayashi et al. (2016) ), in the output layer only, and in both the input and output layers. The context encoders were bidirectional LSTMs with 512 units, the parameters of which were not the same as those in the LSTM RNN language models. All models were trained by maximizing the likelihood of correct tokens, to achieve best perplexity on the validation dataset 5 . Most hyper-parameters were tuned and fixed by the baseline model on the validation dataset 6 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 205, |
|
"end": 228, |
|
"text": "Kobayashi et al. (2016)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Setting", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "It is difficult to adequately train the all parts of a model using only the small dataset of Anonymized Language Modeling. We therefore pretrained word embeddings and ContextEncoder (the bidirectional RNNs and matrices in Equations 6-8) on a sentence completion task in which clozes were predicted from the surrounding words in a large corpus (Melamud et al., 2016) 7 . We used the objective function with negative sampling (Mikolov et al., 2013) :", |
|
"cite_spans": [ |
|
{ |
|
"start": 343, |
|
"end": 367, |
|
"text": "(Melamud et al., 2016) 7", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 424, |
|
"end": 446, |
|
"text": "(Mikolov et al., 2013)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Setting", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "e (log \u03c3(x e x e ) + v\u2208N eg (log \u03c3(\u2212x e x v ))). Here,x e is a context vector predicted by ContextEncoder, x e denotes the word embedding of a target word e appearing in the corpus, and N eg represents randomly sampled words. These pretrained parameters of ContextEncoder were fixed when the whole language model was trained on the Anonymized Language Modeling dataset. We implemented models in Python using the Chainer neural network library (Tokui et al., 2015) . The code and the constructed dataset are publicly available 8 . Table 2 shows performance of the baseline model and the three variants of the proposed method in terms of perplexity. The table reports the mean and standard error of three perplexity values after training using three different randomly chosen initializations (we used the same convention 5 We performed a validation at the end of every half epoch out of five epochs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 443, |
|
"end": 463, |
|
"text": "(Tokui et al., 2015)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 819, |
|
"end": 820, |
|
"text": "5", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 530, |
|
"end": 537, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Setting", |
|
"sec_num": "5.1" |
|
}, |
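
{

"text": "A minimal illustrative sketch of one summand of the negative-sampling objective above, assuming toy vectors rather than the Gigaword pretraining setup (all variable names are hypothetical):\n\nimport numpy as np\n\ndef sigmoid(z):\n    return 1.0 / (1.0 + np.exp(-z))\n\nrng = np.random.default_rng(0)\nH = 8\nx_hat_e = rng.normal(size=H)            # context vector from ContextEncoder\nx_e = rng.normal(size=H)                # embedding of the target word e\nx_neg = rng.normal(size=(5, H))         # embeddings of sampled negative words\n\nterm = np.log(sigmoid(x_hat_e @ x_e))\nterm += np.sum(np.log(sigmoid(-(x_neg @ x_hat_e))))\nprint(term)                             # to be maximized, summed over e",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Setting",

"sec_num": "5.1"

},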
|
{ |
|
"text": "6 Batchsize was 8. Adam (Kingma and Ba, 2015) with learning rate 10 \u22123 . Gradients were normalized so that their norm was smaller than 1. Truncation of backpropagation and updating was performed after every 20 sentences and at the end of document.", |
|
"cite_spans": [ |
|
{ |
|
"start": 24, |
|
"end": 45, |
|
"text": "(Kingma and Ba, 2015)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Perplexity", |
|
"sec_num": "5.2.1" |
|
}, |
|
{ |
|
"text": "7 We pretrained a model on the Gigaword Corpus, excluding sentences with more than 32 tokens. We performed training for 50000 iterations with a batch size of 128 and five negative samples. Only words that occurred no fewer than 500 times are used; other words were treated as unknown tokens. Melamud et al. (2016) used three different sets of word embeddings for the two inputs with respect to the encoders ( \u2212 \u2212\u2212 \u2192 RNN and \u2190 \u2212\u2212 \u2212 RNN) and the output (target). However, we forced the sets of word embeddings to share a single set of word embeddings in pretraining. We initialized the word embeddings in both the input layer (xw) and the output layer (yw) of the novel models, including the baseline model, with this single set. The word embeddings of all anonymized tokens were initialized as unknown words with the word embedding of \"<unk>\". throughout this paper). Here, we discuss the proposed method using GRU followed by ReLU as the merging function, as this achieved the best perplexity (see Section 5.2.2 for a comparison of functions). We also show perplexitiy values when evaluating words of specific categories: (1) all words;", |
|
"cite_spans": [ |
|
{ |
|
"start": 292, |
|
"end": 313, |
|
"text": "Melamud et al. (2016)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Perplexity", |
|
"sec_num": "5.2.1" |
|
}, |
|
{ |
|
"text": "(2) reappearing entity words;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Perplexity", |
|
"sec_num": "5.2.1" |
|
}, |
|
{ |
|
"text": "(3) words following entities; and (4) non-entity words. All variants of the proposed method outperformed the baseline model. Focusing on the categories (2) and (3) highlights the roles of dynamic updates of the input and output layers. Dynamic updates of the input layer (B) had a larger improvement for predicting words following entities (3) than those of the output layer (C). In contrast, dynamic updates of the output layer (C) were quite effective for predicting reappearing entities (2) whereas those of the input layer (B) were not. These facts confirm that: dynamic updates of the input layer help a model predict words following entities by supplying on-the-fly context information; and those of the output layer are effective to predict entity words appearing multiple times.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Perplexity", |
|
"sec_num": "5.2.1" |
|
}, |
|
{ |
|
"text": "In addition, dynamic updates of both the input and output layers (D) further improved the performance from those of either the output (C) or input (B) layer. Thus, the proposed dynamic output was shown to be compatible with dynamic input, and vice versa. These results demonstrated the positive effect of capturing and exploiting the contextsensitive meanings of entities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Perplexity", |
|
"sec_num": "5.2.1" |
|
}, |
|
{ |
|
"text": "In order to examine whether dynamic updates of the input and output embeddings capture contextsensitive meanings of entities, we present Figure 4 : Perplexity of all tokens relative to the time at which they appear in the document.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 137, |
|
"end": 145, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Perplexity", |
|
"sec_num": "5.2.1" |
|
}, |
|
{ |
|
"text": "especially in the latter part of documents, where repeated words are more likely to occur. Figure 5 shows the perplexity with respect to the frequency of words t within documents. Note that the word embedding at the first occurrence of an entity is static. This figure indicates that entities appearing many times enjoy the benefit of the dynamic language model. Figure 6 visualizes the perplexity of entities with respect to the numbers of their antecedent candidates. It is clear from this figure that the proposed method is better at memorizing the semantic information of entities appearing repeatedly in documents than the baseline. These results also demonstrated the contribution of dynamic updates of word embeddings. Table 3 compares models with different merging functions; GRU-ReLU, GRU, max pooling, and the use of the latest context. The use of the latest context had the worst performance for all variants of the proposed method. Thus, a proper accumulation of multiple contexts is indispensable for dynamic updates of word embeddings. Although Kobayashi et al. (2016) used only max pooling as the merging function, GRU and GRU-ReLU were shown to be comparable in performance and superior to max pooling when predicting tokens related to entities (2) and (3).", |
|
"cite_spans": [ |
|
{ |
|
"start": 1059, |
|
"end": 1082, |
|
"text": "Kobayashi et al. (2016)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 99, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 363, |
|
"end": 371, |
|
"text": "Figure 6", |
|
"ref_id": "FIGREF4" |
|
}, |
|
{ |
|
"start": 726, |
|
"end": 733, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Perplexity", |
|
"sec_num": "5.2.1" |
|
}, |
|
{ |
|
"text": "In order to examine contribution of the dynamic language models on a downstream task, we conducted cloze tests for comprehension of a sentence with reappearing entities in a discourse. Given multiple preceding entities E = {e + , e 1 , e 2 , ...} followed by a cloze sentence, the models were required to predict the true antecedent e + which allowed the cloze to be correctly filled, among the other alternatives E \u2212 = {e 1 , e 2 , ...}. Language models solve this task by comparing the likelihoods of sentences filled with antecedent candidates in E and returning the entity with the highest likelihood of the sentence. In this experiment, the performance of a model was represented by the Mean Quantile (MQ) (Guu et al., 2015) . The MQ computes the mean ratio at which the model predicts a correct antecedent e + more likely than negative antecedents in E \u2212 ,", |
|
"cite_spans": [ |
|
{ |
|
"start": 711, |
|
"end": 729, |
|
"text": "(Guu et al., 2015)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Predicting Entities by Likelihood of a Sentence", |
|
"sec_num": "5.2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "MQ = |{e \u2212 \u2208 E \u2212 : p(e \u2212 ) < p(e + )}| |E \u2212 | .", |
|
"eq_num": "(11)" |
|
} |
|
], |
|
"section": "Predicting Entities by Likelihood of a Sentence", |
|
"sec_num": "5.2.3" |
|
}, |
|
{ |
|
"text": "Here, p(e) denotes the likelihood of a sentence whose cloze is filled with e. If the correct antecedent e + yields highest likelihood, MQ gets 1. Table 4 reports MQs for the three variants and merging functions. Dynamic updates of the input layer greatly boosted the performance by approximately 10%, while using both dynamic input and output improved it further. In this experiment, the merging functions with GRUs outperform the others. These results demonstrated that Dynamic Neural Text Models can accumulate a new information in word embeddings and contribute to modeling the semantic changes of entities in a discourse. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 146, |
|
"end": 153, |
|
"text": "Table 4", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Predicting Entities by Likelihood of a Sentence", |
|
"sec_num": "5.2.3" |
|
}, |
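
{

"text": "A minimal illustrative sketch of the Mean Quantile in Equation 11 for a single cloze instance, assuming hypothetical sentence likelihoods for each candidate antecedent:\n\ndef mean_quantile(p_pos, p_negs):\n    # Fraction of negative antecedents scored strictly below the correct one.\n    return sum(p < p_pos for p in p_negs) / len(p_negs)\n\n# Example: the correct antecedent beats 2 of 3 negatives, so MQ = 2/3.\nprint(round(mean_quantile(0.8, [0.5, 0.9, 0.3]), 3))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Predicting Entities by Likelihood of a Sentence",

"sec_num": "5.2.3"

},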
|
{ |
|
"text": "An approach to addressing the unknown word problem used in recent studies (Kim et al., 2016; Sennrich et al., 2016; Luong and Manning, 2016; Schuster and Nakajima, 2012) comprises the embeddings of unknown words from character embeddings or subword embeddings. Li and Jurafsky (2015) applied word disambiguation and use a sense embedding to the target word. captured the context-sensitive meanings of common words using word embeddings, applied through a gating function controlled by history words, in the context of machine translation. In future work, we will explore a wider range of models, to integrate our dynamic text modeling with methods that estimate the meaning of unknown words or entities from their constituents. When addressing well-known entities such as Obama and Trump, it makes sense to learn their embeddings from external resources, as well as dynamically from the preceding context in a given discourse (as in our Dynamic Neural Text Model). The integration of these two sources of information is an intriguing challenge in language modeling.", |
|
"cite_spans": [ |
|
{ |
|
"start": 74, |
|
"end": 92, |
|
"text": "(Kim et al., 2016;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 93, |
|
"end": 115, |
|
"text": "Sennrich et al., 2016;", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 116, |
|
"end": 140, |
|
"text": "Luong and Manning, 2016;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 141, |
|
"end": 169, |
|
"text": "Schuster and Nakajima, 2012)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "A key aspect of our model is its incorporation of the copy mechanism (Gu et al., 2016; Gulcehre et al., 2016) , using dynamic word embeddings in the output layer. Independently of this study, several research groups have explored the use of variants of the copy mechanisms in language modeling (Merity et al., 2017; Grave et al., 2017; Peng and Roth, 2016) . These studies, however, did not incorporate dynamic representations in the input layer. In contrast, our proposal incorporates the copy mechanism through the use of dynamic representations in the output layer, integrating them with dynamic mechanisms in both the input and output layers by applying dynamic entity-wise representation. Our experiments have demonstrated the benefits of such integration.", |
|
"cite_spans": [ |
|
{ |
|
"start": 69, |
|
"end": 86, |
|
"text": "(Gu et al., 2016;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 87, |
|
"end": 109, |
|
"text": "Gulcehre et al., 2016)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 294, |
|
"end": 315, |
|
"text": "(Merity et al., 2017;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 316, |
|
"end": 335, |
|
"text": "Grave et al., 2017;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 336, |
|
"end": 356, |
|
"text": "Peng and Roth, 2016)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Another related trend in recent studies is the use of neural network to capture the information flow of a discourse. One approach has been to link RNNs across sentences (Wang and Cho, 2016; Serban et al., 2016) , while a second approach has expolited a type of memory space to store contextual information (Sukhbaatar et al., 2015; Tran et al., 2016; Merity et al., 2017) . Research on reading comprehension (Kobayashi et al., 2016; Henaff et al., 2017) and coreference resolution (Wiseman et al., 2016; Clark and Manning, 2016b,a) has shown the salience of entitywise context information. Our model could be located within such approaches, but is distinct in being the first model to make use of entity-wise context information in both the input and output layers for sentence generation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 169, |
|
"end": 189, |
|
"text": "(Wang and Cho, 2016;", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 190, |
|
"end": 210, |
|
"text": "Serban et al., 2016)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 306, |
|
"end": 331, |
|
"text": "(Sukhbaatar et al., 2015;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 332, |
|
"end": 350, |
|
"text": "Tran et al., 2016;", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 351, |
|
"end": 371, |
|
"text": "Merity et al., 2017)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 408, |
|
"end": 432, |
|
"text": "(Kobayashi et al., 2016;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 433, |
|
"end": 453, |
|
"text": "Henaff et al., 2017)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 481, |
|
"end": 503, |
|
"text": "(Wiseman et al., 2016;", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 504, |
|
"end": 531, |
|
"text": "Clark and Manning, 2016b,a)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We summarize and compare works for entitycentric neural networks that read a document. Kobayashi et al. (2016) pioneered entity-centric neural models tracking states in a discourse. They proposed Dynamic Entity Representation, which encodes contexts of entities and updates the states using entity-wise memories. Wiseman et al. (2016) also proposed a method for managing similar entity-wise features on neural networks and improved a coreference resolution model. Clark and Manning (2016b,a) incorporated such entitywise representations in mention-ranking coreference models. Our paper follows Kobayashi et al. (2016) and exploits dynamic entity reprensetions in a neural language model, where dynamic reporesentations are used not only in the neural encoder but also in the decoder, applicable to various sequence generation tasks, e.g., machine translation and dialog response generation. Simultaneously with our paper, Ji et al. (2017) use dynamic entity representation in a neural language model for reranking outputs of a coreference resolution system. experiment language modeling with referring to internal contexts or external data. Henaff et al. (2017) focus on neural networks tracking contexts of entities, achieving the state-of-the-art result in bAbI ), a reading comprehension task. They encode the contexts of each entity by an attention-like gated RNN instead of using coreference links directly. Dhingra et al. (2017) also try to improve a reading comprehension model using coreference links. Similarly to our dynamic entity representation, Bahdanau et al. (2017) construct on-the-fly word embeddings of rare words from dictionary definitions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 87, |
|
"end": 110, |
|
"text": "Kobayashi et al. (2016)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 313, |
|
"end": 334, |
|
"text": "Wiseman et al. (2016)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 464, |
|
"end": 491, |
|
"text": "Clark and Manning (2016b,a)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 594, |
|
"end": 617, |
|
"text": "Kobayashi et al. (2016)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 922, |
|
"end": 938, |
|
"text": "Ji et al. (2017)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 1141, |
|
"end": 1161, |
|
"text": "Henaff et al. (2017)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 1413, |
|
"end": 1434, |
|
"text": "Dhingra et al. (2017)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1558, |
|
"end": 1580, |
|
"text": "Bahdanau et al. (2017)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The fisrt key component of dynamic entity representation is a function to merge more than one contexts about an entity into a consistent representation of the entity. Various choices for the function exist, e.g., max or averagepooling (Kobayashi et al., 2016; Clark and Manning, 2016b) , RNN (GRU, LSTM (Wiseman et al., 2016; or other gated RNNs (Henaff et al., 2017; Ji et al., 2017) ), or using the latest context only (without any merging) . This paper is the first work comparing the effects of those choices (see Section 5.2.2).", |
|
"cite_spans": [ |
|
{ |
|
"start": 235, |
|
"end": 259, |
|
"text": "(Kobayashi et al., 2016;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 260, |
|
"end": 285, |
|
"text": "Clark and Manning, 2016b)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 303, |
|
"end": 325, |
|
"text": "(Wiseman et al., 2016;", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 346, |
|
"end": 367, |
|
"text": "(Henaff et al., 2017;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 368, |
|
"end": 384, |
|
"text": "Ji et al., 2017)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The second component is a function to encode local contexts from a given text, e.g., bidirectional RNN encoding (Kobayashi et al., 2016) , unidirectional RNN used in a language model (Ji et al., 2017; , feedforward neural network with a sentence vector and an entity's word vector (Henaff et al., 2017) or hand-crafted features with word embeddings (Wiseman et al., 2016; Clark and Manning, 2016b) . This study employs bi-RNN analogously to Kobayashi et al. (2016) , which can access full context with powerful learnable units.", |
|
"cite_spans": [ |
|
{ |
|
"start": 112, |
|
"end": 136, |
|
"text": "(Kobayashi et al., 2016)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 183, |
|
"end": 200, |
|
"text": "(Ji et al., 2017;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 281, |
|
"end": 302, |
|
"text": "(Henaff et al., 2017)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 349, |
|
"end": 371, |
|
"text": "(Wiseman et al., 2016;", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 372, |
|
"end": 397, |
|
"text": "Clark and Manning, 2016b)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 441, |
|
"end": 464, |
|
"text": "Kobayashi et al. (2016)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In the task setting proposed in this study, a model must capture the meaning of a given specific word from a small number of its contexts in a given discourse. The task could also be seen as novel one-shot learning (Fei-Fei et al., 2006) of word meanings. One-shot learning for NLP like this has been little studied, with the exception of the study by , which used a task in which the context of a target word is matched with a different context of the same word.", |
|
"cite_spans": [ |
|
{ |
|
"start": 215, |
|
"end": 237, |
|
"text": "(Fei-Fei et al., 2006)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "This study addressed the problem of identifying the meaning of unknown words or entities in a discourse with respect to the word embedding approaches used in neural language models. We proposed a method for on-the-fly construction and exploitation of word embeddings in both the input layer and output layer of a neural model by tracking contexts. This extended the dynamic entity representation presented in Kobayashi et al. (2016) , and incorporated a copy mechanism proposed independently by Gu et al. (2016) and Gulcehre et al. (2016) . In the course of the study, we also constructed a new task and dataset, called Anonymized Language Modeling, for evaluating the ability of a model to capture word meanings while reading. Experiments conducted using our novel dataset demonstrated that the RNN language model variants proposed in this study outperformed the baseline model. More detailed analysis indicated that the proposed method was particularly successful in capturing the meaning of an unknown words from texts containing few instances.", |
|
"cite_spans": [ |
|
{ |
|
"start": 409, |
|
"end": 432, |
|
"text": "Kobayashi et al. (2016)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 495, |
|
"end": 511, |
|
"text": "Gu et al. (2016)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 516, |
|
"end": 538, |
|
"text": "Gulcehre et al. (2016)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Equations 2 and 7 are identical but do not share internal parameters.2 Note that merging functions are not restricted to considering two arguments (a new context and a merged past context) recurrently but can consider all vectors over the wholehistory [d e,1 , ..., d e,i ] (e.g., by using attention mechanism(Bahdanau et al., 2015)). However, for simplicity, this research focuses only on the case of a function with two arguments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We used documents with no more than 50 clusters, which covered more than 97% of the corpus.4 Following the study of Luong et al. (2015), we assigned \"<unk1>\", \"<unk2>\", ... to coreference clusters in order of first appearance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work was supported by JSPS KAKENHI Grant Number 15H01702 and JSPS KAKENHI Grant Number 15H05318. We thank members of Preferred Networks, Inc., Makoto Miwa and Daichi Mochihashi for suggestive discussions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
}
|
], |
|
"bib_entries": { |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Learning to compute word embeddings on the fly", |
|
"authors": [ |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Bosc", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stanis\u0142aw", |
|
"middle": [], |
|
"last": "Jastrz\u0119bski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Grefenstette", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pascal", |
|
"middle": [], |
|
"last": "Vincent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1706.00286" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dzmitry Bahdanau, Tom Bosc, Stanis\u0142aw Jastrz\u0119bski, Edward Grefenstette, Pascal Vincent, and Yoshua Bengio. 2017. Learning to compute word embed- dings on the fly. arXiv preprint arXiv:1706.00286.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Neural machine translation by jointly learning to align and translate", |
|
"authors": [ |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly learning to align and translate. In Proceedings of ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "A neural probabilistic language model", |
|
"authors": [ |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R\u00e9jean", |
|
"middle": [], |
|
"last": "Ducharme", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pascal", |
|
"middle": [], |
|
"last": "Vincent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Jauvin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "JOURNAL OF MACHINE LEARN-ING RESEARCH", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "1137--1155", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoshua Bengio, R\u00e9jean Ducharme, Pascal Vincent, and Christian Jauvin. 2003. A neural probabilistic lan- guage model. JOURNAL OF MACHINE LEARN- ING RESEARCH, 3:1137-1155.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "One billion word benchmark for measuring progress in statistical language modeling", |
|
"authors": [ |
|
{ |
|
"first": "Ciprian", |
|
"middle": [], |
|
"last": "Chelba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qi", |
|
"middle": [], |
|
"last": "Ge", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of INTERSPEECH", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2635--2639", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ciprian Chelba, Tomas Mikolov, Mike Schuster, Qi Ge, Thorsten Brants, Phillipp Koehn, and Tony Robin- son. 2014. One billion word benchmark for mea- suring progress in statistical language modeling. In Proceedings of INTERSPEECH, pages 2635-2639.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Learning phrase representations using RNN encoder-decoder for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bart", |
|
"middle": [], |
|
"last": "Van Merrienboer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u00c7aglar", |
|
"middle": [], |
|
"last": "G\u00fcl\u00e7ehre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fethi", |
|
"middle": [], |
|
"last": "Bougares", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Holger", |
|
"middle": [], |
|
"last": "Schwenk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kyunghyun Cho, Bart van Merrienboer, \u00c7aglar G\u00fcl\u00e7ehre, Dzmitry Bahdanau, Fethi Bougares, Hol- ger Schwenk, and Yoshua Bengio. 2014. Learning phrase representations using RNN encoder-decoder for statistical machine translation. In Proceedings of EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Context-dependent word representation for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Heeyoul", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Computer Speech & Language", |
|
"volume": "45", |
|
"issue": "", |
|
"pages": "149--160", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Heeyoul Choi, Kyunghyun Cho, and Yoshua Bengio. 2017. Context-dependent word representation for neural machine translation. Computer Speech & Language, 45:149-160.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Deep reinforcement learning for mention-ranking coreference models", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2256--2262", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin Clark and Christopher D. Manning. 2016a. Deep reinforcement learning for mention-ranking coreference models. In Proceedings of EMNLP, pages 2256-2262.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Improving coreference resolution by learning entitylevel distributed representations", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "643--653", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin Clark and Christopher D. Manning. 2016b. Im- proving coreference resolution by learning entity- level distributed representations. In Proceedings of ACL, pages 643-653.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Language Modeling with Gated Convolutional Networks", |
|
"authors": [ |
|
{ |
|
"first": "Yann", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Dauphin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Auli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Grangier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1612.08083" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yann N. Dauphin, Angela Fan, Michael Auli, and David Grangier. 2016. Language Modeling with Gated Convolutional Networks. arXiv preprint arXiv:1612.08083.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Linguistic knowledge as memory for recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Bhuwan", |
|
"middle": [], |
|
"last": "Dhingra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1703.02620" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bhuwan Dhingra, Zhilin Yang, William W Cohen, and Ruslan Salakhutdinov. 2017. Linguistic knowledge as memory for recurrent neural networks. arXiv preprint arXiv:1703.02620.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Oneshot learning of object categories", |
|
"authors": [ |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Fei-Fei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rob", |
|
"middle": [], |
|
"last": "Fergus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pietro", |
|
"middle": [], |
|
"last": "Perona", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "IEEE transactions on TPAMI", |
|
"volume": "28", |
|
"issue": "4", |
|
"pages": "594--611", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Li Fei-Fei, Rob Fergus, and Pietro Perona. 2006. One- shot learning of object categories. IEEE transac- tions on TPAMI, 28(4):594-611.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Improving neural language models with a continuous cache", |
|
"authors": [ |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicolas", |
|
"middle": [], |
|
"last": "Usunier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Edouard Grave, Armand Joulin, and Nicolas Usunier. 2017. Improving neural language models with a continuous cache. In Proceedings of ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Incorporating copying mechanism in sequence-to-sequence learning", |
|
"authors": [ |
|
{ |
|
"first": "Jiatao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhengdong", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [ |
|
"O", |
|
"K" |
|
], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1631--1640", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiatao Gu, Zhengdong Lu, Hang Li, and O.K. Vic- tor Li. 2016. Incorporating copying mechanism in sequence-to-sequence learning. In Proceedings of ACL, pages 1631-1640.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Pointing the unknown words", |
|
"authors": [ |
|
{ |
|
"first": "Caglar", |
|
"middle": [], |
|
"last": "Gulcehre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sungjin", |
|
"middle": [], |
|
"last": "Ahn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ramesh", |
|
"middle": [], |
|
"last": "Nallapati", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bowen", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "140--149", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Caglar Gulcehre, Sungjin Ahn, Ramesh Nallapati, Bowen Zhou, and Yoshua Bengio. 2016. Pointing the unknown words. In Proceedings of ACL, pages 140-149.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Traversing knowledge graphs in vector space", |
|
"authors": [ |
|
{ |
|
"first": "Kelvin", |
|
"middle": [], |
|
"last": "Guu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Miller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "318--327", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kelvin Guu, John Miller, and Percy Liang. 2015. Traversing knowledge graphs in vector space. In Proceedings of EMNLP, pages 318-327.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Tracking the world state with recurrent entity networks", |
|
"authors": [ |
|
{ |
|
"first": "Mikael", |
|
"middle": [], |
|
"last": "Henaff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arthur", |
|
"middle": [], |
|
"last": "Szlam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yann", |
|
"middle": [], |
|
"last": "Lecun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mikael Henaff, Jason Weston, Arthur Szlam, Antoine Bordes, and Yann LeCun. 2017. Tracking the world state with recurrent entity networks. In Proceedings of ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Teaching machines to read and comprehend", |
|
"authors": [ |
|
{ |
|
"first": "Karl", |
|
"middle": [], |
|
"last": "Moritz Hermann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Kocisky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Grefenstette", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lasse", |
|
"middle": [], |
|
"last": "Espeholt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Will", |
|
"middle": [], |
|
"last": "Kay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mustafa", |
|
"middle": [], |
|
"last": "Suleyman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1684--1692", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karl Moritz Hermann, Tomas Kocisky, Edward Grefenstette, Lasse Espeholt, Will Kay, Mustafa Su- leyman, and Phil Blunsom. 2015. Teaching ma- chines to read and comprehend. In Proceedings of NIPS, pages 1684-1692.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Long short-term memory", |
|
"authors": [ |
|
{ |
|
"first": "Sepp", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00fcrgen", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural computation", |
|
"volume": "9", |
|
"issue": "8", |
|
"pages": "1735--1780", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation, 9(8):1735-1780.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Dynamic entity representations in neural language models", |
|
"authors": [ |
|
{ |
|
"first": "Yangfeng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chenhao", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Martschat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yangfeng Ji, Chenhao Tan, Sebastian Martschat, Yejin Choi, and Noah A. Smith. 2017. Dynamic entity representations in neural language models. In Pro- ceedings of EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Exploring the limits of language modeling", |
|
"authors": [ |
|
{ |
|
"first": "Rafal", |
|
"middle": [], |
|
"last": "Jozefowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonghui", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1602.02410" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rafal Jozefowicz, Oriol Vinyals, Mike Schuster, Noam Shazeer, and Yonghui Wu. 2016. Exploring the limits of language modeling. arXiv preprint arXiv:1602.02410.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Character-aware neural language models", |
|
"authors": [ |
|
{ |
|
"first": "Yoon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yacine", |
|
"middle": [], |
|
"last": "Jernite", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Sontag", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2741--2749", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoon Kim, Yacine Jernite, David Sontag, and Alexan- der M. Rush. 2016. Character-aware neural lan- guage models. In Proceedings of AAAI, pages 2741- 2749.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "Diederik", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In Proceedings of ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Dynamic entity representation with max-pooling improves machine reading", |
|
"authors": [ |
|
{ |
|
"first": "Sosuke", |
|
"middle": [], |
|
"last": "Kobayashi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ran", |
|
"middle": [], |
|
"last": "Tian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naoaki", |
|
"middle": [], |
|
"last": "Okazaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kentaro", |
|
"middle": [], |
|
"last": "Inui", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "850--855", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sosuke Kobayashi, Ran Tian, Naoaki Okazaki, and Kentaro Inui. 2016. Dynamic entity representation with max-pooling improves machine reading. In Proceedings of NAACL-HLT, pages 850-855.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Do multi-sense embeddings improve natural language understanding?", |
|
"authors": [ |
|
{ |
|
"first": "Jiwei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1722--1732", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiwei Li and Dan Jurafsky. 2015. Do multi-sense em- beddings improve natural language understanding? In Proceedings of EMNLP, pages 1722-1732.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Achieving open vocabulary neural machine translation with hybrid word-character models", |
|
"authors": [ |
|
{ |
|
"first": "Minh-Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D. Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1054--1063", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minh-Thang Luong and D. Christopher Manning. 2016. Achieving open vocabulary neural machine translation with hybrid word-character models. In Proceedings of ACL, pages 1054-1063.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Addressing the rare word problem in neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Zaremba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "11--19", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thang Luong, Ilya Sutskever, Quoc Le, Oriol Vinyals, and Wojciech Zaremba. 2015. Addressing the rare word problem in neural machine translation. In Pro- ceedings of ACL, pages 11-19.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Learning generic context embedding with bidirectional lstm", |
|
"authors": [ |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Melamud", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Goldberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of CoNLL", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "51--61", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oren Melamud, Jacob Goldberger, and Ido Dagan. 2016. context2vec: Learning generic context em- bedding with bidirectional lstm. In Proceedings of CoNLL, pages 51-61.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Pointer sentinel mixture models", |
|
"authors": [ |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Merity", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Bradbury", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephen Merity, Caiming Xiong, James Bradbury, and Richard Socher. 2017. Pointer sentinel mixture models. In Proceedings of ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Recurrent neural network based language model", |
|
"authors": [ |
|
{ |
|
"first": "Tom\u00e1\u0161", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Karafi\u00e1t", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luk\u00e1\u0161", |
|
"middle": [], |
|
"last": "Burget", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ja\u0148", |
|
"middle": [], |
|
"last": "Cernock\u00fd", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanjeev", |
|
"middle": [], |
|
"last": "Khudanpur", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of INTERSPEECH", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1045--1048", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tom\u00e1\u0161 Mikolov, Martin Karafi\u00e1t, Luk\u00e1\u0161 Burget, Ja\u0148 Cernock\u00fd, and Sanjeev Khudanpur. 2010. Recurrent neural network based language model. In Proceed- ings of INTERSPEECH, pages 1045-1048.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Distributed representations of words and phrases and their compositionality", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gregory", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3111--3119", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Gregory S. Corrado, and Jeffrey Dean. 2013. Distributed repre- sentations of words and phrases and their composi- tionality. In Proceedings of NIPS, pages 3111-3119.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Rectified linear units improve restricted boltzmann machines", |
|
"authors": [ |
|
{ |
|
"first": "Vinod", |
|
"middle": [], |
|
"last": "Nair", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of ICML", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "807--814", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vinod Nair and Geoffrey E. Hinton. 2010. Recti- fied linear units improve restricted boltzmann ma- chines. In Proceedings of ICML, pages 807-814. Omnipress.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Two discourse driven language models for semantics", |
|
"authors": [ |
|
{ |
|
"first": "Haoruo", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "290--300", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haoruo Peng and Dan Roth. 2016. Two discourse driven language models for semantics. In Proceed- ings of ACL, pages 290-300.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "CoNLL-2012 shared task: Modeling multilingual unrestricted coreference in OntoNotes", |
|
"authors": [ |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Pradhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Moschitti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nianwen", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olga", |
|
"middle": [], |
|
"last": "Uryupina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuchen", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sameer Pradhan, Alessandro Moschitti, Nianwen Xue, Olga Uryupina, and Yuchen Zhang. 2012. CoNLL- 2012 shared task: Modeling multilingual unre- stricted coreference in OntoNotes. In Proceedings of CoNLL.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Japanese and korean voice search", |
|
"authors": [ |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kaisuke", |
|
"middle": [], |
|
"last": "Nakajima", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5149--5152", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mike Schuster and Kaisuke Nakajima. 2012. Japanese and korean voice search. In Proceedings of ICASSP, pages 5149-5152.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Neural machine translation of rare words with subword units", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1715--1725", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural machine translation of rare words with subword units. In Proceedings of ACL, pages 1715- 1725.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Building end-to-end dialogue systems using generative hierarchical neural network models", |
|
"authors": [ |
|
{ |
|
"first": "Iulian", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Serban", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Sordoni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aaron", |
|
"middle": [], |
|
"last": "Courville", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joelle", |
|
"middle": [], |
|
"last": "Pineau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3776--3783", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Iulian V. Serban, Alessandro Sordoni, Yoshua Bengio, Aaron Courville, and Joelle Pineau. 2016. Building end-to-end dialogue systems using generative hier- archical neural network models. In Proceedings of AAAI, pages 3776-3783.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "End-to-end memory networks", |
|
"authors": [ |
|
{ |
|
"first": "Sainbayar", |
|
"middle": [], |
|
"last": "Sukhbaatar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arthur", |
|
"middle": [], |
|
"last": "Szlam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rob", |
|
"middle": [], |
|
"last": "Fergus", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2440--2448", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sainbayar Sukhbaatar, Arthur Szlam, Jason Weston, and Rob Fergus. 2015. End-to-end memory net- works. In Proceedings of NIPS, pages 2440-2448.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Chainer: a next-generation open source framework for deep learning", |
|
"authors": [ |
|
{ |
|
"first": "Seiya", |
|
"middle": [], |
|
"last": "Tokui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenta", |
|
"middle": [], |
|
"last": "Oono", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shohei", |
|
"middle": [], |
|
"last": "Hido", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Clayton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of Workshop on LearningSys in NIPS 28", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Seiya Tokui, Kenta Oono, Shohei Hido, and Justin Clayton. 2015. Chainer: a next-generation open source framework for deep learning. In Proceedings of Workshop on LearningSys in NIPS 28.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Recurrent memory networks for language modeling", |
|
"authors": [ |
|
{ |
|
"first": "Ke", |
|
"middle": [], |
|
"last": "Tran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arianna", |
|
"middle": [], |
|
"last": "Bisazza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christof", |
|
"middle": [], |
|
"last": "Monz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "321--331", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ke Tran, Arianna Bisazza, and Christof Monz. 2016. Recurrent memory networks for language modeling. In Proceedings of NAACL-HLT, pages 321-331.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Matching networks for one shot learning", |
|
"authors": [ |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Charles", |
|
"middle": [], |
|
"last": "Blundell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Lillicrap", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Koray", |
|
"middle": [], |
|
"last": "Kavukcuoglu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daan", |
|
"middle": [], |
|
"last": "Wierstra", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3630--3638", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oriol Vinyals, Charles Blundell, Tim Lillicrap, koray kavukcuoglu, and Daan Wierstra. 2016. Matching networks for one shot learning. In Proceedings of NIPS, pages 3630-3638.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Larger-context language modelling with recurrent neural network", |
|
"authors": [ |
|
{ |
|
"first": "Tian", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1319--1329", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tian Wang and Kyunghyun Cho. 2016. Larger-context language modelling with recurrent neural network. In Proceedings of ACL, pages 1319-1329.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Towards ai-complete question answering: A set of prerequisite toy tasks", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sumit", |
|
"middle": [], |
|
"last": "Chopra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1502.05698" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason Weston, Antoine Bordes, Sumit Chopra, and Tomas Mikolov. 2015. Towards ai-complete ques- tion answering: A set of prerequisite toy tasks. arXiv preprint arXiv:1502.05698.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Learning global features for coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Wiseman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Rush", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stuart", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Shieber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "994--1004", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sam Wiseman, Alexander M. Rush, and Stuart M. Shieber. 2016. Learning global features for coref- erence resolution. In Proceedings of NAACL-HLT, pages 994-1004.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Reference-aware language models", |
|
"authors": [ |
|
{ |
|
"first": "Zichao", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wang", |
|
"middle": [], |
|
"last": "Ling", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zichao Yang, Phil Blunsom, Chris Dyer, and Wang Ling. 2017. Reference-aware language models. In Proceedings of EMNLP.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Dynamic Neural Text Modeling: the meaning representation of each unknown word, denoted by a coreference index \"[ k ]\", is inferred from the local contexts in which it occurs." |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "5 and 6.Figure 4depicts the perplexity of words with different positions in a document 9 . The figure confirms that the advantage of the proposed method over the baseline is more evident9 It is more difficult to predict tokens appearing latter in a document because the number of new (unknown) tokens increases as a model reads the document." |
|
}, |
|
"FIGREF3": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Perplexity of tokens following the entities relative to the time at which the entity occurs." |
|
}, |
|
"FIGREF4": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Perplexity of entities relative to the number of antecedent entities." |
|
}, |
|
"TABREF0": { |
|
"text": "and convolutional neural network (Dauphin ... [ 1 ] killed [ 2 ] with bombs \u2026 ... police suspects [ 1 ] attacked ... ... police will arrest [ 1 ] \u2026 ... will arrest [ 1 ] soon \u2026", |
|
"content": "<table><tr><td>d [2],2</td><td/><td colspan=\"2\">d [1],2</td><td/></tr><tr><td/><td/><td/><td/><td>!</td><td>!</td><td>!</td><td>!</td></tr><tr><td>x [2]</td><td>x [1]</td><td>y [2]</td><td>y [1] =</td><td>\u2212 \u2212\u2212 \u2192 RNN(</td></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"text": "d [4],2 d [4],1 ... [ 1 ] killed [ 2 ] with bombs \u2026 ... police suspects [ 1 ] attacked ...", |
|
"content": "<table><tr><td>d' [1],1</td><td>d' [2],1</td></tr><tr><td/><td>Merge</td></tr><tr><td/><td>Merge</td><td>d [3],1 d [2],1</td></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"text": "Perplexities for each token group of models on the test set of Anonymized Language Modeling dataset. All values are averages with standard errors, calculated respectively by three models (trained with different random numbers). Dynamic models used GRU followed by ReLU as the merging function.", |
|
"content": "<table><tr><td/><td/><td>(2) Reappearing</td><td>(3) Following</td><td/></tr><tr><td>Models</td><td>(1) All</td><td>entities</td><td>entities</td><td>(4) Non-entities</td></tr><tr><td>LSTM LM (Baseline) (A)</td><td>64.8\u00b10.6</td><td>48.0\u00b12.6</td><td>128.6\u00b12.0</td><td>68.5\u00b10.2</td></tr><tr><td>With only dynamic input (B)</td><td>62.8\u00b10.3</td><td>42.4\u00b11.1</td><td>109.5\u00b11.4</td><td>66.4\u00b10.3</td></tr><tr><td>With only dynamic output (C)</td><td>62.5\u00b10.3</td><td>35.9\u00b13.7</td><td>129.0\u00b10.7</td><td>69.5\u00b10.3</td></tr><tr><td colspan=\"2\">With dynamic input & output (D) 60.7\u00b10.2</td><td>34.0\u00b11.3</td><td>106.8\u00b10.6</td><td>67.6\u00b10.04</td></tr><tr><td>Table 2:</td><td/><td/><td/><td/></tr><tr><td/><td/><td colspan=\"3\">8 https://github.com/soskek/dynamic_</td></tr><tr><td/><td/><td colspan=\"2\">neural_text_model</td><td/></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null |
|
}, |
|
"TABREF4": { |
|
"text": "Results for models with different merging functions on the test set of the Anonymized Language Modeling dataset, as same as inTable 2.", |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null |
|
}, |
|
"TABREF5": { |
|
"text": "Mean Quantile of a true coreferent entity among antecedent entities.", |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null |
|
} |
|
} |
|
} |
|
} |