|
{ |
|
"paper_id": "D19-1026", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T16:11:09.390048Z" |
|
}, |
|
"title": "Learning Dynamic Context Augmentation for Global Entity Linking", |
|
"authors": [ |
|
{ |
|
"first": "Xiyuan", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Zhejiang University", |
|
"location": {} |
|
}, |
|
"email": "yangxiyuan@zju.edu.cn" |
|
}, |
|
{ |
|
"first": "Xiaotao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Illinois at Urbana Champaign", |
|
"location": {} |
|
}, |
|
"email": "xiaotao2@illinois.edu" |
|
}, |
|
{ |
|
"first": "Sheng", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Zhejiang University", |
|
"location": {} |
|
}, |
|
"email": "shenglin@zju.edu.cn" |
|
}, |
|
{ |
|
"first": "Siliang", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Zhejiang University", |
|
"location": {} |
|
}, |
|
"email": "siliang@zju.edu.cn" |
|
}, |
|
{ |
|
"first": "Yueting", |
|
"middle": [], |
|
"last": "Zhuang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Zhejiang University", |
|
"location": {} |
|
}, |
|
"email": "yzhuang@zju.edu.cn" |
|
}, |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Zhejiang University", |
|
"location": {} |
|
}, |
|
"email": "wufei@zju.edu.cn" |
|
}, |
|
{ |
|
"first": "Zhigang", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "zgchen@iflytek.com" |
|
}, |
|
{ |
|
"first": "Guoping", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "gphu@iflytek.com" |
|
}, |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Southern", |
|
"location": { |
|
"country": "California" |
|
} |
|
}, |
|
"email": "xiangren@usc.edu" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Despite of the recent success of collective entity linking (EL) methods, these \"global\" inference methods may yield sub-optimal results when the \"all-mention coherence\" assumption breaks, and often suffer from high computational cost at the inference stage, due to the complex search space. In this paper, we propose a simple yet effective solution, called Dynamic Context Augmentation (DCA), for collective EL, which requires only one pass through the mentions in a document. DCA sequentially accumulates context information to make efficient, collective inference, and can cope with different local EL models as a plugand-enhance module. We explore both supervised and reinforcement learning strategies for learning the DCA model. Extensive experiments 1 show the effectiveness of our model with different learning settings, base models, decision orders and attention mechanisms.", |
|
"pdf_parse": { |
|
"paper_id": "D19-1026", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Despite of the recent success of collective entity linking (EL) methods, these \"global\" inference methods may yield sub-optimal results when the \"all-mention coherence\" assumption breaks, and often suffer from high computational cost at the inference stage, due to the complex search space. In this paper, we propose a simple yet effective solution, called Dynamic Context Augmentation (DCA), for collective EL, which requires only one pass through the mentions in a document. DCA sequentially accumulates context information to make efficient, collective inference, and can cope with different local EL models as a plugand-enhance module. We explore both supervised and reinforcement learning strategies for learning the DCA model. Extensive experiments 1 show the effectiveness of our model with different learning settings, base models, decision orders and attention mechanisms.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Linking mentions of entities in text to knowledge base entries (i.e., entity linking, or EL) is critical to understanding and structuring text corpora. In general, EL is approached by first obtaining candidate entities for each mention, and then identifying the true referent among the candidate entities. Prior distribution and local contexts, either in the form of hand-crafted features (Ratinov et al., 2011; Shen et al., 2015) or dense embeddings (He et al., 2013; Nguyen et al., 2016; Francis-Landau et al., 2016) , play key roles in distinguishing different candidates. However, in many cases, local features can be too sparse to provide sufficient information for disambiguation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 389, |
|
"end": 411, |
|
"text": "(Ratinov et al., 2011;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 412, |
|
"end": 430, |
|
"text": "Shen et al., 2015)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 451, |
|
"end": 468, |
|
"text": "(He et al., 2013;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 469, |
|
"end": 489, |
|
"text": "Nguyen et al., 2016;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 490, |
|
"end": 518, |
|
"text": "Francis-Landau et al., 2016)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To alleviate this problem, various collective EL models have been proposed to globally optimize A traditional global EL model jointly optimizes the linking configuration after iterative calculations over all mentions, which is computationally expensive. In contrast, the DCA process only requires one pass of the document to accumulate knowledge from previously linked mentions to enhance fast future inference. the inter-entity coherence between mentions in the same document (Hoffart et al., 2011; Cheng and Roth, 2013; Nguyen et al., 2014; Alhelbawy and Gaizauskas, 2014; Pershina et al., 2015) . Despite of their success, existing global EL models try to optimize the entire linking configuration of all mentions, with extra assumptions of either allmention coherence or pairwise coherence (Phan et al., 2018) . Such assumptions are against human intuitions, as they imply that no inference can be made until all mentions in a document have been observed. Also, there usually exists a tradeoff between accuracy and efficiency: state-of-theart collective/global models suffer from high time complexity. From the perspective of computational efficiency, optimal global configuration inference is NP-hard. Approximation methods, such as loopy belief propagation (Ganea and Hofmann, 2017) or iterative substitutions (Shen et al., 2015) , are still computationally expensive due to the huge hypothesis space, and thus can hardly be scaled to handle large corpus. Many previous works have discussed the urgent needs of more efficient linking system for production, both in time complexity (Hughes et al., 2014) and memory consumption (Blanco et al., 2015) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 477, |
|
"end": 499, |
|
"text": "(Hoffart et al., 2011;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 500, |
|
"end": 521, |
|
"text": "Cheng and Roth, 2013;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 522, |
|
"end": 542, |
|
"text": "Nguyen et al., 2014;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 543, |
|
"end": 574, |
|
"text": "Alhelbawy and Gaizauskas, 2014;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 575, |
|
"end": 597, |
|
"text": "Pershina et al., 2015)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 794, |
|
"end": 813, |
|
"text": "(Phan et al., 2018)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 1263, |
|
"end": 1288, |
|
"text": "(Ganea and Hofmann, 2017)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 1316, |
|
"end": 1335, |
|
"text": "(Shen et al., 2015)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 1587, |
|
"end": 1608, |
|
"text": "(Hughes et al., 2014)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 1632, |
|
"end": 1653, |
|
"text": "(Blanco et al., 2015)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we propose a simple yet effective Dynamic Context Augmentation (DCA) process to incorporate global signal for EL. As Figure 1 shows, in contrast to traditional global models, DCA only requires one pass through all mentions to achieve comparable linking accuracy. The basic idea is to accumulate knowledge from previously linked entities as dynamic context to enhance later decisions. Such knowledge come from not only the inherent properties (e.g., description, attributes) of previously linked entities, but also from their closely related entities, which empower the model with important associative abilities. In real scenarios, some previously linked entities may be irrelevant to the current mention. Some falsely linked entities may even introduce noise. To alleviate error propagation, we further explore two strategies: (1) soft/hard attention mechanisms that favour the most relevant entities;", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 129, |
|
"end": 140, |
|
"text": "As Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "(2) a reinforcement learning-based ranking model, which proves to be effective as reported in other information extraction tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Contributions. The DCA model forms a new linking strategy from the perspective of data augmentation and thus can serve as a plug-andenhance module of existing linking models. The major contributions of this work are as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "(1) DCA can introduce topical coherence into local linking models without reshaping their original designs or structures; (2) Comparing to global EL models, DCA only requires one pass through all mentions, yielding better efficiency in both training and inference; (3) Extensive experiments show the effectiveness of our model under different learning settings, base models, decision orders and attention mechanisms.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Given a set of entity mentions M = {m 1 , ..., m T } in corpus D, Entity Linking aims to link each mention m t to its corresponding gold entity e * t . Such a process is usually divided into two steps: Candidate generation first collects a set of possible (candidate) entities E t = {e 1 t , ..., e |Et| t } for m t ; Candidate ranking is then applied to rank all candidates by likelihood. The linking system selects the top ranked candidate as the predicted entity\u00ea t . The key challenge is to capture high-quality features of each entity mention for accurate entity prediction, especially when local contexts are too sparse to disambiguate all candidates.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Definition", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "We build our DCA model based on two existing local EL models. In this section, we first introduce the architecture of the base models, then present the proposed DCA model under the standard supervised learning framework. Since the DCA process can be naturally formed as a sequential decision problem, we also explore its effectiveness under the Reinforcement Learning framework. Detailed performance comparison and ablation studies are reported in Section 6.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Definition", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "We apply the DCA process in two popular local models with different styles: the first is a neural attention model named ETHZ-Attn (Ganea and Hofmann, 2017), the other is the Berkeley-CNN (Francis-Landau et al., 2016) model which is made up of multiple convolutional neural networks (CNN).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Local Base Models for Entity Linking", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "ETHZ-Attn. For each mention m t and a candidate e j t \u2208 E t , three local features are considered: (1) Mention-entity PriorP (e j t |m t ) is the empirical distribution estimated from massive corpus (e.g.Wikipedia); (2) Context Similarity \u03a8 C (m t , e j t ) measures the textual similarity between e j t and the local context of m t ; (3) Type Similarity \u03a8 T (m t , e j t ) considers the similarity between the type of e j t and contexts around m t .P (e j t |m t ) and \u03a8 C (m t , e j t ) are calculated in the same way as (Ganea and Hofmann, 2017). For \u03a8 T (m t , e j t ), we first train a typing system proposed by (Xu and Barbosa, 2018) on AIDAtrain dataset, yielding 95% accuracy on AIDA-A dataset. In the testing phase, the typing system predicts the probability distribution over all types (PER, GPE, ORG and UNK) for m t , and outputs \u03a8 T (m t , e j t ) for each candidate accordingly. All local features are integrated by a two-layer feedforward neural network with 100 hidden units, as described in (Ganea and Hofmann, 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 617, |
|
"end": 639, |
|
"text": "(Xu and Barbosa, 2018)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 1008, |
|
"end": 1033, |
|
"text": "(Ganea and Hofmann, 2017)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Local Base Models for Entity Linking", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Berkeley-CNN. The only difference between ETHZ-Attn and Berkeley-CNN is that, this model utilizes CNNs at different granularities to capture context similarity \u03a8 C (m t , e j t ) between a mention's context and its target candidate entities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Local Base Models for Entity Linking", |
|
"sec_num": "2.2" |
|
}, |
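{

"text": "As an illustration only (this is not the authors' released code), the following minimal PyTorch sketch shows how the three local features described above could be integrated by a two-layer feedforward network with 100 hidden units; all class and variable names are ours.\n\n```python\nimport torch\nimport torch.nn as nn\n\nclass LocalScorer(nn.Module):\n    # Integrates the three local features: log prior, context similarity and type similarity.\n    def __init__(self, hidden=100):\n        super().__init__()\n        self.mlp = nn.Sequential(nn.Linear(3, hidden), nn.ReLU(), nn.Linear(hidden, 1))\n\n    def forward(self, log_prior, psi_c, psi_t):\n        # each argument: a (num_candidates,) tensor of feature values for one mention\n        feats = torch.stack([log_prior, psi_c, psi_t], dim=-1)\n        return self.mlp(feats).squeeze(-1)  # one local score per candidate\n```",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Local Base Models for Entity Linking",

"sec_num": "2.2"

},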
|
{ |
|
"text": "As Figure 1 demonstrates, the basic idea of DCA is to accumulate knowledge from previously linked entities as dynamic context to enhance later decisions. Formally, denote the list of previously linked entities as S t = {\u00ea 1 , ...,\u00ea t }, where each\u00ea i is represented as an embedding vector. The augmented context can be represented by accumulated features of all previous entities and their neighbors (e.g. by averaging their embeddings, in the simplest way). In actual scenarios, some entities in S t are irrelevant, if not harmful, to the linking result of m t+1 . To highlight the importance of relevant entities while filtering noises, we also try to apply a neural attention mechanism on dynamic contexts ( Figure 2 ). For mention m t+1 , candidates that are more coherent with S t are preferred. More specifically, we calculate the relevance score for eac\u0125", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 11, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 711, |
|
"end": 719, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dynamic Context Augmentation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "e i \u2208 S t as u(\u00ea i ) = max e j t+1 \u2208E t+1 e j t+1 \u2022 A \u2022\u00ea i ,", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Dynamic Context Augmentation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "where A is a parameterized diagonal matrix. Top K entities in S t are left to form dynamic context while the others are pruned. The relevance scores are transformed to attention weights with", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dynamic Context Augmentation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "a(\u00ea i ) = exp[u(\u00ea i )] \u00ea j \u2208St exp[u(\u00ea j )] .", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Dynamic Context Augmentation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Thus, we can define a weighted coherence score between e j t+1 \u2208 E t+1 and S t as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dynamic Context Augmentation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03a6(e j t+1 , S t ) = \u00ea i \u2208St a(\u00ea i ) \u2022 e j t+1 \u2022 R \u2022\u00ea i ,", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Dynamic Context Augmentation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "where R is a learnable diagonal matrix. Such a coherence score will be later incorporated in the final representation of e j t+1 . To empower the linking model with associative ability, aside from previously linked entities, we also incorporate entities that are closely associated with entities in S t . Specifically, for each\u00ea i \u2208 S t , we collect its neighborhood N (\u00ea i ) consisting of Wikipedia entities that have inlinks pointing to\u00ea i . Denoting S t as the union of {N (\u00ea i )|\u00ea i \u2208 S t }, we define a similar weighted coherence score between e j t+1 \u2208 E t+1 and S t as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dynamic Context Augmentation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u03a6 (e j t+1 , S t ) = \u00ea i \u2208S t a (\u00ea i ) \u2022 e j t+1 \u2022 R \u2022\u00ea i , (4)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dynamic Context Augmentation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "where a is defined similarly to a, and R is a learnable diagonal matrix. The final representation h 0 (m t+1 , e j t+1 ) is the concatena-", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dynamic Context Augmentation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "tion of \u03a6(e j t+1 , S t ), \u03a6 (e j t+1 , S t ), \u03a8 T (m t , e j t+1 ), \u03a8 C (m t , e j t+1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dynamic Context Augmentation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": ") and logP (e j t+1 |m t+1 ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dynamic Context Augmentation", |
|
"sec_num": "3" |
|
}, |
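{

"text": "For concreteness, here is a minimal NumPy sketch (ours, not the authors' implementation) of the attention-weighted coherence score in Eqs. (1)-(3), assuming the diagonal matrices A and R are stored as vectors of their diagonal entries; the 2-hop score of Eq. (4) is analogous, using the neighbor set S′_t with a′ and R′.\n\n```python\nimport numpy as np\n\ndef dca_coherence(cand_emb, linked_emb, A_diag, R_diag, K=7):\n    # cand_emb: (|E_{t+1}|, d) embeddings of the candidates of mention m_{t+1}\n    # linked_emb: (t, d) embeddings of the previously linked entities S_t\n    # Eq. (1): relevance of each previous entity, maximized over the candidates\n    rel = (cand_emb * A_diag) @ linked_emb.T      # rel[j, i] = e_j . A . e_hat_i\n    u = rel.max(axis=0)                           # (t,)\n    keep = np.argsort(-u)[:K]                     # hard attention: keep the top-K previous entities\n    a = np.exp(u[keep]) / np.exp(u[keep]).sum()   # Eq. (2): soft attention weights\n    # Eq. (3): attention-weighted coherence score for every candidate\n    return ((cand_emb * R_diag) @ linked_emb[keep].T) @ a\n```",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Dynamic Context Augmentation",

"sec_num": "3"

},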
|
{ |
|
"text": "In this section, we explore different learning strategies for the linking model. Specifically, we present a Supervised Learning model, where the model is given all gold entities for training, and a Reinforcement Learning model, where the model explores possible linking results by itself in a longterm planning task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Learning for DCA", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Given a mention-candidate pair (m t , e j t ), the ranking model parameterized by \u03b8 accepts the feature vector h 0 (m t , e j t ) as input, and outputs the probability P \u03b8 (e j t |m t ). In this work, we use a two-layer feedforward neural network as the ranking model. We apply the max-margin loss as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised Ranking Method", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "L \u03b8 = D\u2208D mt\u2208D e j t \u2208Et g \u03b8 (e j t , m t ),", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised Ranking Method", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "g \u03b8 (e j t , m t ) = max(0, \u03b3 \u2212 P \u03b8 (e * t |m t ) + P \u03b8 (e j t |m t )). The learning process is to estimate the optimal parameter such that \u03b8 * = arg min \u03b8 L \u03b8 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised Ranking Method", |
|
"sec_num": "4.1" |
|
}, |
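{

"text": "A minimal sketch of the max-margin objective above for a single mention (ours, for illustration; the rank margin γ = 0.01 follows the hyper-parameters in Section 6.1):\n\n```python\nimport torch\n\ndef max_margin_loss(scores, gold_idx, gamma=0.01):\n    # scores: (num_candidates,) tensor of P_theta(e | m) for one mention\n    # gold_idx: position of the gold entity e* among the candidates\n    # g_theta(e, m) = max(0, gamma - P(e*|m) + P(e|m)), summed over the candidate set\n    margins = gamma - scores[gold_idx] + scores\n    # the gold candidate itself only contributes the constant gamma\n    return torch.clamp(margins, min=0.0).sum()\n```",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Supervised Ranking Method",

"sec_num": "4.1"

},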
|
{ |
|
"text": "Note that, in the Supervised Ranking model, dynamic contexts are provided by previous gold entities:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised Ranking Method", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "S t = {e * 1 , ..., e * t }, S t = t i=1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised Ranking Method", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "N (e * i ). In the testing phase, however, we do not have access to gold entities. Wrongly linked entities can introduce noisy contexts to future linking steps. To consider such long-term influences, we introduce an alternative Reinforcement Learning model in the next section.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised Ranking Method", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Naturally, the usage of dynamic context augmentation forms a sequential decision problem, as each linking step depends on previous linking decisions. Correct linking results provide valuable information for future decisions, while previous mistakes can lead to error accumulation. Reinforcement Learning (RL) algorithms have proven to be able to alleviate such accumulated noises in the decision sequence in many recent works (Narasimhan et al., 2016; Feng et al., 2018) . In this work, we propose an RL ranking model for DCA-enhanced entity linking.", |
|
"cite_spans": [ |
|
{ |
|
"start": 426, |
|
"end": 451, |
|
"text": "(Narasimhan et al., 2016;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 452, |
|
"end": 470, |
|
"text": "Feng et al., 2018)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reinforcement Learning Method", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Agent: The Agent is a candidate ranking model that has a similar architecture to (Clark and Manning, 2016) , aiming to output the action preference", |
|
"cite_spans": [ |
|
{ |
|
"start": 81, |
|
"end": 106, |
|
"text": "(Clark and Manning, 2016)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reinforcement Learning Method", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "H \u03b8 (S t\u22121 , S t\u22121 , A j t ) of each linking action A j t = (m t \u2192 e j t )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reinforcement Learning Method", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": ". It is a 2-layer feedforward neural network with following components:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reinforcement Learning Method", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Input Layer: For each (m t , e j t ) pair, DCA-RL extracts context-dependent features from S t\u22121 , S t\u22121 , and concatenates them with other context-independent features to produce an Idimensional input vector h 0 (m t , e j t ). Hidden Layers: Let Drop( x) be the dropout operation (Srivastava et al., 2014) and ReLU ( x) be the rectifier nonlinearity (Nair and Hinton, 2010) . So the output h 1 of the hidden layer is defined as:", |
|
"cite_spans": [ |
|
{ |
|
"start": 282, |
|
"end": 307, |
|
"text": "(Srivastava et al., 2014)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 352, |
|
"end": 375, |
|
"text": "(Nair and Hinton, 2010)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reinforcement Learning Method", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "h 1 = Drop(ReLU ( W 1 \u2022 h 0 + b 1 )),", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Reinforcement Learning Method", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "where W 1 is a H 1 \u00d7 I weight matrix.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reinforcement Learning Method", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Output Layers: This scoring layer is also fully connected layer of size 1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reinforcement Learning Method", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "h 2 = W 2 \u2022 h 1 + b 2 ,", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Reinforcement Learning Method", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "where W 2 is a 1\u00d7H 1 weight matrix. In the end, all action preference would be normalized together using an exponential softmax distribution, getting their action probabilities \u03c0 \u03b8 (A j t |S t\u22121 , S t\u22121 ): According to policy approximating methods, the best approximate policy may be stochastic. So we randomly sample the actions based on the softmax distribution during the training time, whereas deliberately select the actions with the highest ranking score at the test time.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reinforcement Learning Method", |
|
"sec_num": "4.2" |
|
}, |
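{

"text": "As a sketch of Eqs. (5)-(6) together with the softmax over action preferences (illustrative only, not the released implementation; H_1 = 100 and dropout 0.2 follow the hyper-parameters in Section 6.1):\n\n```python\nimport torch\nimport torch.nn as nn\n\nclass AgentScorer(nn.Module):\n    # Two-layer scorer: h1 = Drop(ReLU(W1.h0 + b1)), h2 = W2.h1 + b2\n    def __init__(self, input_dim, hidden=100, dropout=0.2):\n        super().__init__()\n        self.fc1 = nn.Linear(input_dim, hidden)\n        self.drop = nn.Dropout(dropout)\n        self.fc2 = nn.Linear(hidden, 1)\n\n    def forward(self, h0):\n        # h0: (num_candidates, input_dim) feature vectors for the actions of one mention\n        h1 = self.drop(torch.relu(self.fc1(h0)))\n        h2 = self.fc2(h1).squeeze(-1)\n        return torch.softmax(h2, dim=-1)  # action probabilities pi_theta(A | S)\n```",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Reinforcement Learning Method",

"sec_num": "4.2"

},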
|
{ |
|
"text": "Reward. The reward signals are quite sparse in our framework. For each trajectory, the Agent can only receive a reward signal after it finishes all the linking actions in a given document. Therefore the immediate reward of action t, R t = 0, where 0 \u2264 t < T , and R T = \u2212(|M e |/T ), where T is total number of mentions in the source document, and |M e | is the number of incorrectly linked mentions. Then the value G t (expected reward) of each previous state S t can be retraced back with a discount factor \u03c1 according to R T :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reinforcement Learning Method", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "G t = \u03c1 T \u2212t R T", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Reinforcement Learning Method", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "To maximize the expected reward of all trajectories, the Agent utilizes the REINFORCE algorithm (Sutton and Barto, 1998) to compute Monte Carlo policy gradient over all trajectories, and perform gradient ascent on its parameters:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reinforcement Learning Method", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u03b8 \u2190 \u03b8 + \u03b1 t G t \u2207 \u03b8 ln \u03c0 \u03b8 (A j t |S t\u22121 , S t\u22121 ) (8)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reinforcement Learning Method", |
|
"sec_num": "4.2" |
|
}, |
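{

"text": "Put together, a minimal sketch of the document-level reward, the discounted return of Eq. (7) and the REINFORCE objective of Eq. (8) (illustrative only; ρ = 0.9 follows Section 6.1, and minimizing the returned loss with any gradient-based optimizer performs the gradient ascent step):\n\n```python\nimport torch\n\ndef reinforce_loss(action_log_probs, num_wrong, rho=0.9):\n    # action_log_probs: (T,) tensor of log pi_theta(A_t | S_{t-1}, S'_{t-1}) for the actions taken in one document\n    # num_wrong: |M_e|, the number of incorrectly linked mentions, known once the document is finished\n    T = action_log_probs.shape[0]\n    R_T = -float(num_wrong) / T                                        # terminal reward\n    returns = torch.tensor([rho ** (T - t) * R_T for t in range(T)])   # Eq. (7): G_t = rho^(T-t) * R_T\n    return -(returns * action_log_probs).sum()                         # negated Eq. (8) objective\n```",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Reinforcement Learning Method",

"sec_num": "4.2"

},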
|
{ |
|
"text": "In following sections, to fully investigate the effectiveness of the proposed method, we report and compare the performances of both the Supervisedlearning model and the Reinforcement-learning model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reinforcement Learning Method", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "For each document D, the train and inference of the global EL models are heavily relied on the inter-entity coherence graph \u03a6 g . Many studies (Ratinov et al., 2011; Globerson et al., 2016; Yamada et al., 2016; Ganea and Hofmann, 2017; Le and Titov, 2018) pairwise scores between two arbitrary elements e i x and e j y sampled independently from candidate sets E i and E j in the given document. It is obvious that \u03a6 is intractable, and the computational com-", |
|
"cite_spans": [ |
|
{ |
|
"start": 143, |
|
"end": 165, |
|
"text": "(Ratinov et al., 2011;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 166, |
|
"end": 189, |
|
"text": "Globerson et al., 2016;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 190, |
|
"end": 210, |
|
"text": "Yamada et al., 2016;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 211, |
|
"end": 235, |
|
"text": "Ganea and Hofmann, 2017;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 236, |
|
"end": 255, |
|
"text": "Le and Titov, 2018)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analysis of Computational Complexity", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "plexity of \u03a6 g is O(\u03a6 g ) = O( T i=1 T j=1,j =i |E i | e i x \u2208E i |E j | e j y \u2208E j \u03a6(e i x , e j y )) (9) , where \u03a6(e i x , e j y ) is a learnable score function. Thus, O(\u03a6 g ) is approximate to O(T 2 \u00d7 |E| 2 \u00d7 I),", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analysis of Computational Complexity", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "where |E| is the average number of candidates per mention and I is the unit cost of pairwise function \u03a6. In order to reduce O(\u03a6 g ), most previous models (Hoffart et al., 2011; Ganea and Hofmann, 2017; Le and Titov, 2018; Fang et al., 2019) have to hard prune their candidates into an extremely small size (e.g. |E|=5). This will reduce the gold recall of candidate sets and also unsuitable for large scale production (e.g. entity disambiguation for dynamic web data like Twitter).", |
|
"cite_spans": [ |
|
{ |
|
"start": 154, |
|
"end": 176, |
|
"text": "(Hoffart et al., 2011;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 177, |
|
"end": 201, |
|
"text": "Ganea and Hofmann, 2017;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 202, |
|
"end": 221, |
|
"text": "Le and Titov, 2018;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 222, |
|
"end": 240, |
|
"text": "Fang et al., 2019)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analysis of Computational Complexity", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In contrast, the computational complexity of our model is O(T \u00d7 |E| \u00d7 I \u00d7 K), where K is the key hyper-parameter described in Section 3 and is usually set to a small number. This indicates the response time of our method grow linearly as a function of T \u00d7 |E|.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analysis of Computational Complexity", |
|
"sec_num": "5" |
|
}, |
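{

"text": "As a back-of-the-envelope illustration of the two complexity bounds above (ours; the unit cost I of the score function is factored out):\n\n```python\ndef score_evaluations(T, E_avg, K=7):\n    # rough number of pairwise score evaluations needed per document\n    global_model = T * (T - 1) * E_avg * E_avg  # O(T^2 * |E|^2): all candidate pairs across mention pairs\n    dca_model = T * E_avg * K                   # O(T * |E| * K): each candidate vs. at most K dynamic-context entities\n    return global_model, dca_model\n\n# e.g. a 30-mention document with 30 candidates per mention:\n# score_evaluations(30, 30) -> (783000, 6300)\n```",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Analysis of Computational Complexity",

"sec_num": "5"

},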
|
{ |
|
"text": "Datasets. Following our predecessors, we train and test all models on the public and widely used AIDA CoNLL-YAGO dataset (Hoffart et al., 2011) . The target knowledge base is Wikipedia. The corpus consists of 946 documents for training, 216 documents for development and 231 documents for testing (AIDA-train/A/B respectively).", |
|
"cite_spans": [ |
|
{ |
|
"start": 121, |
|
"end": 143, |
|
"text": "(Hoffart et al., 2011)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiment Setup", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "In-KB acc. (%)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "AIDA-light (Nguyen et al., 2014) 84.8 WNED (Guo and Barbosa, 2016) 89.0 Global- RNN (Nguyen et al., 2016) 90.7 MulFocal-Att (Globerson et al., 2016) 91.0 Deep-ED (Ganea and Hofmann, 2017) 92.22 Ment-Norm (Le and Titov, 2018) 93.07", |
|
"cite_spans": [ |
|
{ |
|
"start": 11, |
|
"end": 32, |
|
"text": "(Nguyen et al., 2014)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 43, |
|
"end": 66, |
|
"text": "(Guo and Barbosa, 2016)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 80, |
|
"end": 105, |
|
"text": "RNN (Nguyen et al., 2016)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 124, |
|
"end": 148, |
|
"text": "(Globerson et al., 2016)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 162, |
|
"end": 187, |
|
"text": "(Ganea and Hofmann, 2017)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 204, |
|
"end": 224, |
|
"text": "(Le and Titov, 2018)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Prior ( To evaluate the generalization ability of each model, we apply cross-domain experiments following the same setting in (Ganea and Hofmann, 2017; Le and Titov, 2018; . Models are trained on AIDA-train, and evaluated on five popular public datasets: AQUAINT (Milne and Witten, 2008) , MSNBC (Cucerzan, 2007), ACE2004 (Ratinov et al., 2011) , CWEB (Guo and Barbosa, 2016) and WIKI (Guo and Barbosa, 2016) . The statistics of these datasets are available in Table 1 . In the candidate generation step, we directly use the candidates provided by the Ment-Norm system (Le and Titov, 2018) 2 , and their quality is also listed in Table 1 For a fair comparison with prior work, we use the same input as the WNED, Deep-ED and Ment-Norm (models proposed after 2016), and report the performance of our model with both Supervised Learning (DCA-SL) and Reinforcement Learning (DCA-RL). We won't compare our models with the RLEL (Fang et al., 2019) which is a deep reinforcement learning based LSTM model. There are two reasons: 1) RLEL uses optimized candidate sets with smaller candidate size and higher gold recall than ours and the listed baselines. 2) RLEL uses additional training set from Wikipedia data. (Fang et al., 2019) doesn't release either their candidate sets or updated training corpus, so the comparison with their work would be unfair for us.", |
|
"cite_spans": [ |
|
{ |
|
"start": 152, |
|
"end": 171, |
|
"text": "Le and Titov, 2018;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 263, |
|
"end": 287, |
|
"text": "(Milne and Witten, 2008)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 322, |
|
"end": 344, |
|
"text": "(Ratinov et al., 2011)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 352, |
|
"end": 375, |
|
"text": "(Guo and Barbosa, 2016)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 385, |
|
"end": 408, |
|
"text": "(Guo and Barbosa, 2016)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 922, |
|
"end": 941, |
|
"text": "(Fang et al., 2019)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 1205, |
|
"end": 1224, |
|
"text": "(Fang et al., 2019)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 461, |
|
"end": 468, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
}, |
|
{ |
|
"start": 630, |
|
"end": 637, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "System", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Hyper-parameter Setting. We coarsely tune the hyper-parameters according to model performance on AIDA-A. We set the dimensions of word embedding and entity embedding to 300, where the word embedding and entity embedding are publicly released by (Pennington et al., 2014) and (Ganea and Hofmann, 2017) respectively. Hyper-parameters of the best validated model are: K = 7, I = 5, H 1 = 100, and the probability of dropout is set to 0.2. Besides, the rank margin \u03b3 = 0.01 and the discount factor \u03c1 = 0.9. We also", |
|
"cite_spans": [ |
|
{ |
|
"start": 245, |
|
"end": 270, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In-KB acc. (%) SL RL ETHZ-Attn (Section 2.2) 90.88 -ETHZ-Attn + 1-hop DCA 93.69 93.20 ETHZ-Attn + 2-hop DCA 94.47 93.76 Table 4 : Ablation Study on Neighbor Entities. We compare the performance of DCA with or without neighbor entities (i.e., 2-hop vs. 1-hop). regularize the Agent model as adopted in (Ganea and Hofmann, 2017) by constraining the sum of squares of all weights in the linear layer with M axN orm = 4. When training the model, we use Adam (Kingma and Ba, 2014) with learning rate of 2e-4 until validation accuracy exceeds 92.8%, afterwards setting it to 5e-5.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 120, |
|
"end": 127, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "System", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Starting with an overview of the end-task performance, we compare DCA (using SL or RL) with several state-of-the-art systems on in-domain and cross-domain datasets. We follow prior work and report in-KB accuracy for AIDA-B and micro F1 scores for the other test sets. Table 2 summarizes results on the AIDA-B dataset, and shows that DCA-based models achieve the highest in-KB accuracy and outperforms the previous state-of-the-art neural system by near 1.6% absolute accuracy. Moreover, compared with the base models, dynamic context augmentation significantly improve absolute in-KB accuracy in models Berkeley-CNN (more than Figure 3 : Ablation Study on Different Decision Orders. We test on both in-domain (AIDA-B) and cross-domain (CWEB) datasets, using ETHZ-Attn as the local model.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 268, |
|
"end": 275, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 627, |
|
"end": 635, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Overall Performance Comparison", |
|
"sec_num": "6.2" |
|
}, |
|
|
{ |
|
"text": ") and ETHZ-Attn (3.3% on average). Note that, our DCA model outperforms existing global models with the same local model (Global-RNN uses Berkeley-CNN as base model, Deep-ED and Ment-Norm use ETHZ-Attn as the local model). Table 3 shows the results on the five crossdomain datasets. As shown, none of existing methods can consistently win on all datasets. DCA-based models achieve state-of-the-art performance on the MSBNC and the ACE2004 dataset. On remaining datasets, DCA-RL achieves comparable performance with other complex global models. In addition, RL-based models show on average 1.1% improvement on F1 score over the SL-based models across all the crossdomain datasets. At the same time, DCA-based methods are much more efficient, both in time complexity and in resource requirement. Detailed efficiency analysis will be presented in following sections.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 223, |
|
"end": 230, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "8%", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "1. Impact of decision order. As the DCA model consecutively links and adds all the mentions in a document, the linking order may play a key role in the final performance. In this work, we try three different linking orders: Offset links all mentions by their natural orders in the original document; Size first links mentions with smaller candidate sizes, as they tend to be easier to link; The baseline method is to link all mentions in a Random order. Figure 3 shows the performance comparison on the AIDA-B and the CWEB dataset. As shown, in general, Size usually leads to better performance than Offset and Random. However, the DCA-SL model shows poor performance on the CWEB dataset with Size order. This is mainly because the CWEB dataset is automatically generated rather than curated by human, and thus contains many noisy mentions. Some mentions in CWEB with less than three candidates are actually bad cases , where none of the candidates is the actual gold entity. Thus, such mentions will always introduce wrong information to the model, which leads to a worse performance. In contrast, the AIDA-B dataset does not have such situations. The DCA-RL model, however, still has strong performance on the CWEB dataset, which highlights its robustness to potential noises.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 454, |
|
"end": 462, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Performance Analysis", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "2. Effect of neighbor entities. In contrast to traditional global models, we include both previously linked entities and their close neighbors for global signal. Table 4 shows the effectiveness of this strategy. We observe that incorporating these neighbor entities (2-hop) significantly improve the performance (compared to 1-hop) by introducing more related information. And our analysis shows that on average 0.72% and 3.56% relative improvement of 2-hop DCA-(SL/RL) over 1-hop DCA-(SL/RL) or baseline-SL (without DCA) is statistically significant (with P-value < 0.005). This is consistent with our design of DCA. Table 5 shows the performance comparison by replacing the attention module described in Section 3 with different variants. Average Sum treats all previously linked entities equally with a uniform distribution. Soft Attention skips the pruning step for entities with low weight scores. Soft&Hard Attention stands for the strategy used in our model. It is obvious that the attention mechanism does show positive influence on the linking performance compared with Average Sum.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 162, |
|
"end": 169, |
|
"text": "Table 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 618, |
|
"end": 625, |
|
"text": "Table 5", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Performance Analysis", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "Hard pruning brings slight further improvement.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Study of different attention mechanisms.", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "4. Impact of decision length. As wrongly linked entities can introduce noise to the model, there exists a trade-off in DCA: involving more previous entities (longer historical trajectory) provides more information, and also more noise. Figure (4.a) shows how the performance of DCA changes with the number of previous entities involved. We observe that longer historical trajectories usually have a positive influence on the performance of DCA. The reason is that our attention mechanism could effectively assess and select relevant contexts for each entity mention on the fly, thus reducing potential noise.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 236, |
|
"end": 249, |
|
"text": "Figure (4.a)", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Study of different attention mechanisms.", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "As discussed in Sec. 5, the running time of a DCA enhanced model may rise linearly when the average number of candidates per mention (i.e., |E|) increases, while the global EL model increases exponentially. To validate the theory we empirically investigate the scalability of DCA, and carefully select two global EL models Ment-Norm (Le and Titov, 2018) and Deep-ED (Ganea and Hofmann, 2017) as our baselines. The reason for this choice is that our final model shares the same local model as their models, which excludes other confounding factors like implementation details. As Figure (4.c) shows, when |E| increases, the running time of these two global EL models increases shapely, while our DCA model grows linearly. On the other hand, we also observed that the resources required by the DCA model are insensitive to |E|. For example, as shown in Figure (4.b) , the memory usage of Ment-Norm and Deep-ED significantly rises as more candidates are considered, while the DCA model remains a relatively low memory usage all the time. We also measure the power consumption of Ment-Norm and DCA models, and we find that the DCA model saves up to 80% of the energy consumption over the Ment-Norm, which is another advantage for large scale production.", |
|
"cite_spans": [ |
|
{ |
|
"start": 333, |
|
"end": 353, |
|
"text": "(Le and Titov, 2018)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 579, |
|
"end": 592, |
|
"text": "Figure (4.c)", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 852, |
|
"end": 864, |
|
"text": "Figure (4.b)", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Analysis on Time Complexity", |
|
"sec_num": "6.4" |
|
}, |
|
{ |
|
"text": "Local EL methods disambiguate each mention independently according to their local contexts (Yamada et al., 2016; Chen et al., 2017; Globerson et al., 2016; Raiman and Raiman, 2018) . The per-formance is limited when sparse local contexts fail to provide sufficient disambiguation evidence.", |
|
"cite_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 112, |
|
"text": "(Yamada et al., 2016;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 113, |
|
"end": 131, |
|
"text": "Chen et al., 2017;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 132, |
|
"end": 155, |
|
"text": "Globerson et al., 2016;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 156, |
|
"end": 180, |
|
"text": "Raiman and Raiman, 2018)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "To alleviate this problem, global EL models jointly optimize the entire linking configuration. The key idea is to maximize a global coherence/similarity score between all linked entities (Hoffart et al., 2011; Ratinov et al., 2011; Cheng and Roth, 2013; Nguyen et al., 2014; Alhelbawy and Gaizauskas, 2014; Pershina et al., 2015; Guo and Barbosa, 2016; Globerson et al., 2016; Ganea and Hofmann, 2017; Le and Titov, 2018; Fang et al., 2019; Xue et al., 2019) . Despite of its significant improvement in accuracy, such global methods suffer from high complexity. To this end, some works try to relax the assumption of all-mention coherence, e.g. with pairwise coherence, to improve efficiency (Phan et al., 2018), but exact inference remains an NPhard problem. Approximation methods are hence proposed to achieve reasonably good results with less cost. (Shen et al., 2012) propose the iterative substitution method to greedily substitute linking assignment of one mention at a time that can improve the global objective. Another common practice is to use Loopy Belief Propagation for inference (Ganea and Hofmann, 2017; Le and Titov, 2018) . Both approximation methods iteratively improve the global assignment, but are still computationally expensive with unbounded number of iterations. In contrast, the proposed DCA method only requires one pass through the document. Global signals are accumulated as dynamic contexts for local decisions, which significantly reduces computational complexity and memory consumption.", |
|
"cite_spans": [ |
|
{ |
|
"start": 187, |
|
"end": 209, |
|
"text": "(Hoffart et al., 2011;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 210, |
|
"end": 231, |
|
"text": "Ratinov et al., 2011;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 232, |
|
"end": 253, |
|
"text": "Cheng and Roth, 2013;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 254, |
|
"end": 274, |
|
"text": "Nguyen et al., 2014;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 275, |
|
"end": 306, |
|
"text": "Alhelbawy and Gaizauskas, 2014;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 307, |
|
"end": 329, |
|
"text": "Pershina et al., 2015;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 330, |
|
"end": 352, |
|
"text": "Guo and Barbosa, 2016;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 353, |
|
"end": 376, |
|
"text": "Globerson et al., 2016;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 377, |
|
"end": 401, |
|
"text": "Ganea and Hofmann, 2017;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 402, |
|
"end": 421, |
|
"text": "Le and Titov, 2018;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 422, |
|
"end": 440, |
|
"text": "Fang et al., 2019;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 441, |
|
"end": 458, |
|
"text": "Xue et al., 2019)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 852, |
|
"end": 871, |
|
"text": "(Shen et al., 2012)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 1119, |
|
"end": 1138, |
|
"text": "Le and Titov, 2018)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "In this paper we propose Dynamic Context Augmentation as a plug-and-enhance module for local Entity Linking models. In contrast to existing global EL models, DCA only requires one pass through the document. To incorporate global disambiguation signals, DCA accumulates knowledge from previously linked entities for fast inference. Extensive experiments on several public benchmarks with different learning settings, base models, decision orders and attention mechanisms demonstrate both the effectiveness and efficiency of DCA-based models. The scalability of DCAbased models make it possible to handle largescale data with long documents. Related code and data has been published and may hopefully benefit the community.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "https://github.com/lephong/mulrel-nel", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Graph ranking for collective named entity disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Ayman", |
|
"middle": [], |
|
"last": "Alhelbawy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Gaizauskas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "75--80", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ayman Alhelbawy and Robert Gaizauskas. 2014. Graph ranking for collective named entity disam- biguation. In Proceedings of the 52nd Annual Meet- ing of the Association for Computational Linguistics (Volume 2: Short Papers), volume 2, pages 75-80.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Fast and space-efficient entity linking for queries", |
|
"authors": [ |
|
{ |
|
"first": "Roi", |
|
"middle": [], |
|
"last": "Blanco", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giuseppe", |
|
"middle": [], |
|
"last": "Ottaviano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edgar", |
|
"middle": [], |
|
"last": "Meij", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the Eighth ACM International Conference on Web Search and Data Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "179--188", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roi Blanco, Giuseppe Ottaviano, and Edgar Meij. 2015. Fast and space-efficient entity linking for queries. In Proceedings of the Eighth ACM Interna- tional Conference on Web Search and Data Mining, pages 179-188. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Bilinear joint learning of word and entity embeddings for entity linking", |
|
"authors": [ |
|
{ |
|
"first": "Hui", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Baogang", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonghuai", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jifang", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenhao", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Neurocomputing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hui Chen, Baogang Wei, Yonghuai Liu, Yiming Li, Jifang Yu, and Wenhao Zhu. 2017. Bilinear joint learning of word and entity embeddings for entity linking. Neurocomputing.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Relational inference for wikification", |
|
"authors": [ |
|
{ |
|
"first": "Xiao", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1787--1796", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiao Cheng and Dan Roth. 2013. Relational inference for wikification. In Proceedings of the 2013 Con- ference on Empirical Methods in Natural Language Processing, pages 1787-1796.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Deep reinforcement learning for mention-ranking coreference models", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Christopher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2256--2262", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin Clark and Christopher D Manning. 2016. Deep reinforcement learning for mention-ranking coref- erence models. In Proceedings of the 2016 Con- ference on Empirical Methods in Natural Language Processing, pages 2256-2262.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Large-scale named entity disambiguation based on wikipedia data", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Silviu Cucerzan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Silviu Cucerzan. 2007. Large-scale named entity dis- ambiguation based on wikipedia data. In Proceed- ings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Com- putational Natural Language Learning (EMNLP- CoNLL).", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Joint entity linking with deep reinforcement learning", |
|
"authors": [ |
|
{ |
|
"first": "Zheng", |
|
"middle": [], |
|
"last": "Fang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanan", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qian", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongjie", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhenyu", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanbing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "The World Wide Web Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "438--447", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zheng Fang, Yanan Cao, Qian Li, Dongjie Zhang, Zhenyu Zhang, and Yanbing Liu. 2019. Joint en- tity linking with deep reinforcement learning. In The World Wide Web Conference, pages 438-447. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Relation mention extraction from noisy data with hierarchical reinforcement learning", |
|
"authors": [ |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minlie", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yijie", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoyan", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1811.01237" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jun Feng, Minlie Huang, Yijie Zhang, Yang Yang, and Xiaoyan Zhu. 2018. Relation mention extrac- tion from noisy data with hierarchical reinforcement learning. arXiv preprint arXiv:1811.01237.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Capturing semantic similarity for entity linking with convolutional neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Francis-Landau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Durrett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1604.00734" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Francis-Landau, Greg Durrett, and Dan Klein. 2016. Capturing semantic similarity for entity linking with convolutional neural networks. arXiv preprint arXiv:1604.00734.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Deep joint entity disambiguation with local neural attention", |
|
"authors": [ |
|
{ |
|
"first": "Octavian-Eugen", |
|
"middle": [], |
|
"last": "Ganea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Hofmann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1704.04920" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Octavian-Eugen Ganea and Thomas Hofmann. 2017. Deep joint entity disambiguation with local neural attention. arXiv preprint arXiv:1704.04920.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Collective entity resolution with multi-focal attention", |
|
"authors": [ |
|
{ |
|
"first": "Amir", |
|
"middle": [], |
|
"last": "Globerson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nevena", |
|
"middle": [], |
|
"last": "Lazic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soumen", |
|
"middle": [], |
|
"last": "Chakrabarti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amarnag", |
|
"middle": [], |
|
"last": "Subramanya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Ringaard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "621--631", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amir Globerson, Nevena Lazic, Soumen Chakrabarti, Amarnag Subramanya, Michael Ringaard, and Fer- nando Pereira. 2016. Collective entity resolution with multi-focal attention. In Proceedings of the 54th Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers), vol- ume 1, pages 621-631.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Robust named entity disambiguation with random walks. Semantic Web", |
|
"authors": [ |
|
{ |
|
"first": "Zhaochen", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Denilson", |
|
"middle": [], |
|
"last": "Barbosa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--21", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhaochen Guo and Denilson Barbosa. 2016. Robust named entity disambiguation with random walks. Semantic Web, (Preprint):1-21.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Learning entity representation for entity disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Zhengyan", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shujie", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mu", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Longkai", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Houfeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "30--34", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhengyan He, Shujie Liu, Mu Li, Ming Zhou, Longkai Zhang, and Houfeng Wang. 2013. Learning entity representation for entity disambiguation. In Pro- ceedings of the 51st Annual Meeting of the Associa- tion for Computational Linguistics (Volume 2: Short Papers), volume 2, pages 30-34.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Robust disambiguation of named entities in text", |
|
"authors": [ |
|
{ |
|
"first": "Johannes", |
|
"middle": [], |
|
"last": "Hoffart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohamed", |
|
"middle": [ |
|
"Amir" |
|
], |
|
"last": "Yosef", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilaria", |
|
"middle": [], |
|
"last": "Bordino", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hagen", |
|
"middle": [], |
|
"last": "F\u00fcrstenau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manfred", |
|
"middle": [], |
|
"last": "Pinkal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc", |
|
"middle": [], |
|
"last": "Spaniol", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bilyana", |
|
"middle": [], |
|
"last": "Taneva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Thater", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerhard", |
|
"middle": [], |
|
"last": "Weikum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "782--792", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Johannes Hoffart, Mohamed Amir Yosef, Ilaria Bor- dino, Hagen F\u00fcrstenau, Manfred Pinkal, Marc Span- iol, Bilyana Taneva, Stefan Thater, and Gerhard Weikum. 2011. Robust disambiguation of named entities in text. In Proceedings of the Conference on Empirical Methods in Natural Language Process- ing, pages 782-792. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Trading accuracy for faster named entity linking", |
|
"authors": [ |
|
{ |
|
"first": "Kristy", |
|
"middle": [], |
|
"last": "Hughes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joel", |
|
"middle": [], |
|
"last": "Nothman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James R", |
|
"middle": [], |
|
"last": "Curran", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Australasian Language Technology Association Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "32--40", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kristy Hughes, Joel Nothman, and James R Curran. 2014. Trading accuracy for faster named entity link- ing. In Proceedings of the Australasian Language Technology Association Workshop 2014, pages 32- 40.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "Diederik", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1412.6980" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Improving entity linking by modeling latent relations between mentions", |
|
"authors": [ |
|
{ |
|
"first": "Phong", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Titov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1804.10637" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Phong Le and Ivan Titov. 2018. Improving entity link- ing by modeling latent relations between mentions. arXiv preprint arXiv:1804.10637.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Learning to link with wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Milne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ian", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Witten", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 17th ACM conference on Information and knowledge management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "509--518", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Milne and Ian H Witten. 2008. Learning to link with wikipedia. In Proceedings of the 17th ACM conference on Information and knowledge manage- ment, pages 509-518. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Rectified linear units improve restricted boltzmann machines", |
|
"authors": [ |
|
{ |
|
"first": "Vinod", |
|
"middle": [], |
|
"last": "Nair", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 27th international conference on machine learning (ICML-10)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "807--814", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vinod Nair and Geoffrey E Hinton. 2010. Rectified linear units improve restricted boltzmann machines. In Proceedings of the 27th international conference on machine learning (ICML-10), pages 807-814.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Improving information extraction by acquiring external evidence with reinforcement learning", |
|
"authors": [ |
|
{ |
|
"first": "Karthik", |
|
"middle": [], |
|
"last": "Narasimhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Yala", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Regina", |
|
"middle": [], |
|
"last": "Barzilay", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1603.07954" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karthik Narasimhan, Adam Yala, and Regina Barzilay. 2016. Improving information extraction by acquir- ing external evidence with reinforcement learning. arXiv preprint arXiv:1603.07954.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Aida-light: Highthroughput named-entity disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Dat", |
|
"middle": [ |
|
"Ba" |
|
], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johannes", |
|
"middle": [], |
|
"last": "Hoffart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Theobald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerhard", |
|
"middle": [], |
|
"last": "Weikum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dat Ba Nguyen, Johannes Hoffart, Martin Theobald, and Gerhard Weikum. 2014. Aida-light: High- throughput named-entity disambiguation. LDOW, 1184.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Joint learning of local and global features for entity linking via neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Thien", |
|
"middle": [ |
|
"Huu" |
|
], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicolas", |
|
"middle": [], |
|
"last": "Fauceglia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mariano", |
|
"middle": [ |
|
"Rodriguez" |
|
], |
|
"last": "Muro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oktie", |
|
"middle": [], |
|
"last": "Hassanzadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alfio", |
|
"middle": [ |
|
"Massimiliano" |
|
], |
|
"last": "Gliozzo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Sadoghi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2310--2320", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thien Huu Nguyen, Nicolas Fauceglia, Mariano Ro- driguez Muro, Oktie Hassanzadeh, Alfio Massimil- iano Gliozzo, and Mohammad Sadoghi. 2016. Joint learning of local and global features for entity link- ing via neural networks. In Proceedings of COLING 2016, the 26th International Conference on Compu- tational Linguistics: Technical Papers, pages 2310- 2320.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. Glove: Global vectors for word representation. In Proceedings of the 2014 confer- ence on empirical methods in natural language pro- cessing (EMNLP), pages 1532-1543.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Personalized page rank for named entity disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Pershina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yifan", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Grishman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "238--243", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maria Pershina, Yifan He, and Ralph Grishman. 2015. Personalized page rank for named entity disam- biguation. In Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, pages 238-243.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Pair-linking for collective entity disambiguation: Two could be better than all", |
|
"authors": [ |
|
{ |
|
"first": "Minh", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Phan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aixin", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Tay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jialong", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chenliang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "IEEE Transactions on Knowledge and Data Engineering", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minh C Phan, Aixin Sun, Yi Tay, Jialong Han, and Chenliang Li. 2018. Pair-linking for collective en- tity disambiguation: Two could be better than all. IEEE Transactions on Knowledge and Data Engi- neering.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Deeptype: Multilingual entity linking by neural type system evolution", |
|
"authors": [ |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Raiman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olivier", |
|
"middle": [], |
|
"last": "Raiman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1802.01021" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jonathan Raiman and Olivier Raiman. 2018. Deep- type: Multilingual entity linking by neural type sys- tem evolution. arXiv preprint arXiv:1802.01021.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Local and global algorithms for disambiguation to wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "Lev", |
|
"middle": [], |
|
"last": "Ratinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Doug", |
|
"middle": [], |
|
"last": "Downey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Anderson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1375--1384", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lev Ratinov, Dan Roth, Doug Downey, and Mike Anderson. 2011. Local and global algorithms for disambiguation to wikipedia. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies-Volume 1, pages 1375-1384. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Entity linking with a knowledge base: Issues, techniques, and solutions", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianyong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiawei", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "IEEE Transactions on Knowledge and Data Engineering", |
|
"volume": "27", |
|
"issue": "2", |
|
"pages": "443--460", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Shen, Jianyong Wang, and Jiawei Han. 2015. En- tity linking with a knowledge base: Issues, tech- niques, and solutions. IEEE Transactions on Knowl- edge and Data Engineering, 27(2):443-460.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Linden: linking named entities with knowledge base via semantic knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianyong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ping", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 21st international conference on World Wide Web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "449--458", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Shen, Jianyong Wang, Ping Luo, and Min Wang. 2012. Linden: linking named entities with knowl- edge base via semantic knowledge. In Proceedings of the 21st international conference on World Wide Web, pages 449-458. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Dropout: a simple way to prevent neural networks from overfitting", |
|
"authors": [ |
|
{ |
|
"first": "Nitish", |
|
"middle": [], |
|
"last": "Srivastava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Krizhevsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "The Journal of Machine Learning Research", |
|
"volume": "15", |
|
"issue": "1", |
|
"pages": "1929--1958", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. 2014. Dropout: a simple way to prevent neural networks from overfitting. The Journal of Machine Learning Research, 15(1):1929-1958.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Reinforcement learning: An introduction", |
|
"authors": [ |
|
{ |
|
"first": "Richard", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Sutton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Barto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Richard S Sutton and Andrew G Barto. 1998. Re- inforcement learning: An introduction, volume 1. MIT press Cambridge.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Neural finegrained entity type classification with hierarchyaware loss. north american chapter of the association for computational linguistics", |
|
"authors": [ |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Denilson", |
|
"middle": [], |
|
"last": "Barbosa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "16--25", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peng Xu and Denilson Barbosa. 2018. Neural fine- grained entity type classification with hierarchy- aware loss. north american chapter of the associ- ation for computational linguistics, 1:16-25.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Neural collective entity linking based on recurrent random walk network learning", |
|
"authors": [ |
|
{ |
|
"first": "Mengge", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weiming", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jinsong", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Linfeng", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yubin", |
|
"middle": [], |
|
"last": "Ge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yubao", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bin", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1906.09320" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mengge Xue, Weiming Cai, Jinsong Su, Linfeng Song, Yubin Ge, Yubao Liu, and Bin Wang. 2019. Neural collective entity linking based on recurrent random walk network learning. arXiv preprint arXiv:1906.09320.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Joint learning of the embedding of words and entities for named entity disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Ikuya", |
|
"middle": [], |
|
"last": "Yamada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hiroyuki", |
|
"middle": [], |
|
"last": "Shindo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hideaki", |
|
"middle": [], |
|
"last": "Takeda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshiyasu", |
|
"middle": [], |
|
"last": "Takefuji", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1601.01343" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ikuya Yamada, Hiroyuki Shindo, Hideaki Takeda, and Yoshiyasu Takefuji. 2016. Joint learning of the em- bedding of words and entities for named entity dis- ambiguation. arXiv preprint arXiv:1601.01343.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Collective entity disambiguation with structured gradient tree boosting", |
|
"authors": [ |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ozan", |
|
"middle": [], |
|
"last": "Irsoy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kazi Shefaet", |
|
"middle": [], |
|
"last": "Rahman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1802.10229" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yi Yang, Ozan Irsoy, and Kazi Shefaet Rahman. 2018. Collective entity disambiguation with struc- tured gradient tree boosting. arXiv preprint arXiv:1802.10229.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "An Illustration of the Dynamic Context Augmentation process." |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "Neural attention mechanism on the dynamic context. The soft attention module assigns higher weights to entities that are more relevant to the target mention. The hard attention module only considers top K entities as dynamic contexts." |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "(a) In-KB accuracy as a function of decision length on AIDA-B dataset; (b) Memory usage as a function of the number of candidates on AIDA-B dataset; (c) Runtime cost (at inference time) as a function of the number of mentions. (each dot represents a document of AIDA-B with |E| = 35)." |
|
}, |
|
"TABREF0": { |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>Dataset</td><td># mention</td><td># doc</td><td>Mentions per doc</td><td>Gold recall</td></tr><tr><td>AIDA-train</td><td>18448</td><td>946</td><td>19.5</td><td>-</td></tr><tr><td>AIDA-A</td><td>4791</td><td>216</td><td>22.1</td><td>97.3</td></tr><tr><td>AIDA-B</td><td>4485</td><td>231</td><td>19.4</td><td>98.3</td></tr><tr><td>MSNBC</td><td>656</td><td>20</td><td>32.8</td><td>98.5</td></tr><tr><td>AQUAINT</td><td>727</td><td>50</td><td>14.5</td><td>94.2</td></tr><tr><td>ACE2004</td><td>257</td><td>36</td><td>7.1</td><td>90.6</td></tr><tr><td>CWEB</td><td>11154</td><td>320</td><td>34.8</td><td>91.1</td></tr><tr><td>WIKI</td><td>6821</td><td>320</td><td>21.3</td><td>92.4</td></tr></table>", |
|
"type_str": "table", |
|
"text": "obtain \u03a6 g by calculating all Dataset Statistics. Gold recall is the percentage of mentions for which the candidate entities contain the ground truth entity." |
|
}, |
|
"TABREF2": { |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>-domain Performance Comparison on the</td></tr><tr><td>AIDA-B Dataset. For our method we show 95% confi-</td></tr><tr><td>dence intervals obtained over 5 runs. DCA-based mod-</td></tr><tr><td>els achieve the best reported scores on this benchmark.</td></tr></table>", |
|
"type_str": "table", |
|
"text": "In" |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>System</td><td>MSBNC</td><td>AQUAINT</td><td>ACE2004</td><td>CWEB</td><td>WIKI</td></tr><tr><td>AIDA (Hoffart et al., 2011)</td><td>79</td><td>56</td><td>80</td><td>58.6</td><td>63</td></tr><tr><td>GLOW (Ratinov et al., 2011)</td><td>75</td><td>83</td><td>82</td><td>56.2</td><td>67.2</td></tr><tr><td>RI (Cheng and Roth, 2013)</td><td>90</td><td>90</td><td>86</td><td>67.5</td><td>73.4</td></tr><tr><td>WNED (Guo and Barbosa, 2016)</td><td>92</td><td>87</td><td>88</td><td>77</td><td>84.5</td></tr><tr><td>Deep-ED (Ganea and Hofmann, 2017)</td><td>93.7</td><td>88.5</td><td>88.5</td><td>77.9</td><td>77.5</td></tr><tr><td>Ment-Norm (Le and Titov, 2018)</td><td>93.9</td><td>88.3</td><td>89.9</td><td>77.5</td><td>78.0</td></tr><tr><td>Prior (p(e|m)) (Ganea and Hofmann, 2017)</td><td>89.3</td><td>83.2</td><td>84.4</td><td>69.8</td><td>64.2</td></tr><tr><td>Berkeley-CNN (Section 2.2)</td><td>89.05</td><td>80.55</td><td>87.32</td><td>67.97</td><td>60.27</td></tr><tr><td>Berkeley-CNN + DCA-SL</td><td colspan=\"5\">93.38 \u00b1 0.2 85.63 \u00b1 0.3 88.73 \u00b1 0.3 71.01 \u00b1 0.1 72.55 \u00b1 0.2</td></tr><tr><td>Berkeley-CNN + DCA-RL</td><td colspan=\"5\">93.65 \u00b1 0.2 88.53 \u00b1 0.3 89.73 \u00b1 0.4 72.66 \u00b1 0.4 73.98 \u00b1 0.2</td></tr><tr><td>ETHZ-Attn (Section 2.2)</td><td>91.97</td><td>84.06</td><td>86.92</td><td>70.07</td><td>74.37</td></tr><tr><td>ETHZ-Attn + DCA-SL</td><td colspan=\"5\">94.57 \u00b1 0.2 87.38 \u00b1 0.5 89.44 \u00b1 0.4 73.47 \u00b1 0.1 78.16 \u00b1 0.1</td></tr><tr><td>ETHZ-Attn + DCA-RL</td><td colspan=\"5\">93.80 \u00b1 0.0 88.25 \u00b1 0.4 90.14 \u00b1 0.0 75.59 \u00b1 0.3 78.84 \u00b1 0.2</td></tr><tr><td/><td/><td colspan=\"4\">Compared Methods. We compare our meth-</td></tr><tr><td/><td/><td colspan=\"4\">ods with following existing systems that report</td></tr><tr><td/><td/><td colspan=\"4\">state-of-the-art results on the test datasets: AIDA-</td></tr><tr><td/><td/><td colspan=\"4\">light (Nguyen et al., 2014) uses a kind of two-</td></tr><tr><td/><td/><td colspan=\"4\">stage collective mapping algorithm and designs</td></tr><tr><td/><td/><td colspan=\"4\">several domain or category related coherence fea-</td></tr><tr><td/><td/><td colspan=\"4\">tures. WNED (Guo and Barbosa, 2016) applies</td></tr><tr><td/><td/><td colspan=\"4\">random walks on carefully built disambiguation</td></tr><tr><td/><td/><td colspan=\"4\">graphs and uses a greedy, iterative and global</td></tr><tr><td/><td/><td colspan=\"4\">disambiguation algorithm based on Information</td></tr><tr><td/><td/><td colspan=\"4\">Theory. Global-RNN (Nguyen et al., 2016) de-</td></tr><tr><td/><td/><td colspan=\"4\">velops a framework based on convolutional neu-</td></tr><tr><td/><td/><td colspan=\"4\">ral networks and recurrent neural networks to</td></tr><tr><td/><td/><td colspan=\"4\">simultaneously model the local and global fea-</td></tr><tr><td/><td/><td colspan=\"4\">tures. MulFocal-Att (Globerson et al., 2016)</td></tr><tr><td/><td/><td colspan=\"4\">adopts a coherence model with a multi-focal at-</td></tr><tr><td/><td/><td colspan=\"4\">tention mechanism. Deep-ED (Ganea and Hof-</td></tr><tr><td/><td/><td colspan=\"4\">mann, 2017) leverages learned neural representa-</td></tr></table>", |
|
"type_str": "table", |
|
"text": "." |
|
}, |
|
"TABREF4": { |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>tions, and uses a deep learning model combined</td></tr><tr><td>with a neural attention mechanism and graphical</td></tr><tr><td>models. Ment-Norm (Le and Titov, 2018) im-</td></tr><tr><td>proving the Deep-ED model by modeling latent</td></tr><tr><td>relations between mentions.</td></tr></table>", |
|
"type_str": "table", |
|
"text": "Performance Comparison on Cross-domain Datasets using F1 score (%). The best results are in bold. Note that our own results all retain two decimal places. Other results with uncertain amount of decimal places are directly retrieved from their original paper." |
|
}, |
|
"TABREF6": { |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>Results (using ETHZ-Attn</td></tr></table>", |
|
"type_str": "table", |
|
"text": "Study on Different Attention Mechanisms on the Dynamic Context." |
|
} |
|
} |
|
} |
|
} |