|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:47:11.440747Z" |
|
}, |
|
"title": "Answering Complex Questions by Combining Information from Curated and Extracted Knowledge Bases", |
|
"authors": [ |
|
{ |
|
"first": "Nikita", |
|
"middle": [], |
|
"last": "Bhutani", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Michigan", |
|
"location": { |
|
"settlement": "Ann Arbor" |
|
} |
|
}, |
|
"email": "nbhutani@umich.edu" |
|
}, |
|
{ |
|
"first": "Xinyi", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Michigan", |
|
"location": { |
|
"settlement": "Ann Arbor" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Kun", |
|
"middle": [], |
|
"last": "Qian", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research", |
|
"location": { |
|
"settlement": "Almaden" |
|
} |
|
}, |
|
"email": "qian.kun@ibm.com" |
|
}, |
|
{ |
|
"first": "Yunyao", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research", |
|
"location": { |
|
"settlement": "Almaden" |
|
} |
|
}, |
|
"email": "yunyaoli@us.ibm.com" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Jagadish", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Michigan", |
|
"location": { |
|
"settlement": "Ann Arbor" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Knowledge-based question answering (KB-QA) has long focused on simple questions that can be answered from a single knowledge source, a manually curated or an automatically extracted KB. In this work, we look at answering complex questions which often require combining information from multiple sources. We present a novel KB-QA system, MULTIQUE, which can map a complex question to a complex query pattern using a sequence of simple queries each targeted at a specific KB. It finds simple queries using a neural-network based model capable of collective inference over textual relations in extracted KB and ontological relations in curated KB. Experiments show that our proposed system outperforms previous KB-QA systems on benchmark datasets, ComplexWebQuestions and WebQuestionsSP.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Knowledge-based question answering (KB-QA) has long focused on simple questions that can be answered from a single knowledge source, a manually curated or an automatically extracted KB. In this work, we look at answering complex questions which often require combining information from multiple sources. We present a novel KB-QA system, MULTIQUE, which can map a complex question to a complex query pattern using a sequence of simple queries each targeted at a specific KB. It finds simple queries using a neural-network based model capable of collective inference over textual relations in extracted KB and ontological relations in curated KB. Experiments show that our proposed system outperforms previous KB-QA systems on benchmark datasets, ComplexWebQuestions and WebQuestionsSP.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Knowledge-based question answering (KB-QA) computes answers to natural language questions based on a knowledge base. Some systems use a curated KB (Bollacker et al., 2008) , and others use an extracted KB (Fader et al., 2014) . The choice of the KB depends on its coverage and knowledge representation: a curated KB uses ontological relations but has limited coverage, while an extracted KB offers broad coverage with textual relations. Commonly, a KB-QA system finds answers by mapping the question to a structured query over the KB. For instance, example question 1 in Fig. 1 can be answered with a query (Rihanna, place of birth, ?) over a curated KB or (Rihanna, 'was born in', ?) over an extracted KB. Most existing systems focus on simple questions answerable with a single KB. Limited efforts have been spent to support complex questions that * NB and XZ contributed equally to this work. require inference over multiple relations and entities. For instance, to answer question 2 in Fig. 1 , we need to infer relations corresponding to expressions 'author of' and 'attend'. In practice, a single KB alone may not provide both high coverage and ontological knowledge to answer such questions. A curated KB might provide information about educational institutions, while an extracted KB might contain information about authorship. Leveraging multiple KBs to answer complex questions is an attractive approach but is seldom studied. Existing methods assume a simple abstraction (Fader et al., 2014) over the KBs and have limited ability to aggregate facts across KBs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 147, |
|
"end": 171, |
|
"text": "(Bollacker et al., 2008)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 205, |
|
"end": 225, |
|
"text": "(Fader et al., 2014)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
|
{ |
|
"start": 1482, |
|
"end": 1502, |
|
"text": "(Fader et al., 2014)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 571, |
|
"end": 577, |
|
"text": "Fig. 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 990, |
|
"end": 996, |
|
"text": "Fig. 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We aim to integrate inference over curated and extracted KBs for answering complex questions. Combining information from multiple sources offers two benefits: evidence scattered across multiple KBs can be aggregated, and evidence from different KBs can be used to complement each other. For instance, inference over ontological relation book author can benefit from textual relation 'is written by'. On the other hand, evidence matching 'attend' may exclusively be in the curated KB.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Example 1 What college did the author of 'The Hobbit' attend? Simple Queries: G 1 : The Hobbit 'is wrtten by' ?a. tem, MULTIQUE, which constructs query patterns to answer complex questions from simple queries each targeting a specific KB. We build upon recent work on semantic parsing using neural network models (Bao et al., 2016; Yih et al., 2015) to learn the simple queries for complex questions. These methods follow an enumerate-encode-compare approach, where candidate queries are first collected and encoded as semantic vectors, which are then compared to the vector representation of the question. The candidate with the highest semantic similarity is then executed over the KB. We propose two key modifications to adapt these models to leverage information from multiple KBs and support complex questions. First, to enable collective inference over ontological and textual relations from the KBs, we align the different relation forms and learn unified semantic representations. Second, due to the lack of availability of fullyannotated queries to train the model, we learn with implicit supervision signals in the form of answers for questions. Our main contributions are:", |
|
"cite_spans": [ |
|
{ |
|
"start": 313, |
|
"end": 331, |
|
"text": "(Bao et al., 2016;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 332, |
|
"end": 349, |
|
"text": "Yih et al., 2015)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We propose a novel KB-QA system, MULTI-QUE, that combines information from curated and extracted knowledge bases to answer complex questions. To the best of our knowledge, this is the first attempt to answer complex questions from multiple knowledge sources. \u2022 To leverage information from multiple KBs, we construct query patterns for complex questions using simple queries each targeting a specific KB. (Section 3 and 5). \u2022 We propose a neural-network based model that aligns diverse relation forms from multiple KBs for collective inference. The model learns to score simple queries using implicit supervision from answers to complex questions (Section 4). \u2022 We provide extensive evaluation on benchmarks demonstrating the effectiveness of proposed ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our goal is to map a complex question Q to a query G, which can be executed against a combination of curated KB K c and extracted KB K o . Knowledge Bases. The background knowledge", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task and Overview", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "source K= {K c , K o } is denoted as K=(V, E, R),", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task and Overview", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "where V is the set of entities and E is a set of triples (s, r, o). A triple denotes a relation r \u2208 R between subject s \u2208 V and object o \u2208 V. The relation set R is a collection of ontological relations R o from K c and textual relations R t from K o . A higher order relation is expressed using multiple triples connected using a special CVT node.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task and Overview", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Complex Question, Q corresponds to a query G which has more than one relation and a single query focus ?x. G is a sequence of partial queries G = (G 1 , G 2 , .., G o ) connected via different join conditions. A partial query has four basic elements: a seed entity s r is the root of the query, a variable node o v corresponds to an answer to the query, a main relation path (s r , p, o v ) is the path that links s r to o v by one or two edges from either R o or R t , and constraints take the form of an entity linked to the main relation by a relation c. By definition, each partial query targets a specific KB. A composition tree C describes how the query G is constructed and evaluated given the partial queries. It includes two functions, simQA and join. simQA is the model for finding simple queries. It enumerates candidates for a simple query, encodes and compares them with the question representation, and evaluates the best candidate. join describes how to join two partial queries i.e. whether they share the query focus or another variable node. Fig. 2 shows the partial queries and composition tree for the running example 1. Overview. Given a complex input question, the task is to first compute a composition tree that describes how to break down the inference into simple partial queries. We then have to gather can- didates for each partial query from both curated and extracted KBs. For each candidate, we have to measure its semantic similarity to the question using a neural-network based model that should be capable of inference over different forms of relations. We then have to join the different partial queries to find the complex query for the question. Since there can be multiple ways to answer a complex question, we derive several full query derivations. We rank them based on the semantic similarity scores of their partial queries, query structure and entity linking scores. We execute the best derivation over the multiple KBs. Fig. 3 shows the architecture of our proposed system, MULTIQUE.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1060, |
|
"end": 1066, |
|
"text": "Fig. 2", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 1964, |
|
"end": 1970, |
|
"text": "Fig. 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Task and Overview", |
|
"sec_num": "2" |
|
}, |
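{

"text": "To make the query representation above concrete, the following is an illustrative Python sketch, not the authors' implementation; the class and field names (PartialQuery, CompositionNode, relation_path, join_var) are our assumptions.\n\nfrom dataclasses import dataclass, field\nfrom typing import List, Optional, Tuple\n\n@dataclass\nclass PartialQuery:\n    seed_entity: str                      # root s_r of the partial query\n    relation_path: Tuple[str, ...]        # 1- or 2-hop main relation path p from R_o or R_t\n    answer_var: str = '?a'                # variable node o_v\n    constraints: List[Tuple[str, str]] = field(default_factory=list)  # (relation c, entity)\n    kb: str = 'curated'                   # each partial query targets one KB\n\n@dataclass\nclass CompositionNode:\n    op: str                               # 'simQA' scores and evaluates a partial query; 'join' combines children\n    query: Optional[PartialQuery] = None  # set when op == 'simQA'\n    children: List['CompositionNode'] = field(default_factory=list)\n    join_var: str = '?a'                  # variable shared by joined subqueries\n\n# Running example 1: (The Hobbit, 'is written by', ?a) over the extracted KB,\n# joined with (?a, education.institution, ?x) over the curated KB.\ng1 = PartialQuery('The Hobbit', ('is written by',), '?a', kb='extracted')\ng2 = PartialQuery('?a', ('education.institution',), '?x', kb='curated')\ntree = CompositionNode('join', children=[CompositionNode('simQA', query=g1), CompositionNode('simQA', query=g2)])",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Task and Overview",

"sec_num": "2"

},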
|
{ |
|
"text": "We first describe how we find candidates for partial queries given an input question. We use a staged generation method with staged states and actions. Compared to previous methods (Yih et al., 2015; Luo et al., 2018) which assume a question has one main relation, our strategy can handle complex questions which have multiple main relations (and hence partial queries). We include a new action A t that denotes the end of the search for a partial query and transition to a state S t . State S t refers back to the composition tree to determine the join condition between the current partial query and the next query. If they share an answer node, candidate generation for the subsequent query can resume independently. Otherwise, it waits for the answers to the current query. We generate (entity, mention) pairs for a question using entity linking (Bast and Haussmann, 2015) and then find elements for query candidates. Fig. 4 depicts our staged generation process. Identify seed entity. The seed s r for a partial query is a linked entity in the question or an answer of a previously evaluated partial query. Identify main relation path. Given a seed entity, we consider all 1-hop and 2-hop paths p. These include both ontological and textual relations. The other end of the path is the variable node o v .", |
|
"cite_spans": [ |
|
{ |
|
"start": 181, |
|
"end": 199, |
|
"text": "(Yih et al., 2015;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 200, |
|
"end": 217, |
|
"text": "Luo et al., 2018)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 850, |
|
"end": 876, |
|
"text": "(Bast and Haussmann, 2015)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 922, |
|
"end": 928, |
|
"text": "Fig. 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Partial Query Candidate Generation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Identify constraints. We next find entity and type constraints. We consider entities that can be connected using constraint relations is a relations 1 to the variable node o v . We also consider entities connected to the variables on the relation path via a single relation. We consider all subsets of constraints to enable queries with multiple constraints. Transition to next partial query. Once candidates of a partial query G i are collected, we refer to the composition tree to determine the start state of the next partial query G i+1 . If the next operation is simQA, we compute the semantic similarity of the candidates of G i using our semantic matching model and evaluate K-best candidates. The answers form the seed for collecting candidates for G i+1 . Otherwise, candidate generation resumes with non-overlapping entity links in G i .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Partial Query Candidate Generation", |
|
"sec_num": "3" |
|
}, |
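{

"text": "As a concrete illustration of the staged generation described above, the sketch below enumerates candidates for one partial query: seeds come from entity links or previous answers, paths are 1- or 2-hop, and all constraint subsets are considered. This is a simplified sketch, not the exact search procedure; the kb.paths and kb.constraint_edges helpers are hypothetical.\n\nfrom itertools import chain, combinations\n\ndef powerset(items):\n    # all subsets of the constraint set, enabling queries with multiple constraints\n    return chain.from_iterable(combinations(items, r) for r in range(len(items) + 1))\n\ndef generate_candidates(question, kb, linked_entities, prev_answers=()):\n    candidates = []\n    for seed in list(linked_entities) + list(prev_answers):  # identify seed entity\n        for path in kb.paths(seed, max_hops=2):              # ontological or textual relations\n            cons = list(kb.constraint_edges(seed, path))     # entity and type constraints\n            for subset in powerset(cons):                    # identify constraints\n                candidates.append((seed, path, '?a', tuple(subset)))\n    return candidates",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Partial Query Candidate Generation",

"sec_num": "3"

},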
|
{ |
|
"text": "We now describe our neural-network based model which infers over different relation forms and computes the semantic similarity of a partial query candidate to the question. Fig. 5 shows the architecture of our model. To encode the question, we replace all seed (constraint) entity mentions used in the query by dummy tokens w E (w C ). To encode the partial query, we consider its query elements, namely the main relation path and constraint relations. Given the vector representations q for the question Q and g for the partial query G i , we concatenate them and feed a multi-layer perceptron (MLP). The MLP outputs a scalar which we use as the semantic similarity S sem (Q, G i ). We describe in detail the encoding methods for the question and different relation forms in the main relation path. We also describe other design elements and the learning objective.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 173, |
|
"end": 179, |
|
"text": "Fig. 5", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Semantic Matching", |
|
"sec_num": "4" |
|
}, |
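{

"text": "A minimal PyTorch sketch of the encode-and-compare step described above: the question vector q and query vector g are concatenated and fed to an MLP whose scalar output serves as S_sem(Q, G_i). The hidden size (1024) and sigmoid activation follow the implementation details in Section 6.1; everything else (dimensions, batch shapes) is assumed.\n\nimport torch\nimport torch.nn as nn\n\nclass SemanticMatcher(nn.Module):\n    def __init__(self, q_dim=300, g_dim=300, hidden=1024):\n        super().__init__()\n        self.mlp = nn.Sequential(\n            nn.Linear(q_dim + g_dim, hidden), nn.Sigmoid(),\n            nn.Linear(hidden, hidden), nn.Sigmoid(),\n            nn.Linear(hidden, 1), nn.Sigmoid())   # scalar similarity in (0, 1)\n\n    def forward(self, q, g):\n        return self.mlp(torch.cat([q, g], dim=-1)).squeeze(-1)\n\nmatcher = SemanticMatcher()\nscores = matcher(torch.randn(8, 300), torch.randn(8, 300))  # S_sem for a batch of 8 candidates",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Semantic Matching",

"sec_num": "4"

},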
|
{ |
|
"text": "Encoding question. We encode a question Q using its token sequence and dependency structure. Since a complex question tends to be long, encoding its dependency tree captures any long-range dependencies. Let w 1 , w 2 , . . . , w n be the tokens in Q, where seed (constraint) entity mentions have been replaced with w E (w C ). We map the tokens to vectors q w 1 , q w 2 , . . . , q w n using an embedding matrix E w and use an LSTM to encode the sequence to a latent vector q w . Similarly, we encode the dependency tree into a latent vector q dep .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "4.1" |
|
}, |
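{

"text": "A sketch of the question encoder under the stated design: tokens (with entity mentions replaced by dummy tokens w_E / w_C) are embedded via E_w and run through a BiLSTM; the final hidden states give q_w. The dependency-tree encoder for q_dep is analogous and omitted here; vocabulary handling and sizes are assumptions.\n\nimport torch\nimport torch.nn as nn\n\nclass QuestionEncoder(nn.Module):\n    def __init__(self, vocab_size, emb_dim=300, hidden=150):\n        super().__init__()\n        self.embed = nn.Embedding(vocab_size, emb_dim)  # embedding matrix E_w\n        self.lstm = nn.LSTM(emb_dim, hidden, batch_first=True, bidirectional=True)\n\n    def forward(self, token_ids):\n        h_all, (h_last, _) = self.lstm(self.embed(token_ids))\n        q_w = torch.cat([h_last[0], h_last[1]], dim=-1)  # latent question vector q_w\n        return h_all, q_w                                # h_all later feeds the attention mechanism\n\nenc = QuestionEncoder(vocab_size=50000)\nh_all, q_w = enc(torch.randint(0, 50000, (8, 12)))       # a batch of 12-token questions",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Model Architecture",

"sec_num": "4.1"

},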
|
{ |
|
"text": "Encoding main relation path. The main relation path can have different forms, a textual relation from K o or an ontological relation from K c . In order to collectively infer over them in the same space, we first align the textual relations to ontological relations. For instance, we find textual relations'is author of', 'written by' can be aligned to ontological relation book.author. We describe how we derive the relation alignments in Sec. 4.2. Given a relation alignment, we encode each relation form i in the alignment to a latent vector r i . We apply a max pooling over the latent vectors of different relations in the alignment to obtain a unified semantic representation over the different relation forms. Doing so enables the model to learn better representations of an ontological relation which has complementary textual relations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "To encode each relation form into vector r i , we consider both sequence of tokens and ids (Luo et al., 2018) . For instance, the id sequence of the relation in Fig. 5 is {book author}, while its token sequence is {'book', 'author'}. We embed the tokens into vectors using an embedding matrix and use average embedding r w as the token-level representation. We translate the relation directly using another embedding matrix E r of relation paths to derive its id-level representation r id i . The vector representation of a path then is", |
|
"cite_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 109, |
|
"text": "(Luo et al., 2018)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 161, |
|
"end": 167, |
|
"text": "Fig. 5", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "r i = [r w i ; r id i ].", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "4.1" |
|
}, |
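{

"text": "The relation-path encoding above can be sketched as follows: each relation form in an alignment gets a token-level representation (average word embedding) and an id-level representation from E_r, concatenated as r_i = [r_i^w; r_i^id]; max pooling over the aligned forms yields the unified representation. Tensor shapes and vocabulary handling are assumptions.\n\nimport torch\nimport torch.nn as nn\n\nclass RelationEncoder(nn.Module):\n    def __init__(self, word_vocab, rel_vocab, dim=300):\n        super().__init__()\n        self.word_emb = nn.Embedding(word_vocab, dim)  # word embeddings for relation tokens\n        self.rel_emb = nn.Embedding(rel_vocab, dim)    # embedding matrix E_r over relation ids\n\n    def encode_form(self, word_ids, rel_id):\n        r_w = self.word_emb(word_ids).mean(dim=0)      # token-level representation r_i^w\n        r_id = self.rel_emb(rel_id)                    # id-level representation r_i^id\n        return torch.cat([r_w, r_id], dim=-1)          # r_i = [r_i^w; r_i^id]\n\n    def forward(self, aligned_forms):\n        # aligned_forms: (word_ids, rel_id) pairs for e.g. book.author,\n        # 'is author of' and 'written by'; pool them into one unified vector\n        vecs = torch.stack([self.encode_form(w, r) for w, r in aligned_forms])\n        return vecs.max(dim=0).values                  # max pooling over relation forms",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Model Architecture",

"sec_num": "4.1"

},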
|
{ |
|
"text": "Encoding constraints. Similarly, we encode the constraint relations c i in by combining its tokenlevel representation c w i and id-level representation c id i . Given the unified vector representation of a relation path, and the latent vectors of the constraint relations, we apply max pooling to obtain the compositional semantic representation g of the query.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Attention mechanism. Simple questions contain expressions for matching one main relation path. A complex question, however, has expressions for matching multiple relation paths, which could interfere with each other. For instance, words 'college' and 'attend' can distract the matching of the phrase 'author of' to the relation book.author. We mitigate this issue by improving the question representation using an attention mechanism (Luong et al., 2015) . The idea is to learn to emphasize parts of the question that are relevant to a context derived using the partial query vector g. Formally, given all hidden vectors h t at time step t \u2208 {1, 2, . . . , n} of the token-level representation of the question, we derive a context vector c as the weighted sum of all the hidden states:", |
|
"cite_spans": [ |
|
{ |
|
"start": 434, |
|
"end": 454, |
|
"text": "(Luong et al., 2015)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "c = n t=1 \u03b1 t h t", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "where \u03b1 t corresponds to an attention weight. The attention weights are computed as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u03b1 = sof tmax(W tanh(W q q w + W g g))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "where W, W g , W q are network parameters. The attention weights indicate how much the model focuses on each token given a partial query.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "4.1" |
|
}, |
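{

"text": "A sketch of this attention step: per-token scores are squashed through tanh and softmax to give \u03b1, and the context c is the \u03b1-weighted sum of the hidden states h_t. One assumption is made explicit here: we add a projection of h_t into the score so that the weights differ across tokens, which the compact formula above leaves implicit.\n\nimport torch\nimport torch.nn as nn\n\nclass QueryGuidedAttention(nn.Module):\n    def __init__(self, h_dim, q_dim, g_dim, att_dim=128):\n        super().__init__()\n        self.W_q = nn.Linear(q_dim, att_dim, bias=False)\n        self.W_g = nn.Linear(g_dim, att_dim, bias=False)\n        self.W_h = nn.Linear(h_dim, att_dim, bias=False)  # assumed per-token term\n        self.w = nn.Linear(att_dim, 1, bias=False)        # maps to one scalar score per token\n\n    def forward(self, h, q_w, g):\n        # h: (n, h_dim) token hidden states; q_w: (q_dim,); g: (g_dim,)\n        scores = self.w(torch.tanh(self.W_h(h) + self.W_q(q_w) + self.W_g(g)))\n        alpha = torch.softmax(scores, dim=0)              # attention weights alpha_t\n        return (alpha * h).sum(dim=0)                     # context c = sum_t alpha_t h_t",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Model Architecture",

"sec_num": "4.1"

},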
|
{ |
|
"text": "Objective function. We concatenate the context vector c, question dependency vector q dep and query vector g and feed to a multi-layer perceptron (MLP). It is a feed-forward neural network with two hidden layers and a scalar output neuron indicating the semantic similarity score S sem (q, G i ). We train the model using cross entropy loss,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "loss = ylog(S sem ) + (1 \u2212 y)log(1 \u2212 S sem )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "where y \u2208 {0, 1} is a label indicating whether G i is correct or not. Training the model requires a) an alignment of equivalent relation forms, and b) examples (question, partial query) pairs. We describe how we generate them given QA pairs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "4.1" |
|
}, |
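{

"text": "Training then reduces to binary cross entropy between S_sem and the candidate label y, as sketched below; the optimizer choice and the matcher object (from the earlier sketch) are assumptions.\n\nimport torch\nimport torch.nn as nn\n\nbce = nn.BCELoss()  # loss = -[y log(S_sem) + (1 - y) log(1 - S_sem)]\n\ndef training_step(matcher, optimizer, q, g, y):\n    # q, g: encoded question/query batches; y: 0/1 labels for the candidates\n    optimizer.zero_grad()\n    s_sem = matcher(q, g)\n    loss = bce(s_sem, y.float())\n    loss.backward()\n    optimizer.step()\n    return loss.item()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Model Architecture",

"sec_num": "4.1"

},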
|
{ |
|
"text": "An open KB has a huge vocabulary of relations. Aligning the textual relations to ontological relations for collective inference can become challenging if the textual relations are not canonicalized. We, first learn embeddings for the textual relations and cluster them to obtain canonicalized relation clusters (Vashishth et al., 2018) . For instance, a cluster can include both 'is author of' and 'authored'. We use the canonicalized textual relations to derive an alignment to the ontological relations. We derive this alignment based on the support entity pairs (s, o) for a pair of ontological relation and canonicalized textual relation. For instance, relations 'is author of' and book.author in our example question will share more entities than relations 'is author of' and education.institution. The alignment is based on a support threshold i.e. minimum number of support entity pairs for a pair of relations. In our experiments, we set the threshold to 5 to avoid sparse and noisy signals in the alignment.", |
|
"cite_spans": [ |
|
{ |
|
"start": 311, |
|
"end": 335, |
|
"text": "(Vashishth et al., 2018)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Relation Alignment", |
|
"sec_num": "4.2" |
|
}, |
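{

"text": "A simplified sketch of this alignment step: count the (subject, object) pairs shared by an ontological relation and a canonicalized textual relation, and keep relation pairs whose support reaches the threshold (5 in our experiments). The triple formats are assumptions, and relation canonicalization is treated as already done.\n\nfrom collections import defaultdict\n\ndef align_relations(curated_triples, extracted_triples, support_threshold=5):\n    pairs_by_rel = defaultdict(set)\n    for s, r, o in curated_triples:\n        pairs_by_rel[r].add((s, o))\n    support = defaultdict(int)\n    for s, r_text, o in extracted_triples:        # r_text: canonicalized textual relation\n        for r_onto, pairs in pairs_by_rel.items():\n            if (s, o) in pairs:\n                support[(r_onto, r_text)] += 1    # one more support entity pair\n    return {rels for rels, n in support.items() if n >= support_threshold}\n\n# e.g. ('book.author', 'is author of') is kept if they share at least 5 entity pairs",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Relation Alignment",

"sec_num": "4.2"

},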
|
{ |
|
"text": "Obtaining questions with fully-annotated queries is expensive, especially when queries are complex. In contrast, obtaining answers is easier. In such a setting, the quality of a query candidate is often measured indirectly by computing the F 1 score of its answers to the labeled answers (Peng et al., 2017a) . However, for complex questions, answers to the partial queries may have little or no overlap with the labeled answers. We, therefore, adopt an alternative scoring strategy where we estimate the quality of a partial query as the best F 1 score of all its full query derivations. Formally, we compute a score", |
|
"cite_spans": [ |
|
{ |
|
"start": 288, |
|
"end": 308, |
|
"text": "(Peng et al., 2017a)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implicit Supervision", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "V (G (k) i ) for a partial query as: V (G (k) i ) = max i\u2264t\u2264n\u22121 F 1 (D (k) t+1 )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implicit Supervision", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "where D t denotes the derivation at level t and n denotes the number of partial queries. Such implicit supervision can be susceptible to spurious derivations which happen to evaluate to the correct answers but do not capture the semantic meaning of a question. We, thus, consider additional priors to promote true positive and false negative examples in the training data. We use L(Q, G i that are mentioned in the question Q. We also use C(Q, G (k) i ) as the fraction of relation words that hit a small hand-crafted lexicon of co-occurring relation and question words. We estimate the quality of a candidate as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implicit Supervision", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "V (G (k) i ) + \u03b3 L(Q, G (k) i ) + \u03b4 C(Q, G (k) i ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implicit Supervision", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "We consider a candidate a positive example if its score is larger than a threshold (0.5) and negative otherwise.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implicit Supervision", |
|
"sec_num": "4.3" |
|
}, |
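{

"text": "The label derivation just described can be sketched as follows. The helpers f1, linked_entity_fraction and lexicon_hit_fraction are hypothetical stand-ins for the F_1 computation and the L and C priors, derivation objects with an answers field are assumed, and the gamma and delta weights are placeholders since the paper does not specify them.\n\ndef candidate_score(partial_query, derivations, question, gold_answers, gamma=0.1, delta=0.1):\n    # V: best F_1 over all full query derivations that extend this partial query\n    v = max((f1(d.answers, gold_answers) for d in derivations), default=0.0)\n    l = linked_entity_fraction(question, partial_query)  # prior L(Q, G_i)\n    c = lexicon_hit_fraction(question, partial_query)    # prior C(Q, G_i)\n    return v + gamma * l + delta * c\n\ndef label(partial_query, derivations, question, gold_answers, threshold=0.5):\n    # positive training example iff the estimated quality exceeds the threshold (0.5)\n    return candidate_score(partial_query, derivations, question, gold_answers) > threshold",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Implicit Supervision",

"sec_num": "4.3"

},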
|
{ |
|
"text": "In this work, we focus on constructing complex queries using a sequence of simple partial queries, each with one main relation path. Since the original question does not have to be chunked into simple questions, constructing composition trees for such questions is fairly simple. Heuristically, a composition tree can simply be derived by estimating the number of main relations (verb phrases) in the question and the dependency between them (subordinating or coordinating). We use a more sophisticated model (Talmor and Berant, 2018) to derive the composition tree. The post-order traversal of the tree yields the order in which partial queries should be executed.", |
|
"cite_spans": [ |
|
{ |
|
"start": 509, |
|
"end": 534, |
|
"text": "(Talmor and Berant, 2018)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Query Composition", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Given a computation tree, we adopt a beam search and evaluate best k candidates for a partial query at each level. This helps maintain tractability in the large space of possible complex query derivations. The semantic matching model only independently scores the partial queries and not complete derivations. We, thus, need to find the best derivation that captures the meaning of the complex input question. To determine the best derivation, we aggregate the scores over the partial queries and consider additional features such as entities and structure of the query. We train a log-linear model on a set of (question-answer) pairs using features such as semantic similarity scores, entity linking scores, number of constraints in the query, number of variables, number of relations and number of answer entities. Given the best scoring derivation, we translate it to a KB query and evaluate it to return answers to the ques-tion. Such an approach has been shown to be successful in answering complex questions over a single knowledge base (Bhutani et al., 2019) . In this work, we extend that approach to scenarios when multiple KBs are available.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1043, |
|
"end": 1065, |
|
"text": "(Bhutani et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Query Composition", |
|
"sec_num": "5" |
|
}, |
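{

"text": "The overall composition procedure can be sketched as a beam search over the composition tree: evaluate partial queries in post-order, keep the k best candidates per level, then re-rank complete derivations with the log-linear model. All helpers here (postorder, collect_candidates, score_candidates, loglinear_rerank, execute) are assumed stand-ins for the components described above.\n\ndef answer(question, composition_tree, kbs, k=3):\n    derivations = [[]]                                   # partially built derivations\n    for node in composition_tree.postorder():            # execution order of partial queries\n        expanded = []\n        for deriv in derivations:\n            prev_answers = deriv[-1].answers if deriv else ()\n            cands = collect_candidates(question, kbs, node, prev_answers)\n            for cand in score_candidates(question, cands)[:k]:  # beam of the k best\n                expanded.append(deriv + [cand])\n        derivations = expanded\n    best = loglinear_rerank(question, derivations)[0]    # features: S_sem, entity links, structure\n    return execute(best, kbs)                            # run the best query over the KBs",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Query Composition",

"sec_num": "5"

},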
|
{ |
|
"text": "We present experiments that show MULTIQUE outperforms existing KB-QA systems on complex questions. Our approach to construct queries from simple queries and aggregate multiple KBs is superior to methods which map questions directly to queries and use raw text instead.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Datasets. We use two benchmark QA datasets:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "\u2022 CompQWeb (Talmor and Berant, 2018) (Yin et al., 2015) . We evaluate on this dataset to demonstrate our proposed methods are effective on questions of varying complexity. Knowledge Bases. We use the Freebase 2 dump as the curated KB. We construct an extracted KB using StanfordOpenIE (Angeli et al., 2015) over the snippets released by (Talmor and Berant, 2018) for CompQWeb and (Sun et al., 2018) for WebQSP. Evaluation Metric. We report averaged F 1 scores of the predicted answers. We additionally compute precision@1 as the fraction of questions that were answered with the exact gold answer. Baseline systems. We compare against two systems that can handle multiple knowledge sources.", |
|
"cite_spans": [ |
|
{ |
|
"start": 11, |
|
"end": 36, |
|
"text": "(Talmor and Berant, 2018)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 37, |
|
"end": 55, |
|
"text": "(Yin et al., 2015)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 285, |
|
"end": 306, |
|
"text": "(Angeli et al., 2015)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 337, |
|
"end": 362, |
|
"text": "(Talmor and Berant, 2018)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 380, |
|
"end": 398, |
|
"text": "(Sun et al., 2018)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "\u2022 GraftNet+ (Sun et al., 2018) : Given a question, it identifies a KB subgraph potentially containing the answer, annotates it with text and performs a binary classification over the nodes in the subgraph to identify the answer node(s). We point that it collects subgraphs using 2-hop paths from a seed entity. Since this cannot scale for complex questions which can have arbitrary length paths, we follow our query composition strategy to generate subgraphs. We annotate the subgraphs with snippets released with the datasets. We call this approach GraftNet+. \u2022 OQA (Fader et al., 2014) : It is the first KB-QA system to combine curated KB and extracted KB. It uses a cascade of operators to paraphrase and parse questions to queries, and to rewrite and execute queries. It does not generate a unified representation of relation forms across the KBs. For comparison, we augment its knowledge source with our extracted KB and evaluate the model released by the authors.", |
|
"cite_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 30, |
|
"text": "(Sun et al., 2018)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 567, |
|
"end": 587, |
|
"text": "(Fader et al., 2014)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "Several other KB-QA systems (Cui et al., 2017; Abujabal et al., 2017; Bao et al., 2016) use only Freebase and handle simple questions with a few constraints. SplitQA (Talmor and Berant, 2018) and MHQA (Song et al., 2018) handle complex questions, but use web as the knowledge source. Implementation Details. We used NVIDIA GeForce GTX 1080 Ti GPU for our experiments. We initialize word embeddings using GloVe (Pennington et al., 2014) word vectors of dimension 300. We use BiLSTMs to encode the question token and dependency sequences. We use 1024 as the size of hidden layer of MLP and sigmoid as the activation function.", |
|
"cite_spans": [ |
|
{ |
|
"start": 28, |
|
"end": 46, |
|
"text": "(Cui et al., 2017;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 47, |
|
"end": 69, |
|
"text": "Abujabal et al., 2017;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 70, |
|
"end": 87, |
|
"text": "Bao et al., 2016)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 166, |
|
"end": 191, |
|
"text": "(Talmor and Berant, 2018)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 201, |
|
"end": 220, |
|
"text": "(Song et al., 2018)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "We evaluate several configurations. We consider candidates from curated KB as the only available knowledge source to answer questions and use it as a baseline (cKB-only). To demonstrate that inference over curated KB can benefit from open KB, we consider diverse relation forms of curated KB facts from open KB (cKB+oKB). Lastly, we downsample the curated KB candidates to 90%, 75% and 50% to simulate incompleteness in KB. Effectiveness on complex questions. Our proposed system outperforms existing approaches on answering complex questions (Table 1) . Even though both MULTIQUE and GraftNet+ use the same information sources, our semantic matching model outperforms node classification. Also, using extracted facts instead of raw text enables us to exploit the relations between entities in the text. We also achieve significantly higher F 1 than OQA that uses multiple KB but relies on templates for parsing questions to queries directly and does not deeply integrate information from multiple KBs. GraftNet+ (Sun et al., 2018) 31.96/44.78 57.21/68.98 OQA (Fader et al., 2014) 0.42/42.85 21.78/32.63 SplitQA (Talmor and Berant, 2018) -/27.50 -MHQA (Song et al., 2018) -/30.10 - Table 1 : Average F 1 / precision@1 of baseline systems and MULTIQUE in different configurations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1013, |
|
"end": 1031, |
|
"text": "(Sun et al., 2018)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 1060, |
|
"end": 1080, |
|
"text": "(Fader et al., 2014)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1112, |
|
"end": 1137, |
|
"text": "(Talmor and Berant, 2018)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1152, |
|
"end": 1171, |
|
"text": "(Song et al., 2018)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 543, |
|
"end": 552, |
|
"text": "(Table 1)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1182, |
|
"end": 1189, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "In contrast, we can construct complex query patterns from simple queries, and can infer over diverse relation forms in the KB facts. SplitQA (Talmor and Berant, 2018) and MHQA (Song et al., 2018 ) use a similar approach to answer complex questions using a sequence of simpler questions, but rely solely on noisy web data. Clearly, by combining the knowledge from curated KB, we can answer complex questions more reliably.", |
|
"cite_spans": [ |
|
{ |
|
"start": 176, |
|
"end": 194, |
|
"text": "(Song et al., 2018", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Effectiveness on simple questions. An evaluation on simpler questions demonstrates that MUL-TIQUE can adapt to questions of varying complexity. We achieve the comparable F 1 score on the as other KB-QA systems that adopt an enumerateencode-compare strategy. STAGG (Yih et al., 2015), a popular KB-QA system uses a similar approach for candidate generation but improves the results using feature engineering and by augmenting entity linking with external knowledge and only uses curated KB. MULTIQUE uses multiple KBs, and can be integrated with a better entity linking and scoring scheme for derivations. KB completeness. Our results show that including information from extracted KB helps improve inference over ontological relations and facts for complex questions (as indicated by 3.38 F 1 gain in cKB+oKB). It instead hurts the performance on WebQSP dataset. This can be attributed to the coverage of the accompanying textual data sources of the two datasets. We found that for only 26% of the questions in WebQSP, an extracted fact could be aligned with a curated KB candidate. In contrast, there were 55% such questions in the Com-pQWeb. This illustrates that considering irrelevant, noisy facts does not benefit when curated KB is complete. Such issues can be mitigated by using a more robust retrieval mechanism for text snippets or facts from extracted KB. A KB-QA system must rely on an extracted KB when curated KB is incomplete. This is reflected in the dramatic increase in the percentage of hybrid queries when curated KB candidates were downsampled (e.g., from 17% to 40% at 90% completeness). As expected, the overall F 1 drops because the precise curated KB facts become unavailable. Despite the noise in extracted KBs, we found 5-15% of the hybrid queries found a correct answer. Surprisingly, we find 55% of the queries changed when the KB is downsampled to 90%, but 89% of them did not hurt the average F 1 . This indicates that the system could find alternative queries when KB candidates are dropped. Ablation Study. Queries for complex questions often have additional constraints on the main relation path. 35% of the queries in CompQWeb had at least one constraint, while most of the queries (85%) in WebQSP are simple. Ignoring constraints in candidate generation and in semantic matching drops the overall F 1 score by 9.8% (8.6%) on CompQWeb (WebQSP) (see Table 2 ). Complex questions also are long and contain expressions for matching different relation paths. Including the attention mechanism helps focus on relevant parts of the question and improves the relation inference. We found F 1 drops significantly on CompQWeb when attention is disabled. Re-ranking complete query derivations by additionally considering entity linking scores and query structure consistently helps find better queries. We examined the quality of top-k query derivations (see Table 3 ). For a large majority of the questions, query with the highest F 1 score was among the top-10 candidates. A better re-ranking model, thus, could help achieve higher F 1 score. We also observed that incorporating prior domain knowledge in deriving labels for partial queries at training was useful for complex questions. Qualitative Analysis. The datasets also provide queries over Freebase. We used them to analyze the quality of our training data and the queries generated by our system. We derive labels for each partial query candidate by comparing it to the labeled query. On an average, 4 candidates per ques- tion were labeled correct. 
We then compare them with the labels derived using implicit supervision. We found on average 3.06 partial queries were true positives and 103.08 were true negatives, with few false positives (1.72) and false negatives (0.78). We further examined if the queries which achieve a non-zero F 1 were spurious. We compared the query components (entities, relations, filter clauses, ordering constraints) of such queries with labeled queries. We found high precision (81.89%) and recall (76.19%) of query components, indicating the queries were indeed precise. Error Analysis. We randomly sampled 50 questions which achieve low F 1 score (< 0.1) and analyzed the queries manually. We found 38% errors were made because of incorrect entities in the query. 92% of the entity linking errors were made at the first partial query. These errors get propagated because we find candidate queries using a staged generation. A better entity linking system can help boost the overall performance. 12% of the queries had an incorrect curated KB relation and 18% of the queries had an incorrect extracted KB relation. In a large fraction of cases (32%) the predicted and true relation paths were ambiguous given the question (e.g., kingdom.rulers vs government for \"Which queen presides over the location...\"). This indicates that relation inference is difficult for highly similar relation forms. Future Work. Future KB-QA systems targeting multiple KBs should address two key challenges. They should model whether a simple query is answerable from a given a KB or not. It should query the reliable, extracted KBs only when the curated KB lacks sufficient evidence. This could help improve overall precision. Second, while resolving multiple query components simultaneous is beneficial, the inference could be improved if the question representation reflected all prior inferences.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 2383, |
|
"end": 2390, |
|
"text": "Table 2", |
|
"ref_id": "TABREF6" |
|
}, |
|
{ |
|
"start": 2883, |
|
"end": 2890, |
|
"text": "Table 3", |
|
"ref_id": "TABREF8" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "KB-QA methods can be broadly classified into: retrieval-based methods, template-based methods and semantic parsing-based methods. Retrievalbased methods use relation extraction or distributed representations (Bordes et al., 2014; Xu et al., 2016) to identify answers from the KB but cannot handle questions where multiple entities and relations have to be identified and aggregated. Template-based methods rely on manually-crafted templates which can encode very complex query logic (Unger et al., 2012; Zou et al., 2014) , but suffer from the limited coverage of templates. Our approach is inspired by (Abujabal et al., 2017) , which decomposes complex questions to simple questions answerable from simple templates. However, we learn solely from question-answer pairs and leverage multiple KBs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 208, |
|
"end": 229, |
|
"text": "(Bordes et al., 2014;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 230, |
|
"end": 246, |
|
"text": "Xu et al., 2016)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 483, |
|
"end": 503, |
|
"text": "(Unger et al., 2012;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 504, |
|
"end": 521, |
|
"text": "Zou et al., 2014)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 603, |
|
"end": 626, |
|
"text": "(Abujabal et al., 2017)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Modern KB-QA systems use neural network models for semantic matching. These use an encode-compare approach (Luo et al., 2018; Yih et al., 2015; Yu et al., 2017) , wherein continuous representations of question and query candidates are compared to pick a candidate which is executed to find answers. These methods require question-answer pairs as training data and focus on a single knowledge source. Combining multiple knowledge sources in KB-QA has been studied before, but predominantly for textual data. (Das et al., 2017b) uses memory networks and universal schema to support inference on the union of KB and text. (Sun et al., 2018) enriches KB subgraphs with entity links from text documents and formulates KB-QA as a node classification task. The key limitations for these methods are that a) they cannot handle highly compositional questions and b) they ignore the relational structure between the entities in the text. Our proposed system additionally uses an extracted KB that explicitly models the relations between entities and can compose complex queries from simple queries.", |
|
"cite_spans": [ |
|
{ |
|
"start": 107, |
|
"end": 125, |
|
"text": "(Luo et al., 2018;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 126, |
|
"end": 143, |
|
"text": "Yih et al., 2015;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 144, |
|
"end": 160, |
|
"text": "Yu et al., 2017)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 507, |
|
"end": 526, |
|
"text": "(Das et al., 2017b)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 619, |
|
"end": 637, |
|
"text": "(Sun et al., 2018)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We formulate complex query construction as a search problem. This is broadly related to structured output prediction (Peng et al., 2017b) and path finding (Xiong et al., 2017; Das et al., 2017a) methods which learn to navigate the search space using supervision from question-answer pairs. These methods are effective for answering simple questions because the search space is small and the rewards to guide the search can be estimated reliably. We extend the ideas of learning from implicit supervision (Liang et al., 2016) and integrate it with partial query evaluation and priors to 9 preserve the supervision signals.", |
|
"cite_spans": [ |
|
{ |
|
"start": 117, |
|
"end": 137, |
|
"text": "(Peng et al., 2017b)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 155, |
|
"end": 175, |
|
"text": "(Xiong et al., 2017;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 176, |
|
"end": 194, |
|
"text": "Das et al., 2017a)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 504, |
|
"end": 524, |
|
"text": "(Liang et al., 2016)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We have presented a new KB-QA system that uses both curated and extracted KBs to answer complex questions. It composes complex queries using simpler queries each targeting a KB. It integrates an enumerate-encode-compare approach and a novel neural-network based semantic matching model to find partial queries. Our system outperforms existing state-of-the-art systems on highly compositional questions, while achieving comparable performance on simple questions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "common.topic.notable types,common.topic.notable for", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://commondatastorage.googleapis.com/freebasepublic/rdf/freebase-rdf-2015-08-02-00-00.gz", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Automated template generation for question answering over knowledge graphs", |
|
"authors": [ |
|
{ |
|
"first": "Abdalghani", |
|
"middle": [], |
|
"last": "Abujabal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohamed", |
|
"middle": [], |
|
"last": "Yahya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirek", |
|
"middle": [], |
|
"last": "Riedewald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerhard", |
|
"middle": [], |
|
"last": "Weikum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "International World Wide Web Conferences Steering Committee", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1191--1200", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abdalghani Abujabal, Mohamed Yahya, Mirek Riede- wald, and Gerhard Weikum. 2017. Automated tem- plate generation for question answering over knowl- edge graphs. In Proc. WWW '17, pages 1191-1200. International World Wide Web Conferences Steering Committee.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Leveraging linguistic structure for open domain information extraction", |
|
"authors": [ |
|
{ |
|
"first": "Gabor", |
|
"middle": [], |
|
"last": "Angeli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Melvin Jose Johnson", |
|
"middle": [], |
|
"last": "Premkumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proc. ACL '15", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "344--354", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gabor Angeli, Melvin Jose Johnson Premkumar, and Christopher D Manning. 2015. Leveraging linguis- tic structure for open domain information extraction. In Proc. ACL '15, volume 1, pages 344-354.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Constraint-based question answering with knowledge graph", |
|
"authors": [ |
|
{ |
|
"first": "Junwei", |
|
"middle": [], |
|
"last": "Bao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Duan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhao", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tiejun", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proc. COLING '16", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2503--2514", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Junwei Bao, Nan Duan, Zhao Yan, Ming Zhou, and Tiejun Zhao. 2016. Constraint-based question an- swering with knowledge graph. In Proc. COLING '16, pages 2503-2514.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "More accurate question answering on freebase", |
|
"authors": [ |
|
{ |
|
"first": "Hannah", |
|
"middle": [], |
|
"last": "Bast", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elmar", |
|
"middle": [], |
|
"last": "Haussmann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proc. CIKM '15", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1431--1440", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hannah Bast and Elmar Haussmann. 2015. More accu- rate question answering on freebase. In Proc. CIKM '15, pages 1431-1440. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Learning to answer complex questions over knowledge bases with query composition", |
|
"authors": [ |
|
{ |
|
"first": "Nikita", |
|
"middle": [], |
|
"last": "Bhutani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xinyi", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 28th ACM International Conference on Information and Knowledge Management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "739--748", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikita Bhutani, Xinyi Zheng, and HV Jagadish. 2019. Learning to answer complex questions over knowl- edge bases with query composition. In Proceedings of the 28th ACM International Conference on In- formation and Knowledge Management, pages 739- 748.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Freebase: a collaboratively created graph database for structuring human knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Kurt", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Bollacker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Evans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Praveen", |
|
"middle": [], |
|
"last": "Paritosh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Sturge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamie", |
|
"middle": [], |
|
"last": "Taylor", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proc. SIGMOD '08", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1247--1250", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kurt D. Bollacker, Colin Evans, Praveen Paritosh, Tim Sturge, and Jamie Taylor. 2008. Freebase: a col- laboratively created graph database for structuring human knowledge. In Proc. SIGMOD '08, pages 1247-1250. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Open question answering with weakly supervised embedding models", |
|
"authors": [ |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicolas", |
|
"middle": [], |
|
"last": "Usunier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proc. ECML '14", |
|
"volume": "8724", |
|
"issue": "", |
|
"pages": "165--180", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Antoine Bordes, Jason Weston, and Nicolas Usunier. 2014. Open question answering with weakly super- vised embedding models. In Proc. ECML '14, vol- ume 8724, pages 165-180. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Kbqa: learning question answering over qa corpora and knowledge bases", |
|
"authors": [ |
|
{ |
|
"first": "Wanyun", |
|
"middle": [], |
|
"last": "Cui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanghua", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haixun", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yangqiu", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{

"first": "Seung-Won",

"middle": [],

"last": "Hwang",

"suffix": ""

},

{

"first": "Wei",

"middle": [],

"last": "Wang",

"suffix": ""

}
|
], |
|
"year": 2017, |
|
"venue": "Proc. VLDB '17", |
|
"volume": "10", |
|
"issue": "", |
|
"pages": "565--576", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wanyun Cui, Yanghua Xiao, Haixun Wang, Yangqiu Song, Seung-won Hwang, and Wei Wang. 2017. Kbqa: learning question answering over qa corpora and knowledge bases. Proc. VLDB '17, 10(5):565- 576.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Go for a walk and arrive at the answer: Reasoning over paths in knowledge bases using reinforcement learning", |
|
"authors": [ |
|
{ |
|
"first": "Rajarshi", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shehzaad", |
|
"middle": [], |
|
"last": "Dhuliawala", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manzil", |
|
"middle": [], |
|
"last": "Zaheer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Vilnis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ishan", |
|
"middle": [], |
|
"last": "Durugkar", |
|
"suffix": "" |
|
},

{

"first": "Akshay",

"middle": [],

"last": "Krishnamurthy",

"suffix": ""

},

{

"first": "Alex",

"middle": [],

"last": "Smola",

"suffix": ""

},

{

"first": "Andrew",

"middle": [],

"last": "McCallum",

"suffix": ""

}
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1711.05851" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rajarshi Das, Shehzaad Dhuliawala, Manzil Za- heer, Luke Vilnis, Ishan Durugkar, Akshay Kr- ishnamurthy, Alex Smola, and Andrew McCal- lum. 2017a. Go for a walk and arrive at the an- swer: Reasoning over paths in knowledge bases using reinforcement learning. arXiv preprint arXiv:1711.05851.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Question answering on knowledge bases and text using universal schema and memory networks", |
|
"authors": [ |
|
{ |
|
"first": "Rajarshi", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manzil", |
|
"middle": [], |
|
"last": "Zaheer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siva", |
|
"middle": [], |
|
"last": "Reddy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proc. ACL '17", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "358--365", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rajarshi Das, Manzil Zaheer, Siva Reddy, and An- drew McCallum. 2017b. Question answering on knowledge bases and text using universal schema and memory networks. In Proc. ACL '17, pages 358-365.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Open question answering over curated and extracted knowledge bases", |
|
"authors": [ |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Fader", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Etzioni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proc. SIGKDD ' 14", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1156--1165", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anthony Fader, Luke Zettlemoyer, and Oren Etzioni. 2014. Open question answering over curated and extracted knowledge bases. In Proc. SIGKDD ' 14, pages 1156-1165. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Hybrid question answering over knowledge base and free text", |
|
"authors": [ |
|
{ |
|
"first": "Yansong", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Songfang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongyan", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proc. COLING '16", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2397--2407", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yansong Feng, Songfang Huang, Dongyan Zhao, et al. 2016. Hybrid question answering over knowledge base and free text. In Proc. COLING '16, pages 2397-2407.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Neural symbolic machines: Learning semantic parsers on freebase with weak supervision", |
|
"authors": [ |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Berant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenneth", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Forbus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ni", |
|
"middle": [], |
|
"last": "Lao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1611.00020" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chen Liang, Jonathan Berant, Quoc Le, Kenneth D Forbus, and Ni Lao. 2016. Neural symbolic machines: Learning semantic parsers on free- base with weak supervision. arXiv preprint arXiv:1611.00020.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Knowledge base question answering via encoding of complex query graphs", |
|
"authors": [ |
|
{ |
|
"first": "Kangqi", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fengli", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xusheng", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenny", |
|
"middle": [ |
|
"Q" |
|
], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proc. EMNLP '18", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2185--2194", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kangqi Luo, Fengli Lin, Xusheng Luo, and Kenny Q. Zhu. 2018. Knowledge base question answering via encoding of complex query graphs. In Proc. EMNLP '18, pages 2185-2194.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Effective approaches to attention-based neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proc. EMNLP '15", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1412--1421", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thang Luong, Hieu Pham, and Christopher D. Man- ning. 2015. Effective approaches to attention-based neural machine translation. In Proc. EMNLP '15, pages 1412-1421.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Maximum margin reward networks for learning from explicit and implicit supervision", |
|
"authors": [ |
|
{ |
|
"first": "Haoruo", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wen-Tau", |
|
"middle": [], |
|
"last": "Yih", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proc. EMNLP '17", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2368--2378", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haoruo Peng, Ming-Wei Chang, and Wen-tau Yih. 2017a. Maximum margin reward networks for learning from explicit and implicit supervision. In Proc. EMNLP '17, pages 2368-2378.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Maximum margin reward networks for learning from explicit and implicit supervision", |
|
"authors": [ |
|
{ |
|
"first": "Haoruo", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wen-Tau", |
|
"middle": [], |
|
"last": "Yih", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proc. EMNLP '17", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2368--2378", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haoruo Peng, Ming-Wei Chang, and Wen-tau Yih. 2017b. Maximum margin reward networks for learning from explicit and implicit supervision. In Proc. EMNLP '17, pages 2368-2378.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proc. EMNLP '14", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. Glove: Global vectors for word representation. In Proc. EMNLP '14, pages 1532- 1543.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Exploring graph-structured passage representation for multihop reading comprehension with graph neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Linfeng", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiguo", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mo", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Radu", |
|
"middle": [], |
|
"last": "Florian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Gildea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1809.02040" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Linfeng Song, Zhiguo Wang, Mo Yu, Yue Zhang, Radu Florian, and Daniel Gildea. 2018. Exploring graph-structured passage representation for multi- hop reading comprehension with graph neural net- works. arXiv preprint arXiv:1809.02040.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Open domain question answering using early fusion of knowledge bases and text", |
|
"authors": [ |
|
{ |
|
"first": "Haitian", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bhuwan", |
|
"middle": [], |
|
"last": "Dhingra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manzil", |
|
"middle": [], |
|
"last": "Zaheer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kathryn", |
|
"middle": [], |
|
"last": "Mazaitis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Cohen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proc. EMNLP '18", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4231--4242", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haitian Sun, Bhuwan Dhingra, Manzil Zaheer, Kathryn Mazaitis, Ruslan Salakhutdinov, and William Co- hen. 2018. Open domain question answering using early fusion of knowledge bases and text. In Proc. EMNLP '18, pages 4231-4242.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "The web as a knowledge-base for answering complex questions", |
|
"authors": [ |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Talmor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Berant", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proc. NAACL '18", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "641--651", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alon Talmor and Jonathan Berant. 2018. The web as a knowledge-base for answering complex questions. In Proc. NAACL '18, pages 641-651.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Template-based question answering over rdf data", |
|
"authors": [ |
|
{ |
|
"first": "Christina", |
|
"middle": [], |
|
"last": "Unger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lorenz", |
|
"middle": [], |
|
"last": "B\u00fchmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jens", |
|
"middle": [], |
|
"last": "Lehmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Axel-Cyrille Ngonga", |
|
"middle": [], |
|
"last": "Ngomo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Gerber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Cimiano", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proc. WWW '12", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "639--648", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christina Unger, Lorenz B\u00fchmann, Jens Lehmann, Axel-Cyrille Ngonga Ngomo, Daniel Gerber, and Philipp Cimiano. 2012. Template-based question answering over rdf data. In Proc. WWW '12, pages 639-648. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Cesi: Canonicalizing open knowledge bases using embeddings and side information", |
|
"authors": [ |
|
{ |
|
"first": "Shikhar", |
|
"middle": [], |
|
"last": "Vashishth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prince", |
|
"middle": [], |
|
"last": "Jain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Partha", |
|
"middle": [], |
|
"last": "Talukdar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International World Wide Web Conferences Steering Committee", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1317--1327", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shikhar Vashishth, Prince Jain, and Partha Talukdar. 2018. Cesi: Canonicalizing open knowledge bases using embeddings and side information. In Proc. WWW '18, pages 1317-1327. International World Wide Web Conferences Steering Committee.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Deeppath: A reinforcement learning method for knowledge graph reasoning", |
|
"authors": [ |
|
{ |
|
"first": "Wenhan", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thien", |
|
"middle": [], |
|
"last": "Hoang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"Yang" |
|
], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proc. EMNLP '17", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "564--573", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wenhan Xiong, Thien Hoang, and William Yang Wang. 2017. Deeppath: A reinforcement learning method for knowledge graph reasoning. In Proc. EMNLP '17, pages 564-573.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Question answering on freebase via relation extraction and textual evidence", |
|
"authors": [ |
|
{ |
|
"first": "Kun", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siva", |
|
"middle": [], |
|
"last": "Reddy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yansong", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Songfang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongyan", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proc. ACL '16", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kun Xu, Siva Reddy, Yansong Feng, Songfang Huang, and Dongyan Zhao. 2016. Question answering on freebase via relation extraction and textual evidence. In Proc. ACL '16.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Semantic parsing via staged query graph generation: Question answering with knowledge base", |
|
"authors": [ |
|
{ |
|
"first": "Wen-tau", |
|
"middle": [], |
|
"last": "Yih", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proc. ACL '15", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1321--1331", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wen-tau Yih, Ming-Wei Chang, Xiaodong He, and Jianfeng Gao. 2015. Semantic parsing via staged query graph generation: Question answering with knowledge base. In Proc. ACL '15, pages 1321- 1331.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "The value of semantic parse labeling for knowledge base question answering", |
|
"authors": [ |
|
{ |
|
"first": "Wen-tau", |
|
"middle": [], |
|
"last": "Yih", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Richardson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Meek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jina", |
|
"middle": [], |
|
"last": "Suh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proc. ACL '16", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "201--206", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wen-tau Yih, Matthew Richardson, Chris Meek, Ming- Wei Chang, and Jina Suh. 2016. The value of se- mantic parse labeling for knowledge base question answering. In Proc. ACL '16, volume 2, pages 201- 206.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Answering questions with complex semantic constraints on open knowledge bases", |
|
"authors": [ |
|
{ |
|
"first": "Pengcheng", |
|
"middle": [], |
|
"last": "Yin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Duan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Kao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junwei", |
|
"middle": [], |
|
"last": "Bao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 24th ACM International on Conference on Information and Knowledge Management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1301--1310", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pengcheng Yin, Nan Duan, Ben Kao, Junwei Bao, and Ming Zhou. 2015. Answering questions with com- plex semantic constraints on open knowledge bases. In Proceedings of the 24th ACM International on Conference on Information and Knowledge Man- agement, pages 1301-1310. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Improved neural relation detection for knowledge base question answering", |
|
"authors": [ |
|
{ |
|
"first": "Mo", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenpeng", |
|
"middle": [], |
|
"last": "Yin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kazi", |
|
"middle": [ |
|
"Saidul" |
|
], |
|
"last": "Hasan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cicero", |
|
"middle": [], |
|
"last": "Dos Santos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Xiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bowen", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1704.06194" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mo Yu, Wenpeng Yin, Kazi Saidul Hasan, Ci- cero dos Santos, Bing Xiang, and Bowen Zhou. 2017. Improved neural relation detection for knowl- edge base question answering. arXiv preprint arXiv:1704.06194.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Natural language question answering over rdf: a graph data driven approach", |
|
"authors": [ |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Zou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruizhe", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haixun", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [ |
|
"Xu" |
|
], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenqiang", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongyan", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proc. SIGMOD '14", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "313--324", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lei Zou, Ruizhe Huang, Haixun Wang, Jeffrey Xu Yu, Wenqiang He, and Dongyan Zhao. 2014. Natural language question answering over rdf: a graph data driven approach. In Proc. SIGMOD '14, pages 313- 324. ACM.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"text": "What college did the author of 'The Hobbit' attend? nesting 3. Which Portuguese speaking countries import fish from Brazil? conjunction 1. Where was Rihanna born? simple Figure 1: Simple vs Complex questions.", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"text": "Partial queries and derivations.", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"text": "System Architecture techniques on questions of varying complexity and KBs of different completeness (Section 6).", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF4": { |
|
"num": null, |
|
"text": "Semantic Matching Model", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF0": { |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td colspan=\"2\">Composition Tree</td><td/><td>simQA</td><td>A</td><td>simQA</td><td>?x</td></tr><tr><td/><td>\u2713</td><td colspan=\"2\">The Hobbit</td><td colspan=\"2\">\"is written by\"</td><td>?a</td></tr><tr><td>G1 Partial Queries</td><td/><td colspan=\"2\">?a The Hobbit \u2713</td><td colspan=\"3\">The Hobbit ?a A: {JRR Tolkien} book_author book_published_by</td></tr><tr><td>G2</td><td>\u2713</td><td colspan=\"2\">JRR Tolkien JRR Tolkien</td><td colspan=\"2\">place_of_birth person.education</td><td>?x institution</td><td>?x</td></tr><tr><td>Derivations (G)</td><td/><td/><td/><td/><td/></tr><tr><td colspan=\"3\">The Hobbit</td><td>institution</td><td>?x</td><td colspan=\"2\">The Hobbit</td><td>institution</td><td>?x</td></tr><tr><td colspan=\"2\">\"is written by\"</td><td/><td colspan=\"2\">person.education</td><td>book_author</td><td>person.education</td></tr></table>", |
|
"text": "G 2 : ?b person.education ?c . ?c institution ?x. Join: G = G 1 join ?a=?b G 2 Evaluate: ans = University of Oxford In this work, we propose a novel KB-QA sys-", |
|
"type_str": "table" |
|
}, |
|
"TABREF6": { |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>: Ablation results, average F 1 / precision@1, of</td></tr><tr><td>MULTIQUE (cKB+oKB).</td></tr></table>", |
|
"text": "", |
|
"type_str": "table" |
|
}, |
|
"TABREF8": { |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>", |
|
"text": "Percentage of questions with the highest F 1 score in the top-k derivations, and the average best F 1 .", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |