{
"paper_id": "I08-1047",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T07:42:29.199607Z"
},
"title": "Minimally Supervised Learning of Semantic Knowledge from Query Logs",
"authors": [
{
"first": "Mamoru",
"middle": [],
"last": "Komachi",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Nara Institute of Science and Technology Microsoft Research",
"location": {
"addrLine": "8916-5 Takayama One Microsoft Way Ikoma",
"postCode": "630-0192, 98052",
"settlement": "Nara, Redmond",
"region": "WA",
"country": "Japan, USA"
}
},
"email": "mamoru-k@is.naist.jp"
},
{
"first": "Hisami",
"middle": [],
"last": "Suzuki",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Nara Institute of Science and Technology Microsoft Research",
"location": {
"addrLine": "8916-5 Takayama One Microsoft Way Ikoma",
"postCode": "630-0192, 98052",
"settlement": "Nara, Redmond",
"region": "WA",
"country": "Japan, USA"
}
},
"email": "hisamis@microsoft.com"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "We propose a method for learning semantic categories of words with minimal supervision from web search query logs. Our method is based on the Espresso algorithm (Pantel and Pennacchiotti, 2006) for extracting binary lexical relations, but makes important modifications to handle query log data for the task of acquiring semantic categories. We present experimental results comparing our method with two state-ofthe-art minimally supervised lexical knowledge extraction systems using Japanese query log data, and show that our method achieves higher precision than the previously proposed methods. We also show that the proposed method offers an additional advantage for knowledge acquisition in an Asian language for which word segmentation is an issue, as the method utilizes no prior knowledge of word segmentation, and is able to harvest new terms with correct word segmentation. 4.2.2 Comparison with Basilisk and Espresso",
"pdf_parse": {
"paper_id": "I08-1047",
"_pdf_hash": "",
"abstract": [
{
"text": "We propose a method for learning semantic categories of words with minimal supervision from web search query logs. Our method is based on the Espresso algorithm (Pantel and Pennacchiotti, 2006) for extracting binary lexical relations, but makes important modifications to handle query log data for the task of acquiring semantic categories. We present experimental results comparing our method with two state-ofthe-art minimally supervised lexical knowledge extraction systems using Japanese query log data, and show that our method achieves higher precision than the previously proposed methods. We also show that the proposed method offers an additional advantage for knowledge acquisition in an Asian language for which word segmentation is an issue, as the method utilizes no prior knowledge of word segmentation, and is able to harvest new terms with correct word segmentation. 4.2.2 Comparison with Basilisk and Espresso",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Extraction of lexical knowledge from a large collection of text data with minimal supervision has become an active area of research in recent years. Automatic extraction of relations by exploiting recurring patterns in text was pioneered by Hearst (1992) , who describes a bootstrapping procedure for extracting words in the hyponym (is-a) relation, starting with three manually given lexico-syntactic patterns. This idea of learning with a minimally supervised bootstrapping method using surface text patterns was subsequently adopted for many tasks, including relation extraction (e.g., Brin, 1998; Ri-loff and Jones, 1999; Pantel and Pennacchiotti, 2006) and named entity recognition (e.g., Collins and Singer, 1999; Etzioni et al., 2005) .",
"cite_spans": [
{
"start": 241,
"end": 254,
"text": "Hearst (1992)",
"ref_id": "BIBREF3"
},
{
"start": 589,
"end": 600,
"text": "Brin, 1998;",
"ref_id": "BIBREF0"
},
{
"start": 601,
"end": 625,
"text": "Ri-loff and Jones, 1999;",
"ref_id": null
},
{
"start": 626,
"end": 657,
"text": "Pantel and Pennacchiotti, 2006)",
"ref_id": "BIBREF4"
},
{
"start": 694,
"end": 719,
"text": "Collins and Singer, 1999;",
"ref_id": "BIBREF1"
},
{
"start": 720,
"end": 741,
"text": "Etzioni et al., 2005)",
"ref_id": "BIBREF2"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In this paper, we describe a method of learning semantic categories of words using a large collection of Japanese search query logs. Our method is based on the Espresso algorithm (Pantel and Pennacchiotti, 2006) for extracting binary lexical relations, adapting it to work well on learning unary relations from query logs. The use of query data as a source of knowledge extraction offers some unique advantages over using regular text.",
"cite_spans": [
{
"start": 179,
"end": 211,
"text": "(Pantel and Pennacchiotti, 2006)",
"ref_id": "BIBREF4"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "\uf09f Web search queries capture the interest of search users directly, while the distribution of the Web documents do not necessarily reflect the distribution of what people search (Silverstein et al., 1998) . The word categories acquired from query logs are thus expected to be more useful for the tasks related to search. \uf09f Though user-generated queries are often very short, the words that appear in queries are generally highly relevant for the purpose of word classification. \uf09f Many search queries consist of keywords, which means that the queries include word segmentation specified by users. This is a great source of knowledge for learning word boundaries for those languages whose regularly written text does not indicate word boundaries, such as Chinese and Japanese.",
"cite_spans": [
{
"start": 178,
"end": 204,
"text": "(Silverstein et al., 1998)",
"ref_id": "BIBREF13"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Although our work naturally fits into the larger goal of building knowledge bases automatically from text, to our knowledge we are the first to explore the use of Japanese query logs for the purpose of minimally supervised semantic category acquisition. Our work is similar to Sekine and Suzuki (2007) , whose goal is to augment a manually created dictionary of named entities by finding contextual patterns from English query logs. Our work is different in that it does not require a fullscale list of categorized named entities but a small number of seed words, and iterates over the data to extract more patterns and instances. Recent work by Pa\u015fca (2007) and Pa\u015fca and Van Durme (2007) also uses English query logs to extract lexical knowledge, but their focus is on learning attributes for named entities, a different focus from ours.",
"cite_spans": [
{
"start": 277,
"end": 301,
"text": "Sekine and Suzuki (2007)",
"ref_id": "BIBREF12"
},
{
"start": 646,
"end": 658,
"text": "Pa\u015fca (2007)",
"ref_id": "BIBREF7"
},
{
"start": 663,
"end": 689,
"text": "Pa\u015fca and Van Durme (2007)",
"ref_id": "BIBREF8"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In this section, we describe three state-of-the-art algorithms of relation extraction, which serve as the baseline for our work. They are briefly summarized in Table 1 . The goal of these algorithms is to learn target instances, which are the words belonging to certain categories (e.g., cat for the Animal class), or in the case of relation extraction, the pairs of words standing in a particular relationship (e.g., pasta::food for is-a relationship), given the context patterns for the categories or relation types found in source data.",
"cite_spans": [],
"ref_spans": [
{
"start": 160,
"end": 167,
"text": "Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "The first step toward the acquisition of instances is to extract context patterns. In previous work, these are surface text patterns, e.g., X such as Y, for extracting words in an is-a relation, with some heuristics for finding the pattern boundaries in text. As we use query logs as the source of knowledge, we simply used everything but the instance string in a query as the pattern for the instance, in a manner similar to Pa\u015fca et al. (2006) . For example, the seed word JAL in the query \"JAL+flight_schedule\" yields the pattern \"#+flight_schedule\". 1 Note that we perform no word segmentation or boundary detection heuristics in identifying these patterns, which makes our approach fast and robust, as the 1 # indicates where the instance occurs in the query string, and + indicates a white space in the original Japanese query. The underscore symbol (_) means there was originally no white space; it is used merely to make the translation in English more readable. 2 The manual classification assigns only one category segmentation errors introduce noise in extracted patterns, especially when the source data contains many out of vocabulary items. The extracted context patterns must then be assigned a score reflecting their usefulness in extracting the instances of a desired type. Frequency is a poor metric here, because frequent patterns may be extremely generic, appearing across multiple categories. Previously proposed methods differ in how to assign the desirability scores to the patterns they find and in using the score to extract instances, as well as in the treatment of generic patterns, whose precision is low but whose recall is high.",
"cite_spans": [
{
"start": 426,
"end": 445,
"text": "Pa\u015fca et al. (2006)",
"ref_id": "BIBREF9"
},
{
"start": 971,
"end": 972,
"text": "2",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Pattern Induction",
"sec_num": "2.1"
},
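{
"text": "To make the query-to-pattern step concrete, here is a minimal Python sketch (our illustration, not the authors' released code; names and the example frequency are hypothetical) that replaces the instance string in a query with #:\n\ndef induce_patterns(queries, instance):\n    # queries: list of (query_string, frequency) pairs from the log\n    patterns = []\n    for query, freq in queries:\n        if instance in query:\n            patterns.append((query.replace(instance, \"#\"), freq))\n    return patterns\n\n# The seed \"jal\" in the query \"jal+flight_schedule\" yields \"#+flight_schedule\"\nprint(induce_patterns([(\"jal+flight_schedule\", 120)], \"jal\"))\n\nNo segmentation or boundary heuristics are involved: the pattern is simply the query minus the instance string.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Pattern Induction",
"sec_num": "2.1"
},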
{
"text": "For the purpose of choosing the set of context patterns that best characterizes the categories, Sekine and Suzuki (2007) report that none of the conventional co-occurrence metrics such as tf.idf, mutual information and chi-squared tests achieved good results on their task, and propose a new measure, which is based on the number of different instances of the category a context c co-occurs with, lized by its token frequency for all categories:",
"cite_spans": [
{
"start": 96,
"end": 120,
"text": "Sekine and Suzuki (2007)",
"ref_id": "BIBREF12"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Sekine and Suzuki (2007)'s Algorithm",
"sec_num": "2.2"
},
{
"text": "C c g f c Score type ) ( log ) ( \uf03d ) 1000 ( ) 1000 ( ) ( ) ( ) ( ctop F ctop f C c F c f c g inst type inst type \uf03d \uf03d",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Sekine and Suzuki (2007)'s Algorithm",
"sec_num": "2.2"
},
{
"text": "where f type is the type frequency of instance terms that c co-occurs with in the category, F inst is the token frequency of context c in the entire data and ctop1000 is the 1000 most frequent contexts. Since they start with a large and reliable named entity dictionary, and can therefore use several hundred seed terms, they simply used the top-k highestscoring contexts and extracted new named entities once and for all, without iteration. Generic patterns receive low scores, and are therefore ignored by this algorithm.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Sekine and Suzuki (2007)'s Algorithm",
"sec_num": "2.2"
},
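{
"text": "As a minimal Python sketch of the score as reconstructed above (the exact normalization in Sekine and Suzuki (2007) may differ; the dictionary-based interface is our own):\n\nimport math\n\ndef score(c, f_type, F_inst, ctop1000):\n    # f_type[c]: number of distinct category instances that context c co-occurs with\n    # F_inst[c]: token frequency of context c in the entire data\n    # ctop1000: the 1000 most frequent contexts\n    norm = sum(f_type.get(t, 0) for t in ctop1000) / sum(F_inst[t] for t in ctop1000)\n    g = (f_type[c] / F_inst[c]) / norm\n    return f_type[c] * math.log(g)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Sekine and Suzuki (2007)'s Algorithm",
"sec_num": "2.2"
},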
{
"text": "Thelen and Riloff (2002) for multiple categories. It starts with a small set of seed words and finds all patterns that match these seed words in the corpus. The bootstrapping process begins by selecting a subset of the patterns by the RlogF metric (Riloff, 1996) :",
"cite_spans": [
{
"start": 11,
"end": 24,
"text": "Riloff (2002)",
"ref_id": "BIBREF14"
},
{
"start": 248,
"end": 262,
"text": "(Riloff, 1996)",
"ref_id": "BIBREF10"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "The Basilisk Algorithm",
"sec_num": "2.3"
},
{
"text": ") log( ) ( log i i i i F N F pattern F R \uf0d7 \uf03d",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Basilisk Algorithm",
"sec_num": "2.3"
},
{
"text": "where F i is the number of category members extracted by pattern i and N i is the total number of instances extracted by pattern i . It then identifies instances by these patterns and scores each instance by the following formula:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Basilisk Algorithm",
"sec_num": "2.3"
},
{
"text": "i P j j i P F word AvgLog i \uf0e5 \uf03d \uf02b \uf03d 1 ) 1 log( ) (",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Basilisk Algorithm",
"sec_num": "2.3"
},
{
"text": "where P i is the number of patterns that extract word i . They use the average logarithm to select instances to balance the recall and precision of generic patterns. They add five best instances to the lexicon according to this formula, and the bootstrapping process starts again. Instances are cumulatively collected across iterations, while patterns are discarded at the end of each iteration.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Basilisk Algorithm",
"sec_num": "2.3"
},
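{
"text": "The two Basilisk formulas translate directly into code; a minimal sketch (log base 2 follows Riloff (1996); the interface is illustrative):\n\nimport math\n\ndef rlogf(F_i, N_i):\n    # F_i: category members extracted by the pattern; N_i: all instances it extracts\n    return (F_i / N_i) * math.log2(F_i) if F_i > 0 else float(\"-inf\")\n\ndef avglog(F_js):\n    # F_js: one F_j per pattern that extracts the word, so P_i = len(F_js)\n    return sum(math.log2(F_j + 1) for F_j in F_js) / len(F_js)\n\nThe averaging in avglog is what damps generic patterns: a single very productive pattern cannot dominate an instance's score.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Basilisk Algorithm",
"sec_num": "2.3"
},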
{
"text": "We will discuss the Espresso framework (Pantel and Pennacchiotti, 2006) in some detail because our method is based on it. It is a general-purpose, minimally supervised bootstrapping algorithm that takes as input a few seed instances and iteratively learns surface patterns to extract more instances. The key to Espresso lies in its use of generic patterns: Pantel and Pennacchiotti (2006) assume that correct instances captured by a generic pattern will also be instantiated by some reliable patterns, which denote high precision and low recall patterns.",
"cite_spans": [
{
"start": 39,
"end": 71,
"text": "(Pantel and Pennacchiotti, 2006)",
"ref_id": "BIBREF4"
},
{
"start": 357,
"end": 388,
"text": "Pantel and Pennacchiotti (2006)",
"ref_id": "BIBREF4"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "The Espresso Algorithm",
"sec_num": "2.4"
},
{
"text": "Espresso starts from a small set of seed instances of a binary relation, finds a set of surface patterns P, selects the top-k patterns, extracts the highest scoring m instances, and repeats the process. Espresso ranks all patterns in P according to reliability r \u03c0 , and retains the top-k patterns for instance extraction. The value of k is incremented by one after each iteration.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Espresso Algorithm",
"sec_num": "2.4"
},
{
"text": "The reliability of a pattern p is based on the intuition that a reliable pattern co-occurs with many reliable instances. They use pointwise mutual information (PMI) and define the reliability of a pattern p as its average strength of association across each input instance i in the set of instances I, weighted by the reliability of each instance i:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Espresso Algorithm",
"sec_num": "2.4"
},
{
"text": "I i r p i pmi p r I i pm i \uf0e5 \uf0ce \uf0f7 \uf0f7 \uf0f8 \uf0f6 \uf0e7 \uf0e7 \uf0e8 \uf0e6 \uf0d7 \uf03d ) ( max ) , ( ) ( \uf069 \uf070",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Espresso Algorithm",
"sec_num": "2.4"
},
{
"text": "where r \u03b9 (i) is the reliability of the instance i and max pmi is the maximum PMI between all patterns and all instances. The PMI between instance i = {x,y} and pattern p is estimated by:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Espresso Algorithm",
"sec_num": "2.4"
},
{
"text": ",* *, ,*, , , log ) , ( p y x y p x p i pmi \uf03d where y p x ,",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Espresso Algorithm",
"sec_num": "2.4"
},
{
"text": ", is the frequency of pattern p instantiated with terms x and y (recall that Espresso is targeted at extracting binary relations) and where the asterisk represents a wildcard. They multiplied pmi(i,p) with the discounting factor suggested in Pantel and Ravichandran (2004) to alleviate a bias towards infrequent events. The reliability of an instance is defined similarly: a reliable instance is one that associates with as many reliable patterns as possible.",
"cite_spans": [
{
"start": 242,
"end": 272,
"text": "Pantel and Ravichandran (2004)",
"ref_id": "BIBREF5"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "The Espresso Algorithm",
"sec_num": "2.4"
},
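{
"text": "A sketch of the PMI estimate (illustrative; the counts would come from the query log, and the discount factor is passed in rather than recomputed):\n\nimport math\n\ndef pmi(count_xpy, count_x_any_y, count_any_p_any, discount=1.0):\n    # pmi(i,p) = log(|x,p,y| / (|x,*,y| * |*,p,*|)), optionally multiplied by the\n    # discounting factor of Pantel and Ravichandran (2004) for infrequent events\n    return discount * math.log(count_xpy / (count_x_any_y * count_any_p_any))",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Espresso Algorithm",
"sec_num": "2.4"
},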
{
"text": "P p r p i pmi i r P p pm i \uf0e5 \uf0ce \uf0f7 \uf0f7 \uf0f8 \uf0f6 \uf0e7 \uf0e7 \uf0e8 \uf0e6 \uf0d7 \uf03d ) ( max ) , ( ) ( \uf070 \uf069",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Espresso Algorithm",
"sec_num": "2.4"
},
{
"text": "where r \u03c0 (p) is the reliability of pattern p, and P is the set of surface patterns. Note that r \u03b9 (i) and r \u03c0 (p) are recursively defined: the computation of the pattern and instance reliability alternates between performing pattern reranking and instance extraction. Similarly to Basilisk, instances are cumulatively learned, but patterns are discarded at the end of each iteration.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Espresso Algorithm",
"sec_num": "2.4"
},
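{
"text": "The mutual recursion can be written compactly; a minimal sketch (our illustration, assuming pmi values are precomputed in a dict keyed by (instance, pattern) pairs):\n\ndef pattern_reliability(p, instances, pmi, max_pmi, r_inst):\n    # r_pi(p): average of (pmi(i,p)/max_pmi) * r_iota(i) over the instance set I\n    return sum(pmi[i, p] / max_pmi * r_inst[i] for i in instances) / len(instances)\n\ndef instance_reliability(i, patterns, pmi, max_pmi, r_pat):\n    # r_iota(i): the symmetric definition over the pattern set P\n    return sum(pmi[i, p] / max_pmi * r_pat[p] for p in patterns) / len(patterns)\n\nEach iteration recomputes pattern reliabilities from the current instance reliabilities and vice versa, starting from the seed instances, which are taken to be fully reliable.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Espresso Algorithm",
"sec_num": "2.4"
},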
{
"text": "In this section, we describe the modifications we made to Espresso to derive our algorithm called Tchai.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Tchai Algorithm",
"sec_num": "3"
},
{
"text": "As mentioned above, the treatment of high-recall, low-precision generic patterns (e.g., #+map, #+animation) present a challenge to minimally supervised learning algorithms due to their amguity. In the case of semantic category acquisition, the problem of ambiguity is exacerbated, because not only the acquired patterns, but also the instances can be highly ambiguous. For example, once we learn an ambiguous instance such as Pokemon, it will start collecting patterns for multiple categories (e.g., Game, Animation and Movie), which is not desirable. In order to control the negative effect of the generic patterns, Espresso introduces a confidence metric, which is similar but separate from the reliability measure, and uses it to filter out the generic patterns falling below a confidence threshold. In our experiments, however, this metric did not produce a score that was substantially different from the reliability score. Therefore, we did not use a confidence metric, and instead opted for not ing ambiguous instances and patterns, where we define ambiguous instance as one that induces more than 1.5 times the number of patterns of viously accepted reliable instances, and ambiguous (or generic) pattern as one that extracts more than twice the number of instances of previously accepted reliable patterns. As we will see in Section 4, this modification improves the precision of the extracted instances, especially in the early stages of iteration.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Filtering Ambiguous Instances and Patterns",
"sec_num": "3.1"
},
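{
"text": "A minimal sketch of the two filters described above (the 1.5x and 2x factors are from this section; taking the baseline as the average count over previously accepted items is our assumption):\n\ndef is_ambiguous_instance(n_patterns_induced, accepted_instance_pattern_counts):\n    baseline = sum(accepted_instance_pattern_counts) / len(accepted_instance_pattern_counts)\n    return n_patterns_induced > 1.5 * baseline\n\ndef is_generic_pattern(n_instances_extracted, accepted_pattern_instance_counts):\n    baseline = sum(accepted_pattern_instance_counts) / len(accepted_pattern_instance_counts)\n    return n_instances_extracted > 2.0 * baseline\n\nItems flagged by these tests are simply not selected, replacing Espresso's separate confidence metric.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Filtering Ambiguous Instances and Patterns",
"sec_num": "3.1"
},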
{
"text": "Another modification to the Espresso algorithm to reduce the power of generic patterns is to use local max pmi instead of global max pmi . Since PMI ranges [-\u221e, +\u221e], the point of dividing pmi(i,p) by max pmi in Espresso is to normalize the reliability to [0, 1]. However, using PMI directly to estimate the reliability of a pattern when calculating the reliability of an instance may lead to unexpected results because the absolute value of PMI is highly variable across instances and patterns. We define the local max pmi of the reliability of an instance to be the absolute value of the maximum PMI for a given instance, as opposed to taking the maximum for all instances in a given iteration. Local max pmi of the reliability of a pattern is defined in the same way. As we show in the next section, this modification has a large impact on the effectiveness of our algorithm.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Scaling Factor in Reliability Scores",
"sec_num": "3.2"
},
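{
"text": "A sketch of the local normalizer (illustrative): for a given instance, the normalizer is the absolute value of the maximum PMI over the patterns it co-occurs with, rather than the global maximum over all instance-pattern pairs in the iteration:\n\ndef local_max_pmi(i, patterns, pmi):\n    # pmi: dict keyed by (instance, pattern); abs() guards against all-negative PMIs\n    return abs(max(pmi[i, p] for p in patterns))\n\nThe same per-item normalizer, computed over instances, is used when scoring a pattern, which keeps each reliability on a comparable scale even though raw PMI values vary widely across items.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Scaling Factor in Reliability Scores",
"sec_num": "3.2"
},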
{
"text": "Tchai, unlike Espresso, does not perform the pattern induction step between iterations; rather, it simply recomputes the reliability of the patterns induced at the beginning. Our assumption is that fairly reliable patterns will occur with at least one of the seed instances if they occur frequently enough in query logs. Since pattern induction is computationally expensive, this modification reduces the computation time by a factor of 400.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Performance Improvements",
"sec_num": "3.3"
},
{
"text": "In this section, we present an empirical comparison of Tchai with the systems described in Section 2.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experiment",
"sec_num": "4"
},
{
"text": "Query logs: The data source for instance extraction is an anonymized collection of query logs submitted to Live Search from January to February 2007, taking the top 1 million unique queries. Queries with garbage characters are removed. Almost all queries are in Japanese, and are accompanied by their frequency within the logs.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experimental Setup",
"sec_num": "4.1"
},
{
"text": "Our task is to learn word categories that closely reflect the interest of web search users. We believe that a useful categorization of words is task-specific, therefore we did not start with any externally available ontology, but chose to start with a small number of seed words. For our task, we were given a list of 23 categories relevant for web search, with a manual classification of the 10,000 most frequent search words in the log of December 2006 (which we henceforth refer to as the 10K list) into one of these categories. 2 For evaluation, we chose two of the categories, Travel and Financial Services: Travel is the largest category containing 712 words of the 10K list (as all the location names are classified into this category), while Financial Services was the smallest, containing 240 words.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Target categories:",
"sec_num": null
},
{
"text": "We compared three different systems described in Section 2 that implement an iterative algorithm for lexical learning:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Systems:",
"sec_num": null
},
{
"text": "Seeds (with English translation) Travel jal, ana, jr, \u3058\u3083\u3089\u3093(jalan), his Finance \u307f\u305a\u307b\u9280\u884c(Mizuho Bank), \u4e09\u4e95\u4f4f\u53cb\u9280 \u884c (SMBC), jcb, \u65b0 \u751f \u9280 \u884c (Shinsei Bank), \u91ce\u6751\u8b49\u5238(Nomura Securities) \uf09f Basilisk: The algorithm by (Thelen and Riloff, 2002) described in Section 2. \uf09f Espresso: The algorithm by (Pantel and Pennacchiotti, 2006 ) described in Sections 2 and 3. \uf09f Tchai: The Tchai algorithm described in this paper. For each system, we gave the same seed instances. The seed instances are the 5 most frequent words belonging to these categories in the 10K list; they are given in Table 2 . For the Travel category, \"jal\" and \"ana\" are airline companies, \"jr\" stand for Japan Railways, \"jalan\" is an online travel information site, and \"his\" is a travel agency. In the Finance category, three of them are banks, and the other two are a securities company and a credit card firm. Basilisk starts by extracting 20 patterns, and adds 100 instances per iteration. Espresso and Tchai start by extracting 5 patterns and add 200 instances per iteration. Basilisk and Tchai iterated 20 times, while Espresso iterated only 5 times due to computation time.",
"cite_spans": [
{
"start": 197,
"end": 222,
"text": "(Thelen and Riloff, 2002)",
"ref_id": "BIBREF14"
},
{
"start": 276,
"end": 307,
"text": "(Pantel and Pennacchiotti, 2006",
"ref_id": "BIBREF4"
}
],
"ref_spans": [
{
"start": 559,
"end": 566,
"text": "Table 2",
"ref_id": "TABREF1"
}
],
"eq_spans": [],
"section": "Category",
"sec_num": null
},
{
"text": "Tables 3 and 4 are the results of the Tchai algorithm compared to the manual classification. Table 3 shows the results for the Travel category. The precision of Tchai is very high: out of the 297 words classified into the Travel domain that were also in the 10K list, 280 (92.1%) were learned rectly. 3 It turned out that the 17 instances that 3 As the 10K list contained 712 words in the Travel category, the recall against that list is fairly low (~40%). The primary reason for this is that all location names are classified as Travel in the 10K list, and 20 iterations are represent the precision error were due to the ambiguity of hand labeling, as in \u6771\u4eac\u30c7\u30a3\u30ba\u30cb\u30fc\u30e9\u30f3\u30c9 \"Tokyo Disneyland\", which is a popular travel destination, but is classified as Entertainment in the manual annotation. We were also able to correctly learn 251 words that were not in the 10K list according to manual verification; we also harvested 125 new words \"incorrectly\" into the Travel domain, but these words include common nouns related to Travel, such as \u91e3\u308a \"fishing\" and \u30ec\u30f3\u30bf\u30ab \u30fc \"rental car\". Results for the Finance domain show a similar trend, but fewer instances are extracted.",
"cite_spans": [
{
"start": 302,
"end": 303,
"text": "3",
"ref_id": null
},
{
"start": 345,
"end": 346,
"text": "3",
"ref_id": null
}
],
"ref_spans": [
{
"start": 93,
"end": 101,
"text": "Table 3",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Results of the Tchai algorithm",
"sec_num": "4.2.1"
},
{
"text": "Sample instances harvested by our algorithm are given in Table 5 . It includes subclasses of travel-related terms, for some of which no seed words were given (such as Hotels and Attractions). We also note that segmentation errors are entirely absent from the collected terms, demonstrating that query logs are in fact excellently suited for acquiring new words for languages with no explicit word segmentation in text. Figures 1 and 2 show the precision results comparing Tchai with Basilisk and Espresso for the Travel and Finance categories. Tchai outperforms Basilisk and Espresso for both categories: its precision is constantly higher for the Travel category, and it achieves excellent precision for the Finance category, especially in early iterations. The differences in behavior between these two categories are due to the inherent size of these domains. For the not enough to enumerate all frequent location names. Another reason is that the 10K list consists of queries but our algorithm extracts instancesthis sometimes causes a mismatch, e.g.,Tchai extracts \u30ea\u30c3\u30c4 \"Ritz\" but the 10K list contains \u30ea\u30c3\u30c4\u30db\u30c6\u30eb \"Ritz Hotel\". Not in 10K list Travel Not Travel Travel 280 17 251 Not Travel 0 7 125 smaller Finance category, Basilisk and Espresso both suffered from the effect of generic patterns such as #\u30db\u30fc\u30e0\u30da\u30fc\u30b8 \"homepage\" and #\u30ab\u30fc\u30c9 \"card\" in early iterations, whereas Tchai did not select these patterns. Comparing these algorithms in terms of recall is more difficult, as the complete set of words for each category is not known. However, we can estimate the relative recall given the recall of another system. Pantel and Ravichandran (2004) defined relative recall as:",
"cite_spans": [
{
"start": 1623,
"end": 1653,
"text": "Pantel and Ravichandran (2004)",
"ref_id": "BIBREF5"
}
],
"ref_spans": [
{
"start": 57,
"end": 64,
"text": "Table 5",
"ref_id": "TABREF4"
},
{
"start": 419,
"end": 434,
"text": "Figures 1 and 2",
"ref_id": null
},
{
"start": 1128,
"end": 1204,
"text": "Not in 10K list Travel Not Travel Travel 280 17 251 Not Travel 0 7",
"ref_id": "TABREF1"
}
],
"eq_spans": [],
"section": "Results of the Tchai algorithm",
"sec_num": "4.2.1"
},
{
"text": "| | | | | B P A P C C C C C C R R R B A B A B A B A B A \uf0b4 \uf0b4 \uf03d \uf03d \uf03d \uf03d",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "10K list",
"sec_num": null
},
{
"text": "where R A|B is the relative recall of system A given system B, C A and C B are the number of correct instances of each system, and C is the number of true correct instances. C A and C B can be calculated by using the precision, P A and P B , and the number of instances from each system. Using this formula, we estimated the relative recall of each system relative to Espresso. Tables 6 and 7 show that Tchai achieved the best results in both precision and relative recall in the Travel domain. In the Finance domain, Espresso received the highest relative call but the lowest precision. This is because Tchai uses a filtering method so as not to select generic patterns and instances. Table 8 shows the context patterns acquired by different systems after 4 iterations for the Travel domain. 4 The patterns extracted by Basilisk are not entirely characteristic of the Travel category. For example, \"p#sonic\" and \"google+#lytics\" only match the seed word \"ana\", and are clearly irrelevant to the domain. Basilisk uses token count to estimate the score of a pattern, which may explain the extraction of these patterns. Both Basilisk and Espresso identify location names as context patterns (e.g., #\u6771\u4eac \"Tokyo\", #\u4e5d\u5dde \"Kyushu\"), which may be too generic to be characteristic of the domain. In contrast, Tchai finds context patterns that are highly characteristic, including terms related to transportation (#+\u683c\u5b89\u822a\u7a7a\u5238 \"discount plane ticket\", #\u30de\u30a4\u30ec\u30fc\u30b8 \"mileage\") and accommodation (#+\u30db\u30c6\u30eb \"hotel\").",
"cite_spans": [
{
"start": 793,
"end": 794,
"text": "4",
"ref_id": null
}
],
"ref_spans": [
{
"start": 378,
"end": 392,
"text": "Tables 6 and 7",
"ref_id": "TABREF6"
},
{
"start": 686,
"end": 693,
"text": "Table 8",
"ref_id": null
}
],
"eq_spans": [],
"section": "10K list",
"sec_num": null
},
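{
"text": "As a worked check of the formula (our arithmetic, with Espresso as the reference system B and the instance counts and precisions from the comparison tables):\n\ndef relative_recall(P_A, n_A, P_B, n_B):\n    # R_{A|B} = C_A / C_B = (P_A * |A|) / (P_B * |B|)\n    return (P_A * n_A) / (P_B * n_B)\n\n# Tchai vs. Espresso: (0.350 * 223) / (0.152 * 704) is roughly 0.73\nprint(round(relative_recall(0.350, 223, 0.152, 704), 2))",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Comparison with Basilisk and Espresso",
"sec_num": "4.2.2"
},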
{
"text": "In this subsection, we examine the contribution of each modification to the Espresso algorithm we made in Tchai. Figure 3 illustrates the effect of each modification proposed for the Tchai algorithm in Section 3 on the Travel category. Each line in the graph corresponds to the Tchai algorithm with and without the modification described in Sections 3.1 and 3.2. It shows that the modification to the max pmi function (purple) contributes most significantly to the improved accuracy of our system. The filtering of generic patterns (green) does not show 4 Note that Basilisk and Espresso use context patterns only for the sake of collecting instances, and are not interested in the patterns per se. However, they can be quite useful in characterizing the semantic categories they are acquired for, so we chose to compare them here. Table 7 : Precision (%) and relative recall: Financial Services domain a large effect in the precision of the acquired instances for this category, but produces steadily better results than the system without it. Figure 4 compares the original Espresso algorithm and the modified Espresso algorithm which performs the pattern induction step only at the beginning of the bootstrapping process, as described in Section 3.3. Although there is no significant difference in precision between the two systems, this modification greatly improves the computation time and enables efficient extraction of instances. We believe that our choice of the seed instances to be the most frequent words in the category produces sufficient patterns for extracting new instances. ",
"cite_spans": [
{
"start": 554,
"end": 555,
"text": "4",
"ref_id": null
}
],
"ref_spans": [
{
"start": 113,
"end": 121,
"text": "Figure 3",
"ref_id": "FIGREF1"
},
{
"start": 832,
"end": 839,
"text": "Table 7",
"ref_id": null
},
{
"start": 1045,
"end": 1053,
"text": "Figure 4",
"ref_id": "FIGREF2"
}
],
"eq_spans": [],
"section": "Contributions of Tchai components",
"sec_num": "4.2.3"
},
{
"text": "We proposed a minimally supervised bootstrapping algorithm called Tchai. The main contribution of the paper is to adapt the general-purpose Espresso algorithm to work well on the task of learning semantic categories of words from query logs. The proposed method not only has a superior performance in the precision of the acquired words into semantic categories, but is faster and collects more meaningful context patterns for characterizing the categories than the unmodified Espresso algorithm. We have also shown that the proposed method requires no pre-segmentation of the source text for the purpose of knowledge acquisition.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "5"
},
{
"text": "The manual classification assigns only one category per word, which is not optimal given how ambiguous the category memberships are. However, it is also very difficult to reliably perform a multi-class categorization by hand.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [
{
"text": "This research was conducted during the first author\"s internship at Microsoft Research. We would like to thank the colleagues at Microsoft Research, especially Dmitriy Belenko and Christian K\u00f6nig, for their help in conducting this research.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acknowledgements",
"sec_num": null
},
{
"text": "Sample Patterns (with English translation) Basilisk #\u6771\u65e5\u672c(east_japan), #\u897f\u65e5\u672c(west_japan), p#sonic, #\u6642\u523b\u8868(timetable), #\u4e5d\u5dde(Kyushu), #+\u30de\u30a4\u30ec \u30fc\u30b8(mileage), #\u30d0\u30b9(bus), google+#lytics, #+\u6599\u91d1(fare), #+\u56fd\u5185(domestic), #\u30db\u30c6\u30eb(hotel) Espresso #\u30d0\u30b9(bus), \u65e5\u672c#(Japan), #\u30db\u30c6\u30eb(hotel), #\u9053\u8def(road), #\u30a4\u30f3(inn), \u30d5\u30b8#(Fuji), #\u6771\u4eac(Tokyo), #\u6599 \u91d1(fare), #\u4e5d\u5dde(Kyushu), #\u6642\u523b\u8868(timetable), #+\u65c5\u884c(travel), #+\u540d\u53e4\u5c4b(Nagoya) Tchai #+\u30db\u30c6\u30eb(hotel), #+\u30c4\u30a2\u30fc(tour), #+\u65c5\u884c(travel), #\u4e88\u7d04(reserve), #+\u822a\u7a7a\u5238(flight_ticket), #+\u683c\u5b89\u822a \u7a7a\u5238(discount_flight_titcket), #\u30de\u30a4\u30ec\u30fc\u30b8(mileage), \u7fbd\u7530\u7a7a\u6e2f+#(Haneda Airport) Table 8 : Sample patterns acquired by three algorithms",
"cite_spans": [],
"ref_spans": [
{
"start": 528,
"end": 535,
"text": "Table 8",
"ref_id": null
}
],
"eq_spans": [],
"section": "System",
"sec_num": null
}
],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Extracting Patterns and Relations from the World Wide Web",
"authors": [
{
"first": "",
"middle": [],
"last": "Sergey Brin",
"suffix": ""
}
],
"year": 1998,
"venue": "WebDB Workshop at 6th International Conference on Extending Database Technology, EDBT '98",
"volume": "",
"issue": "",
"pages": "172--183",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sergey Brin. 1998. Extracting Patterns and Relations from the World Wide Web. WebDB Workshop at 6th International Conference on Extending Database Technology, EDBT '98. pp. 172-183.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Unsupervised Models for Named Entity Classification",
"authors": [
{
"first": "Michael",
"middle": [],
"last": "Collins",
"suffix": ""
},
{
"first": "Yoram",
"middle": [],
"last": "Singer",
"suffix": ""
}
],
"year": 1999,
"venue": "Proceedings of the Joint SIGDAT Conference on Empirical Methods in Natural Language Processing and Very Large Corpora",
"volume": "",
"issue": "",
"pages": "100--110",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Michael Collins and Yoram Singer. 1999. Unsupervised Models for Named Entity Classification. Proceedings of the Joint SIGDAT Conference on Empirical Me- thods in Natural Language Processing and Very Large Corpora. pp. 100-110.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Unsupervised Named-Entity Extraction from the Web: An Experimental Study",
"authors": [
{
"first": "Oren",
"middle": [],
"last": "Etzioni",
"suffix": ""
},
{
"first": "Michael",
"middle": [],
"last": "Cafarella",
"suffix": ""
},
{
"first": "Dong",
"middle": [],
"last": "Downey",
"suffix": ""
},
{
"first": "Ana-Maria",
"middle": [],
"last": "Popescu",
"suffix": ""
},
{
"first": "Tal",
"middle": [],
"last": "Shaked",
"suffix": ""
},
{
"first": "Stephen",
"middle": [],
"last": "Soderland",
"suffix": ""
},
{
"first": "Daniel",
"middle": [
"S"
],
"last": "Weld",
"suffix": ""
},
{
"first": "Alexander",
"middle": [],
"last": "Yates",
"suffix": ""
}
],
"year": 2005,
"venue": "Artificial Intelligence",
"volume": "165",
"issue": "1",
"pages": "91--134",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Oren Etzioni, Michael Cafarella, Dong Downey, Ana- Maria Popescu, Tal Shaked, Stephen Soderland, Da- niel S. Weld, and Alexander Yates. 2005. Unsuper- vised Named-Entity Extraction from the Web: An Experimental Study. Artificial Intelligence. 165(1). pp. 91-134.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Automatic Acquisition of Hyponyms from Large Text Corpora",
"authors": [
{
"first": "Marti",
"middle": [],
"last": "Hearst",
"suffix": ""
}
],
"year": 1992,
"venue": "Proceedings of the Fourteenth International Conference on Computational Linguistics",
"volume": "",
"issue": "",
"pages": "539--545",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Marti Hearst. 1992. Automatic Acquisition of Hypo- nyms from Large Text Corpora. Proceedings of the Fourteenth International Conference on Computa- tional Linguistics. pp 539-545.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Espresso: Leveraging Generic Patterns for Automatically Harvesting Semantic Relations",
"authors": [
{
"first": "Patrick",
"middle": [],
"last": "Pantel",
"suffix": ""
},
{
"first": "Marco",
"middle": [],
"last": "Pennacchiotti",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of the 21st International Conference on Computational Linguistics and the 44th annual meeting of the ACL",
"volume": "",
"issue": "",
"pages": "113--120",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Patrick Pantel and Marco Pennacchiotti. 2006. Espresso: Leveraging Generic Patterns for Automatically Har- vesting Semantic Relations. Proceedings of the 21st International Conference on Computational Linguis- tics and the 44th annual meeting of the ACL. pp. 113- 120.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Automatically Labeling Semantic Classes",
"authors": [
{
"first": "Patrick",
"middle": [],
"last": "Pantel",
"suffix": ""
},
{
"first": "Deepak",
"middle": [],
"last": "Ravichandran",
"suffix": ""
}
],
"year": 2004,
"venue": "Proceedings of Human Language Technology Conference of the North American Chapter of the Association for Computational Linguistics (HLT/NAACL-04)",
"volume": "",
"issue": "",
"pages": "321--328",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Patrick Pantel and Deepak Ravichandran. 2004. Auto- matically Labeling Semantic Classes. Proceedings of Human Language Technology Conference of the North American Chapter of the Association for Com- putational Linguistics (HLT/NAACL-04). pp. 321- 328.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Acquisition of Categorized Named Entities for Web Search",
"authors": [
{
"first": "Marius",
"middle": [],
"last": "Pa\u015fca",
"suffix": ""
}
],
"year": 2004,
"venue": "Proceedings of the 13th ACM Conference on Information and Knowledge Management (CIKM-04)",
"volume": "",
"issue": "",
"pages": "137--145",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Marius Pa\u015fca. 2004. Acquisition of Categorized Named Entities for Web Search. Proceedings of the 13th ACM Conference on Information and Knowledge Management (CIKM-04). pp. 137-145.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Organizing and Searching the World Wide Web of Fact -Step Two: Harnessing the Wisdom of the Crowds",
"authors": [
{
"first": "Marius",
"middle": [],
"last": "Pa\u015fca",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the 16th International World Wide Web Conference",
"volume": "",
"issue": "",
"pages": "101--110",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Marius Pa\u015fca. 2007. Organizing and Searching the World Wide Web of Fact -Step Two: Harnessing the Wisdom of the Crowds. Proceedings of the 16th In- ternational World Wide Web Conference (WWW-07). pp. 101-110.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "What You Seek is What You Get: Extraction of Class Attributes from Query Logs",
"authors": [
{
"first": "Marius",
"middle": [],
"last": "Pa\u015fca",
"suffix": ""
},
{
"first": "Benjamin",
"middle": [],
"last": "Van Durme",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the 20th International Joint Conference on Artificial Intelligence (IJCAI-07)",
"volume": "",
"issue": "",
"pages": "2832--2837",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Marius Pa\u015fca and Benjamin Van Durme. 2007. What You Seek is What You Get: Extraction of Class Attributes from Query Logs. Proceedings of the 20th International Joint Conference on Artificial Intelli- gence (IJCAI-07). pp. 2832-2837.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Organizing and Searching the World Wide Web of Facts -Step One: the One-Million Fact Extraction Challenge",
"authors": [
{
"first": "Marius",
"middle": [],
"last": "Pa\u015fca",
"suffix": ""
},
{
"first": "Dekang",
"middle": [],
"last": "Lin",
"suffix": ""
},
{
"first": "Jeffrey",
"middle": [],
"last": "Bigham",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of the 21st National Conference on Artificial Intelligence (AAAI-06)",
"volume": "",
"issue": "",
"pages": "1400--1405",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Marius Pa\u015fca, Dekang Lin, Jeffrey Bigham, Andrei Lif- chits and Alpa Jain. 2006. Organizing and Searching the World Wide Web of Facts -Step One: the One- Million Fact Extraction Challenge. Proceedings of the 21st National Conference on Artificial Intelli- gence (AAAI-06). pp. 1400-1405.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Automatically Generating Extraction Patterns from Untagged Text",
"authors": [
{
"first": "Ellen",
"middle": [],
"last": "Riloff",
"suffix": ""
}
],
"year": 1996,
"venue": "Proceedings of the Thirteenth National Conference on Artificial Intelligence",
"volume": "",
"issue": "",
"pages": "1044--1049",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Ellen Riloff. 1996. Automatically Generating Extraction Patterns from Untagged Text. Proceedings of the Thirteenth National Conference on Artificial Intelli- gence. pp. 1044-1049.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Learning Dictionaries for Information Extraction by Multi-Level Bootstrapping",
"authors": [
{
"first": "Ellen",
"middle": [],
"last": "Riloff",
"suffix": ""
},
{
"first": "Rosie",
"middle": [],
"last": "Jones",
"suffix": ""
}
],
"year": 1999,
"venue": "Proceedings of the Sixteenth National Conference on Artificial Intellligence (AAAI-99)",
"volume": "",
"issue": "",
"pages": "474--479",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Ellen Riloff and Rosie Jones. 1999. Learning Dictiona- ries for Information Extraction by Multi-Level Boot- strapping. Proceedings of the Sixteenth National Conference on Artificial Intellligence (AAAI-99). pp. 474-479.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Acquiring Ontological Knowledge from Query Logs",
"authors": [
{
"first": "Satoshi",
"middle": [],
"last": "Sekine",
"suffix": ""
},
{
"first": "Hisami",
"middle": [],
"last": "Suzuki",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the 16 th international conference on World Wide Web",
"volume": "",
"issue": "",
"pages": "1223--1224",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Satoshi Sekine and Hisami Suzuki. 2007. Acquiring Ontological Knowledge from Query Logs. Proceed- ings of the 16 th international conference on World Wide Web. pp. 1223-1224.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Analysis of a Very Large AltaVista Query Log. Digital SRC Technical Note #",
"authors": [
{
"first": "Craig",
"middle": [],
"last": "Silverstein",
"suffix": ""
},
{
"first": "Monika",
"middle": [],
"last": "Henzinger",
"suffix": ""
}
],
"year": 1998,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Craig Silverstein, Monika Henzinger, Hannes Marais, and Michael Moricz. 1998. Analysis of a Very Large AltaVista Query Log. Digital SRC Technical Note #1998-014.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "A Bootstrapping Method for Learning Semantic Lexicons using Extraction Pattern Contexts",
"authors": [
{
"first": "Michael",
"middle": [],
"last": "Thelen",
"suffix": ""
},
{
"first": "Ellen",
"middle": [],
"last": "Riloff",
"suffix": ""
}
],
"year": 2002,
"venue": "Proceedings of Conference on Empirical Methods in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "214--221",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Michael Thelen and Ellen Riloff. 2002. A Bootstrapping Method for Learning Semantic Lexicons using Ex- traction Pattern Contexts. Proceedings of Conference on Empirical Methods in Natural Language Processing. pp. 214-221.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"type_str": "figure",
"num": null,
"text": "Basilisk, Espresso vs. Tchai: Travel Basilisk, Espresso vs. Tchai: Finance",
"uris": null
},
"FIGREF1": {
"type_str": "figure",
"num": null,
"text": "System precision w/o each modification",
"uris": null
},
"FIGREF2": {
"type_str": "figure",
"num": null,
"text": "Modification to the pattern induction step",
"uris": null
},
"TABREF0": {
"text": "present a framework called Basilisk, which extracts semantic lexicons",
"num": null,
"html": null,
"content": "<table><tr><td/><td># of seed</td><td>Target</td><td># of iteration</td><td>Corpus</td><td>Language</td></tr><tr><td>Sekine &amp; Suzuki</td><td>~600</td><td>Categorized NEs</td><td>1</td><td>Query log</td><td>English</td></tr><tr><td>Basilisk</td><td>10</td><td>Semantic lexicon</td><td>\u221e</td><td>MUC-4</td><td>English</td></tr><tr><td>Espresso</td><td>~10</td><td>Semantic relations</td><td>\u221e</td><td>TREC</td><td>English</td></tr><tr><td>Tchai</td><td>5</td><td>Categorized words</td><td>\u221e</td><td>Query log</td><td>Japanese</td></tr><tr><td/><td/><td colspan=\"2\">Table 1: Summary of algorithms</td><td/><td/></tr></table>",
"type_str": "table"
},
"TABREF1": {
"text": "Seed instances for Travel and Financial Services categories",
"num": null,
"html": null,
"content": "<table/>",
"type_str": "table"
},
"TABREF2": {
"text": "Comparison",
"num": null,
"html": null,
"content": "<table><tr><td/><td colspan=\"3\">with manual annotation:</td></tr><tr><td/><td>Travel category</td><td/><td/></tr><tr><td/><td>10K list</td><td/><td>Not in</td></tr><tr><td/><td colspan=\"2\">Finance Not Finance</td><td>10K list</td></tr><tr><td>Finance</td><td>41</td><td>30</td><td>30</td></tr><tr><td>Not Finance</td><td>0</td><td>5</td><td>99</td></tr></table>",
"type_str": "table"
},
"TABREF3": {
"text": "Comparison",
"num": null,
"html": null,
"content": "<table><tr><td>with manual annotation:</td><td/></tr><tr><td>Financial Services category</td><td/></tr><tr><td>Type</td><td>Examples (with translation)</td></tr><tr><td>Place</td><td>\u30c8\u30eb\u30b3 (Turkey), \u30e9\u30b9\u30d9\u30ac\u30b9 (Las</td></tr><tr><td/><td>Vegas), \u30d0\u30ea\u5cf6 (Bali Island)</td></tr><tr><td>Travel agency</td><td>Jtb, \u30c8\u30af\u30fc (www.tocoo.jp), ya-</td></tr><tr><td/><td>hoo (Yahoo ! Travel), net cruiser</td></tr><tr><td>Attraction</td><td>\u30c7\u30a3\u30ba\u30cb\u30fc\u30e9\u30f3\u30c9 (Disneyland),</td></tr><tr><td/><td>usj (Universal Studio Japan)</td></tr><tr><td>Hotel</td><td>\u5e1d\u56fd\u30db\u30c6\u30eb(Imperial Hotel), \u30ea\u30c3</td></tr><tr><td/><td>\u30c4(Ritz Hotel)</td></tr><tr><td colspan=\"2\">Transportation \u4eac\u6d5c\u6025\u884c(Keihin Express), \u5948\u826f\u4ea4</td></tr><tr><td/><td>\u901a(Nara Kotsu Bus Lines)</td></tr></table>",
"type_str": "table"
},
"TABREF4": {
"text": "Extracted Instances",
"num": null,
"html": null,
"content": "<table/>",
"type_str": "table"
},
"TABREF6": {
"text": "Precision (%) and relative recall: Tra-",
"num": null,
"html": null,
"content": "<table><tr><td/><td colspan=\"2\">vel domain</td><td/></tr><tr><td/><td># of inst.</td><td colspan=\"2\">Precision Rel.recall</td></tr><tr><td>Basilisk</td><td>278</td><td>27.3</td><td>0.70</td></tr><tr><td>Espresso</td><td>704</td><td>15.2</td><td>1.00</td></tr><tr><td>Tchai</td><td>223</td><td>35.0</td><td>0.73</td></tr></table>",
"type_str": "table"
}
}
}
}