{
"paper_id": "I11-1045",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T07:32:47.584390Z"
},
"title": "Attribute Extraction from Synthetic Web Search Queries",
"authors": [
{
"first": "Marius",
"middle": [],
"last": "Pa\u015fca",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Google Inc. Mountain View",
"location": {
"postCode": "94043",
"region": "California"
}
},
"email": "mars@google.com"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "The accuracy and coverage of existing methods for extracting attributes of instances from text in general, and Web search queries in particular, are limited by two main factors: availability of input textual data to which the methods can be applied, and inherent limitations of the underlying assumptions and algorithms being used. This paper proposes a weakly-supervised approach for the acquisition of attributes of instances from input data available in the form of synthetic queries automatically generated from submitted queries. The generated queries allow for the acquisition of additional attributes, leading to extracted lists of attributes of higher quality than with comparable previous methods.",
"pdf_parse": {
"paper_id": "I11-1045",
"_pdf_hash": "",
"abstract": [
{
"text": "The accuracy and coverage of existing methods for extracting attributes of instances from text in general, and Web search queries in particular, are limited by two main factors: availability of input textual data to which the methods can be applied, and inherent limitations of the underlying assumptions and algorithms being used. This paper proposes a weakly-supervised approach for the acquisition of attributes of instances from input data available in the form of synthetic queries automatically generated from submitted queries. The generated queries allow for the acquisition of additional attributes, leading to extracted lists of attributes of higher quality than with comparable previous methods.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Motivation: The availability of larger textual data sources has allowed research in information extraction to shift its focus towards robust methods that require little or no annotated data, operate at large scale with lower computational costs, and acquire open-domain information (Banko and Etzioni, 2008) . The information is usually targeted at three levels of granularity: classes (e.g., giant planets), class elements or instances (e.g., jupiter, uranus, saturn) , and relations among instances. Since these types of information would form the backbone of knowledge bases acquired automatically from text (Mooney and Bunescu, 2005) , their acquisition has received increased attention over recent years.",
"cite_spans": [
{
"start": 282,
"end": 307,
"text": "(Banko and Etzioni, 2008)",
"ref_id": "BIBREF2"
},
{
"start": 437,
"end": 468,
"text": "(e.g., jupiter, uranus, saturn)",
"ref_id": null
},
{
"start": 611,
"end": 637,
"text": "(Mooney and Bunescu, 2005)",
"ref_id": "BIBREF14"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Among other types of relations targeted by various extraction methods, attributes (e.g., escape ve-locity, diameter and surface gravity) have emerged as one of the more popular types, as they capture quantifiable properties of their respective classes (giant planets) and instances (jupiter). A variety of attribute extraction methods mine textual data sources ranging from unstructured (Tokunaga et al., 2005) or structured (Cafarella et al., 2008) text within Web documents, to human-compiled encyclopedia Cui et al., 2009) and Web search query logs , attempting to extract, for a given class or instance, a ranked list of attributes that is as comprehensive and accurate as possible. The accuracy and coverage of existing methods (Raju et al., 2008; for extracting attributes of instances are limited by two main factors: availability of input textual data to which the methods can be applied; and inherent limitations of the underlying assumptions and algorithms being used. For example, a simple but effective method was proposed in (Pa\u015fca and Van Durme, 2007) for extracting attributes of an instance, by applying a small set of extraction patterns (e.g., A of I) to Web search queries (e.g., \"escape velocity of jupiter\"). If the input set of queries increased, additional candidate attributes would be extracted.",
"cite_spans": [
{
"start": 387,
"end": 410,
"text": "(Tokunaga et al., 2005)",
"ref_id": "BIBREF25"
},
{
"start": 425,
"end": 449,
"text": "(Cafarella et al., 2008)",
"ref_id": "BIBREF4"
},
{
"start": 508,
"end": 525,
"text": "Cui et al., 2009)",
"ref_id": "BIBREF5"
},
{
"start": 733,
"end": 752,
"text": "(Raju et al., 2008;",
"ref_id": "BIBREF22"
},
{
"start": 1038,
"end": 1065,
"text": "(Pa\u015fca and Van Durme, 2007)",
"ref_id": "BIBREF16"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Contributions: This paper introduces a weaklysupervised approach for the acquisition of attributes of instances from query logs, by automatically expanding the set of known (organic) queries from which attributes are extracted with additional, inferred (synthetic), not-yet-submitted queries. The focus on expanding the input textual data gives a generally-applicable approach, which can be applied to existing methods for attribute acquisition from query logs, to increase coverage. In particular, the application of previously-proposed extraction patterns (Pa\u015fca and Van Durme, 2007) to the expanded set of queries allows for the acquisition of additional attributes that would otherwise not be acquired only from the set of known queries.",
"cite_spans": [
{
"start": 558,
"end": 585,
"text": "(Pa\u015fca and Van Durme, 2007)",
"ref_id": "BIBREF16"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In order to infer new queries, known queries are aggregated into query templates (e.g., \"lyrics of \u22c6 beatles\") associated with known phrase fillers (e.g., \u22c6\u2192{yesterday, hey jude}). The known phrase fillers of each query template are then expanded into new candidate phrase fillers. In contrast to previous work on query generation (Mitkov and Ha, 2006; Heilman and Smith, 2010) , new queries are generated based on query analysis alone, as opposed to individual document analysis. This has the potential advantages of scalability and robustness when applied to arbitrary, inherently-noisy queries. Among the inferred queries, the ones of higher interest to attribute extraction are those derived from a query template that fixes either a potential attribute (e.g., \"surface gravity of \u22c6\") or a potential instance (e.g., \"\u22c6 of jupiter\"). In experiments using a large set of anonymized search queries, the inferred queries allow for the acquisition of accurate attributes over an evaluation set of 75 instances introduced in previous work . Applications: Attributes are useful in information retrieval, e.g., for suggesting related queries (Bellare et al., 2007) and recommending products (Probst et al., 2007) . They plan an important role in knowledge acquisition and representation (Guarino, 1992) , for example as building blocks in the manual compilation of infoboxes in Wikipedia (Remy, 2002) . Furthermore, the availability of a larger number of more accurate attributes allows for the development of better search interfaces geared towards structured search. Examples of such interfaces are Wolfram Alpha and Google Squared, two search tools that can take as input instances and return lists of attributes and their values.",
"cite_spans": [
{
"start": 331,
"end": 352,
"text": "(Mitkov and Ha, 2006;",
"ref_id": "BIBREF13"
},
{
"start": 353,
"end": 377,
"text": "Heilman and Smith, 2010)",
"ref_id": "BIBREF9"
},
{
"start": 1138,
"end": 1160,
"text": "(Bellare et al., 2007)",
"ref_id": "BIBREF3"
},
{
"start": 1187,
"end": 1208,
"text": "(Probst et al., 2007)",
"ref_id": "BIBREF21"
},
{
"start": 1283,
"end": 1298,
"text": "(Guarino, 1992)",
"ref_id": "BIBREF8"
},
{
"start": 1384,
"end": 1396,
"text": "(Remy, 2002)",
"ref_id": "BIBREF23"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Intuitions: Our attribute extraction method is inspired by several intuitions. First, phrases of the same class (car models, search engines, baseball players etc.) share similar properties or attributes, enter similar relations and satisfy similar constraints. For example, car models have replacement parts, are assembled by some maker in some year, and have an estimated current value. Consequently, known queries that refer to similar phrases may overlap significantly, or even become equal once the phrases have been replaced by a common slot filled by the phrases. Thus, \"kelley blue book value of 2008 dodge charger\" and \"kelley blue book value of 2008 honda civic\" can be grouped into a shared query template \"kelley blue book of 2008 \u22c6\", whose slot \u22c6 is filled by the names of car models. Second, known queries that can be grouped into a shared query template often provide a small sample of, rather than comprehensive coverage of, all phrases that would meaningfully fill the template. Therefore, new queries can be generated by filling the slots of query templates with new phrase fillers similar to known fillers. For instance, \"kelley blue book value of 2008 chrysler sebring\" can be inferred as a new candidate query if chrysler sebring is known to be similar to dodge charger and/or honda civic. Third, a new candidate query is meaningful if the new phrase filler is similar to the known fillers not only statically, but also in the context of the query template. This is particularly important for query templates with few, ambiguous phrase fillers. Consider the query template \"lyrics of \u22c6 beatles\", with the known fillers come together, hey jude and yesterday. Although phrases such as gather together, earlier today and last friday are highly similar to the known fillers, they are not meaningful new fillers for \"lyrics of \u22c6 beatles\" and would therefore produce spurious new queries. 
In contrast, lovely rita and here comes the sun are similar to the known fillers both statically and in the context of the query template. Scope: Following the above intuitions, attributes can be extracted from generated queries. In turn, generated queries are inferred by essentially replacing phrases from known queries with meaningful, similar phrases. The form (e.g., long and complex, vs. short and simple) and scope (e.g., open-domain vs. domain-specific) of known queries determine the form and scope of inferred queries. While this prevents arbitrarily complex queries from being generated, it has the advantage of homogeneity of new queries relative to known queries. Also, this should have little, if any, impact on extracted attributes, since the latter are actually extracted from queries whose form is relatively simple rather than complex. The scope of new queries is further influenced by the availability of new phrases that are similar to phrases from known queries. For example, chrysler sebring must be available as a phrase similar to dodge charger and/or honda civic, in order to potentially generate \"kelley blue book value of 2008 chrysler sebring\" from the known queries \"kelley blue book value of 2008 dodge charger\" and \"kelley blue book value of 2008 honda civic\".",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Intuitions and Scope",
"sec_num": "2.1"
},
{
"text": "Aggregation into Query Templates: The input to the method is a set of Web search queries. As described in (Pa\u015fca, 2011) , the sequence of terms available in each query is split into all combinations of triples of a prefix, non-empty infix and postfix. Queries that share a common prefix and common postfix are aggregated into a query template, where the input infixes are the known phrase fillers of the template. For example, queries such as \"lyrics of yesterday beatles\" and \"lyrics of come together beatles\" are aggregated into the template \"lyrics of \u22c6 beatles\", where the template filler \u22c6 corresponds to the set of known phrase fillers, i.e., infixes from the input queries: {yesterday, come together}. An input query may contribute to the creation of multiple query templates, via different infixes. For example, another template created from \"lyrics of yesterday beatles\" \"lyrics of yesterday toni braxton\" is \"lyrics of yesterday \u22c6\".",
"cite_spans": [
{
"start": 106,
"end": 119,
"text": "(Pa\u015fca, 2011)",
"ref_id": "BIBREF19"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Extraction from Generated Queries",
"sec_num": "2.2"
},
{
"text": "Like the subsequent stages of processing, generating all possible infixes of all input queries, especially for large input sets of queries, is a non-trivial computational challenge. However, the computation can be translated into parallelizable operations in a distributed computing framework such as Hadoop (White, 2010) or MapReduce (Dean and Ghemawat, 2004) . In particular, the aggregation of queries into templates can be performed in a single MapReduce step. The mapper takes as input queries, and splits them into one or more mappings from a query template (key) to a corresponding infix, i.e., a known phrase filler (value). For each query template, the reducer simply aggregates its phrase fillers into a set.",
"cite_spans": [
{
"start": 308,
"end": 321,
"text": "(White, 2010)",
"ref_id": "BIBREF26"
},
{
"start": 335,
"end": 360,
"text": "(Dean and Ghemawat, 2004)",
"ref_id": "BIBREF7"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Extraction from Generated Queries",
"sec_num": "2.2"
},
{
"text": "In order to generate new queries, the set of known phrase fillers is expanded into additional candidate phrases that may fill the query template. As a prerequisite to generating candidate phrase fillers, distributionally similar phrases (Lin and Pantel, 2002; Lin and Wu, 2009; Pantel et al., 2009) and their scores are collected in advance. The assumption is that phrases that appear in similar contexts have similar meanings. A phrase is represented as a vector of its contextual features. A feature is a token, collected from windows of three tokens centered around the occurrences of the phrase in sentences across Web documents (Lin and Wu, 2009) . Alternatively, the context could be approximated via linguistic dependencies detected with noun chunking (Pantel et al., 2009) and syntactic parsing (Lin and Pantel, 2002) . In the contextual vector of a phrase, the weight of a feature is the pointwise-mutual information (Lin and Wu, 2009) between the phrase P and the feature F :",
"cite_spans": [
{
"start": 237,
"end": 259,
"text": "(Lin and Pantel, 2002;",
"ref_id": "BIBREF11"
},
{
"start": 260,
"end": 277,
"text": "Lin and Wu, 2009;",
"ref_id": "BIBREF12"
},
{
"start": 278,
"end": 298,
"text": "Pantel et al., 2009)",
"ref_id": "BIBREF20"
},
{
"start": 633,
"end": 651,
"text": "(Lin and Wu, 2009)",
"ref_id": "BIBREF12"
},
{
"start": 759,
"end": 780,
"text": "(Pantel et al., 2009)",
"ref_id": "BIBREF20"
},
{
"start": 803,
"end": 825,
"text": "(Lin and Pantel, 2002)",
"ref_id": "BIBREF11"
},
{
"start": 926,
"end": 944,
"text": "(Lin and Wu, 2009)",
"ref_id": "BIBREF12"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Generation of Candidate Phrase Fillers:",
"sec_num": null
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "P M I(P, F ) = log F req(P, F ) \u00d7 N F req(P ) \u00d7 F req(F )",
"eq_num": "(1)"
}
],
"section": "Generation of Candidate Phrase Fillers:",
"sec_num": null
},
{
"text": "where F req(P, F ) is the frequency of the feature F occurring with the phrase P , and N is the feature vocabulary size. The distributional similarity score between two phrases P 1 and P 2 is the cosine similarity between the contextual vectors of the two phrases. Alternatively, vector similarity could be computed via the Jaccard or Dice coefficients (Pantel et al., 2009) . The lists DS(P ) of most distributionally similar phrases of a phrase P are thus compiled offline, by ranking the similar phrases of P in decreasing order of their DSscore relative to P . The most distributionally similar (Lin and Pantel, 2002; Pantel et al., 2009) phrases, of a known phrase filler K i from a query template T , are considered to be candidate phrase filler U of the respective query template. The score of a candidate relative to the entire set of known fillers is the average of the distributional similarity scores between the candidate and each known filler. For each template T , its candidate phrase fillers U are ranked in decreasing order of their scores. Known phrase fillers of T are discarded from the resulting list of candidate phrase fillers of T .",
"cite_spans": [
{
"start": 353,
"end": 374,
"text": "(Pantel et al., 2009)",
"ref_id": "BIBREF20"
},
{
"start": 599,
"end": 621,
"text": "(Lin and Pantel, 2002;",
"ref_id": "BIBREF11"
},
{
"start": 622,
"end": 642,
"text": "Pantel et al., 2009)",
"ref_id": "BIBREF20"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Generation of Candidate Phrase Fillers:",
"sec_num": null
},
{
"text": "The generation of candidate phrase fillers translates into two MapReduce steps. The first step rearranges the mappings from a query template to a set of known phrase fillers, into mappings from a known phrase filler to a set of query templates. Concretely, the mapper takes as input mappings from a query template (key) to its set of known phrase fillers (value), as they were output after the aggregation into query templates. The mapper emits mappings from a known phrase filler (key) to a query template (value). For each phrase filler, the reducer aggregates its query templates into a set. The second MapReduce step takes this data, and joins it with distributional similarity data. The latter is available as mappings from a phrase (key) to a list of scored similar phrases (value). The mapper selects similar phrases as candidate phrase fillers for the template, as explained earlier. The output of the second step consists in mappings from a query template (key) to its set of known phrase fillers, as well as to a list of scored candidate phrase fillers (value).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Generation of Candidate Phrase Fillers:",
"sec_num": null
},
{
"text": "Filtering of Candidate Phrase Fillers: Candidate phrase fillers generated via distributional similarities are similar to known phrases only statically. To also take the context of the query template into account, the candidates are filtered using the input queries. More precisely, the list of candidate phrases of a query template T is filtered, by retaining only known phrases of some other templates T \u2032 that are equivalent to T . Templates are deemed equivalent if they become identical after removal of stop words and other linking particles (prepositions, conjunctions etc.), term stemming and term reordering. For example, if the unfiltered candidate phrase eleanor rigby for the template \"lyrics of \u22c6 beatles\" appears as a known phrase filler of the template \"lyrics for \u22c6 by the beatles\", then eleanor rigby is retained after filtering for the template \"lyrics of \u22c6 beatles\".",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Generation of Candidate Phrase Fillers:",
"sec_num": null
},
{
"text": "The filtering of candidate phrases using equivalent templates is modeled as two MapReduce steps. The first step takes as input mappings from a query template (key) to its set of known phrase fillers and list of scored candidate phrase fillers (value), as they were output after the generation of candidate phrase fillers. The mapper converts the query template into the corresponding equivalent template that contains the individual terms in lexicographic order. It emits mappings from an equivalent template (key), to a query template with its set of known phrase fillers and its list of scored candidate phrase fillers (value). For each equivalent template, the reducer simply aggregates this data. The second step takes as input mappings from an equivalent template (key) to its query templates with their sets of known phrase fillers and lists of scored candidate phrase fillers (value). For each equivalent template, the mapper iterates over its query templates. It checks which of its candidate phrase fillers occur among the known phrase fillers of the other query templates. The candidate phrase fillers that pass this test are retained as filtered phrase fillers. The mapper emits mappings from a query template (key) to its set of known phrase fillers, list of scored unfiltered phrase fillers, and list of scored filtered phrase fillers. The reducer merely emits its input, without modifications.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Generation of Candidate Phrase Fillers:",
"sec_num": null
},
{
"text": "The relative ranking of candidate phrases from the list of inferred unfiltered phrase fillers (before filtering) is preserved in the list of inferred filtered phrase fillers (after filtering). Each filtered phrase filler inferred for a query template corresponds to a new query, generated by filling the phrase into the slot filler of the query template. Attribute Extraction: Extraction patterns such as \"A of I\", introduced in previous work (Pa\u015fca and Van Durme, 2007) to extract a candidate attribute A for a candidate instance I from queries, can be immediately applied to inferred queries. Thus, additional candidate attributes can be extracted from inferred queries, where the inferred queries are obtained via two types of query templates:",
"cite_spans": [
{
"start": 443,
"end": 470,
"text": "(Pa\u015fca and Van Durme, 2007)",
"ref_id": "BIBREF16"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Generation of Candidate Phrase Fillers:",
"sec_num": null
},
{
"text": "\u2022 query templates that specify a potential instance, and leave the attribute (e.g., A in \"A of I\") as a phrase filler being inferred: Each inferred phrase filler (e.g., core temperature, luminosity) is collected as a candidate attribute of the phrase specified in the query template (jupiter). In this case, attribute extraction is equivalent to transferring an instance associated with a noisy set of attributes that are known phrase fillers of a template, to be associated with new attributes that are inferred phrase fillers of the template.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Generation of Candidate Phrase Fillers:",
"sec_num": null
},
{
"text": "\u2022 query templates that specify a potential attribute, and leave the instance (e.g., I in \"A of I\") as a phrase filler being inferred: For each inferred phrase filler (e.g., cesium 137, water drop), the phrase specified in the query template (mass) is collected as a candidate attribute. In this case, attribute extraction is equivalent to transferring an attribute associated with a noisy set Phrase Ranked List Available in Data Repository caesium [cesium, rubidium, strontium, barium, thallium, lanthanum, potassium, cerium, yttrium, bismuth, indium, gallium, europium, cadmium, antimony, ammonium,..] ch3br [ch3cl, ch4, nh3, ch 4, c2h4, ch3i, nh 3, ch3oh, c2h2, ch3f, c2h6, hcho, no2, h2s, hcn, ch30h, n2o, n20, ch3cn, hcooh, ethane, cc14, ethene, propene, hzo, c02, ch3sh, chbr3,..] Regardless of whether they are specified manually or derived automatically, extraction patterns used in information extraction are imperfect (Kozareva et al., 2008) . The pattern A of I for attribute extraction is no exception. The two types of query templates from above have known phrase fillers that are not true attributes (e.g., europa for the instance jupiter) and instances (e.g., planets in order for the attribute mass) respectively. This phenomenon is not a defect of this particular approach, but is inherited from and shared with any methods using such patterns for attribute extraction, as well as with any methods that rely on seed attributes, when the seeds are noisy rather than clean. Attribute Ranking: As explained earlier, the score of an inferred phrase filler is computed as the average of similarity scores relative to known phrase fillers. The score is assigned to the pair of an attribute and instance extracted from the phrase filler. Attributes extracted for an instance are ranked in decreasing order of the scores.",
"cite_spans": [
{
"start": 449,
"end": 786,
"text": "[cesium, rubidium, strontium, barium, thallium, lanthanum, potassium, cerium, yttrium, bismuth, indium, gallium, europium, cadmium, antimony, ammonium,..] ch3br [ch3cl, ch4, nh3, ch 4, c2h4, ch3i, nh 3, ch3oh, c2h2, ch3f, c2h6, hcho, no2, h2s, hcn, ch30h, n2o, n20, ch3cn, hcooh, ethane, cc14, ethene, propene, hzo, c02, ch3sh, chbr3,..]",
"ref_id": null
},
{
"start": 928,
"end": 951,
"text": "(Kozareva et al., 2008)",
"ref_id": "BIBREF10"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Generation of Candidate Phrase Fillers:",
"sec_num": null
},
{
"text": "Textual Data Sources: The acquisition of instance attributes relies on a random sample of around 100 million fully-anonymized queries in English submitted by Web users in 2010. Each query is accompanied by its frequency of occurrence in the query logs.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experimental Setting",
"sec_num": "3"
},
{
"text": "A phrase similarity repository is derived following (Pantel et al., 2009) , from unstructured text available within a sample of around 200 million documents in English. The repository provides data for each of around 1 million phrases that occur as full-length queries in the input query logs. It contains ranked lists of the top 200 phrases computed to be the most distributionally similar, for each phrase. Table 1 illustrates the actual aaa, ac compressors, acheron, acrocyanosis, adelaide cbd, african population, agua caliente casino, al hirschfeld, alessandro nesta, american fascism, american society for horticultural science, ancient babylonia, angioplasty, annapolis harbor, antarctic region, arlene martel, arrabiata sauce, artificial intelligence, bangla music, baquba, bb gun, berkshire hathaway, bicalutamide, blue jay, boulder colorado, brittle star, capsicum, carbonate, carotid arteries, chester arthur, christian songs, cloxacillin, cobol, communicable diseases, contemporary art, cortex, ct scan, digital fortress, eartha kitt, eating disorders, file sharing, final fantasy vii, forensics, habbo hotel, halogens, halophytes, ho chi minh trail, icici prudential, jane fonda, juan carlos, karlsruhe, kidney stones, lipoma, loss of appetite, lucky ali, majorca, martin frobisher, mexico city, pancho villa, phosphorus, playing cards, prednisone, right to vote, robotics, rouen, scientific revolution, self-esteem, spandex, strattera, u.s., vida guerra, visual basic, web hosting, windsurfing, wlan Table 2 : Set of 75 target instances, used in the evaluation of instance attribute extraction ranked lists available in the repository for various phrases. The underlying similarity score between two phrases is the cosine between their vectors of context windows. Target Instances: The performance of attribute extraction is computed over a standard set of 75 instances, previously introduced in . \nAs shown in Table 2 , the set of instances ensures varied experimentation across multiple domains. Experimental Runs: The experiments consist of several individual runs. Runs R U and R F acquire attributes from queries inferred via the first type of target query templates (e.g., \"\u22c6 of jupiter\"), before filtering (R U ) and after filtering (R F ). Run R I uses queries inferred via the second type of target query templates (e.g., \"mass of \u22c6\"), after filtering.",
"cite_spans": [
{
"start": 52,
"end": 73,
"text": "(Pantel et al., 2009)",
"ref_id": "BIBREF20"
}
],
"ref_spans": [
{
"start": 409,
"end": 416,
"text": "Table 1",
"ref_id": null
},
{
"start": 1514,
"end": 1521,
"text": "Table 2",
"ref_id": null
},
{
"start": 1924,
"end": 1931,
"text": "Table 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "Experimental Setting",
"sec_num": "3"
},
{
"text": "In order to compare with existing work, a previous extraction method from (Pa\u015fca and Van Durme, 2007) , which uses extraction patterns, is implemented in a baseline run R P . For consistency, the data source to the run R P is the same set of input queries described at the beginning of Label Value Examples of Attributes vital 1.0 capsicum: calorie count cloxacillin: side effects lucky ali: album songs okay 0.5 jane fonda: musical theatre contributions mexico city: cathedral robotics: three laws wrong 0.0 acheron: kingdom berkshire hathaway: tax exclusion contemporary art: urban institute Table 3 : Correctness labels manually assigned to attributes extracted for various instances the section. The per-instance ranked lists of attributes produced by the individual runs from above are concatenated in a series of combination runs. For example, run R F P concatenates the attributes output by R F and by R P , in this order. Evaluation Procedure: The evaluation focuses on the assessment of accuracy of the ranked list of attributes generated for each instance. To remove any undesirable bias towards higher-ranked attributes, the attributes of each list to be evaluated are sorted alphabetically into a merged list. Each attribute of the merged list is manually assigned a correctness label relative to its respective instance. In accordance with previously introduced methodology, an attribute is vital if it must be present in an ideal list of attributes of the instance (e.g., side effects for cloxacillin); okay if it provides useful but non-essential information; and wrong if it is incorrect (Pa\u015fca, 2007) . Thus, a correctness label is manually assigned to a total of 4,833 attributes extracted for the 75 target instances.",
"cite_spans": [
{
"start": 74,
"end": 101,
"text": "(Pa\u015fca and Van Durme, 2007)",
"ref_id": "BIBREF16"
},
{
"start": 1604,
"end": 1617,
"text": "(Pa\u015fca, 2007)",
"ref_id": "BIBREF18"
}
],
"ref_spans": [
{
"start": 594,
"end": 601,
"text": "Table 3",
"ref_id": null
}
],
"eq_spans": [],
"section": "Experimental Setting",
"sec_num": "3"
},
{
"text": "To compute the precision score over a ranked list of attributes, the correctness labels are converted to numeric values (vital to 1, okay to 0.5 and wrong to 0), as shown in Table 3 . Precision at some rank N in the list is measured as the sum of the correctness values of the attributes extracted up to rank N , divided by the number of those attributes.",
"cite_spans": [],
"ref_spans": [
{
"start": 174,
"end": 181,
"text": "Table 3",
"ref_id": null
}
],
"eq_spans": [],
"section": "Experimental Setting",
"sec_num": "3"
},
{
"text": "Attribute Accuracy: Table 4 : Comparative accuracy of the ranked lists of attributes extracted in various runs, as an average over the entire set of 75 instances; and as an average over the (variable) subsets of instances for which some attributes were extracted extracted by various runs. In the upper half of the table, average precision scores penalize instances for which no attributes are extracted. In contrast, the average scores in the lower half of the table only consider the instances for which some attributes are extracted.",
"cite_spans": [],
"ref_spans": [
{
"start": 20,
"end": 27,
"text": "Table 4",
"ref_id": "TABREF1"
}
],
"eq_spans": [],
"section": "Evaluation Results",
"sec_num": "4"
},
{
"text": "The runs R I , R U and R F operate directly over generated queries. One of the two types of query templates that produce attributes performs better, as illustrated by lower scores with R F than with R I . The difference in scores between R U and R F , which are about twice as high at rank 50 for the latter, illustrates the positive impact of filtering the candidate phrase fillers inferred from the query templates.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation Results",
"sec_num": "4"
},
{
"text": "The benefit of combining the output from individual runs is illustrated by the generally higher scores given by combination runs in Table 4 , relative to individual runs that they combine. Among combination runs, R F P gives the highest scores at most ranks. When considering the accuracy of all runs, the highest scores over the entire evaluation set of instances are given by the combination run R F P . Over subsets of instances with non-empty Table 5 : Examples of ranked lists of attributes extracted in run R F from inferred filtered queries. None of these attributes are extracted for the respective instances in the baseline run R P attribute lists, the accuracy of the individual run R F is higher than all other individual runs, at par with the combination run R F P . Table 5 shows the ranked lists of attributes extracted by R F for a sample of instances.",
"cite_spans": [],
"ref_spans": [
{
"start": 132,
"end": 139,
"text": "Table 4",
"ref_id": "TABREF1"
},
{
"start": 447,
"end": 454,
"text": "Table 5",
"ref_id": null
},
{
"start": 779,
"end": 786,
"text": "Table 5",
"ref_id": null
}
],
"eq_spans": [],
"section": "Evaluation Results",
"sec_num": "4"
},
{
"text": "Previous work on attribute extraction uses a variety of types of textual data as sources for mining attributes. Taking advantage of structured and semi-structured text available within Web documents, the method introduced in (Yoshinaga and Torisawa, 2007) assembles and submits listseeking queries to general-purpose Web search engines, and analyzes the retrieved documents to identify common structural (HTML) patterns around class labels given as input, and potential attributes. Similarly, layout (e.g., font color and size) and other HTML tags serve as clues to acquire attributes from either domain-specific documents such as those from product and auction Web sites (Wong et al., 2008) or from arbitrary documents, optionally relying on the presence of explicit itemized lists or tables (Cafarella et al., 2008) . As an alternative to Web documents, articles within online encyclopedia can also be exploited as sources of structured text for attribute extraction, as illustrated by previous work using infoboxes and category labels (Suchanek et al., 2007; Nastase and Strube, 2008; associated with articles within Wikipedia. Working with unstructured text within Web documents, the method described in (Tokunaga et al., 2005) applies manually-created lexicosyntactic patterns to document sentences in order to extract candidate attributes, given various class labels as input. The candidate attributes are ranked using several frequency statistics. If the documents are domain-specific, such as documents containing product reviews, additional heuristically-motivated filters and scoring metrics can be used to extract and rank the attributes (Raju et al., 2008) . In (Bellare et al., 2007) , the extraction is guided by a small set of manually-provided seed instances and attributes rather than manuallycreated patterns, with the purpose of generating training data and extract new pairs of instances and attributes from text.",
"cite_spans": [
{
"start": 225,
"end": 255,
"text": "(Yoshinaga and Torisawa, 2007)",
"ref_id": "BIBREF30"
},
{
"start": 672,
"end": 691,
"text": "(Wong et al., 2008)",
"ref_id": "BIBREF27"
},
{
"start": 793,
"end": 817,
"text": "(Cafarella et al., 2008)",
"ref_id": "BIBREF4"
},
{
"start": 1038,
"end": 1061,
"text": "(Suchanek et al., 2007;",
"ref_id": "BIBREF24"
},
{
"start": 1062,
"end": 1087,
"text": "Nastase and Strube, 2008;",
"ref_id": "BIBREF15"
},
{
"start": 1208,
"end": 1231,
"text": "(Tokunaga et al., 2005)",
"ref_id": "BIBREF25"
},
{
"start": 1649,
"end": 1668,
"text": "(Raju et al., 2008)",
"ref_id": "BIBREF22"
},
{
"start": 1674,
"end": 1696,
"text": "(Bellare et al., 2007)",
"ref_id": "BIBREF3"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "5"
},
{
"text": "Web search queries have also been considered as a textual data source for attribute extraction, using extraction patterns (Pa\u015fca and Van Durme, 2007) or seed attributes (Pa\u015fca, 2007) to guide the extraction, and leading to attributes of higher accuracy than those extracted with equivalent techniques from Web documents. If the input data includes query sessions in addition to sets of search queries, extracted attributes have higher quality . Given an instance (e.g., nissan gt-r) and a numerical attribute (e.g., width) extracted with a method like ours, the acquisition of the corresponding values (e.g., 1.9m) is the aim of other research endeavors (Davidov and Rappoport, 2010; Bakalov et al., 2011) .",
"cite_spans": [
{
"start": 122,
"end": 149,
"text": "(Pa\u015fca and Van Durme, 2007)",
"ref_id": "BIBREF16"
},
{
"start": 169,
"end": 182,
"text": "(Pa\u015fca, 2007)",
"ref_id": "BIBREF18"
},
{
"start": 654,
"end": 683,
"text": "(Davidov and Rappoport, 2010;",
"ref_id": "BIBREF6"
},
{
"start": 684,
"end": 705,
"text": "Bakalov et al., 2011)",
"ref_id": "BIBREF1"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "5"
},
{
"text": "The role of Web search queries in information extraction has been previously explored. In this paper, synthetic search queries inferred from existing queries are used to acquire attributes. The queries lead to ranked lists of attributes whose accuracy is higher than with equivalent methods operating over queries. Current work investigates alternative methods for combining attributes from multiple individual runs; the expansion of the target query templates used to extract attributes; and further applications of the inferred queries, in information extraction and beyond.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "6"
}
],
"back_matter": [],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Acquisition of instance attributes via labeled and related instances",
"authors": [
{
"first": "E",
"middle": [],
"last": "Alfonseca",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Pa\u015fca",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Robledo-Arnuncio",
"suffix": ""
}
],
"year": 2010,
"venue": "Proceedings of the 33rd International Conference on Research and Development in Information Retrieval (SIGIR-10)",
"volume": "",
"issue": "",
"pages": "58--65",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "E. Alfonseca, M. Pa\u015fca, and E. Robledo-Arnuncio. 2010. Acquisition of instance attributes via labeled and related instances. In Proceedings of the 33rd International Con- ference on Research and Development in Information Re- trieval (SIGIR-10), pages 58-65, Geneva, Switzerland.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Scad: collective discovery of attribute values",
"authors": [
{
"first": "A",
"middle": [],
"last": "Bakalov",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Fuxman",
"suffix": ""
},
{
"first": "P",
"middle": [],
"last": "Talukdar",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Chakrabarti",
"suffix": ""
}
],
"year": 2011,
"venue": "Proceedings of the 20th World Wide Web Conference (WWW-11)",
"volume": "",
"issue": "",
"pages": "447--456",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "A. Bakalov, A. Fuxman, P. Talukdar, and S. Chakrabarti. 2011. Scad: collective discovery of attribute values. In Proceedings of the 20th World Wide Web Conference (WWW-11), pages 447-456, Hyderabad, India.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "The tradeoffs between open and traditional relation extraction",
"authors": [
{
"first": "M",
"middle": [],
"last": "Banko",
"suffix": ""
},
{
"first": "O",
"middle": [],
"last": "Etzioni",
"suffix": ""
}
],
"year": 2008,
"venue": "Proceedings of the 46th Annual Meeting of the Association for Computational Linguistics (ACL-08)",
"volume": "",
"issue": "",
"pages": "28--36",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "M. Banko and O. Etzioni. 2008. The tradeoffs between open and traditional relation extraction. In Proceedings of the 46th Annual Meeting of the Association for Computational Linguistics (ACL-08), pages 28-36, Columbus, Ohio.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Lightlysupervised attribute extraction",
"authors": [
{
"first": "K",
"middle": [],
"last": "Bellare",
"suffix": ""
},
{
"first": "P",
"middle": [],
"last": "Talukdar",
"suffix": ""
},
{
"first": "G",
"middle": [],
"last": "Kumaran",
"suffix": ""
},
{
"first": "F",
"middle": [],
"last": "Pereira",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Liberman",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Mccallum",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Dredze",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the 21st Annual Conference on Neural Information Processing Systems (NIPS-07). Workshop on Machine Learning for Web Search",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "K. Bellare, P. Talukdar, G. Kumaran, F. Pereira, M. Liber- man, A. McCallum, and M. Dredze. 2007. Lightly- supervised attribute extraction. In Proceedings of the 21st Annual Conference on Neural Information Process- ing Systems (NIPS-07). Workshop on Machine Learning for Web Search, Whistler, British Columbia.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "WebTables: Exploring the power of tables on the Web",
"authors": [
{
"first": "M",
"middle": [],
"last": "Cafarella",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Halevy",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Wang",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Wu",
"suffix": ""
},
{
"first": "Y",
"middle": [],
"last": "Zhang",
"suffix": ""
}
],
"year": 2008,
"venue": "Proceedings of the 34th Conference on Very Large Data Bases (VLDB-08)",
"volume": "",
"issue": "",
"pages": "538--549",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "M. Cafarella, A. Halevy, D. Wang, E. Wu, and Y. Zhang. 2008. WebTables: Exploring the power of tables on the Web. In Proceedings of the 34th Conference on Very Large Data Bases (VLDB-08), pages 538-549, Auckland, New Zealand.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Automatic acquisition of attributes for ontology construction",
"authors": [
{
"first": "G",
"middle": [],
"last": "Cui",
"suffix": ""
},
{
"first": "Q",
"middle": [],
"last": "Lu",
"suffix": ""
},
{
"first": "W",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Y",
"middle": [],
"last": "Chen",
"suffix": ""
}
],
"year": 2009,
"venue": "Proceedings of the 22nd International Conference on Computer Processing of Oriental Languages",
"volume": "",
"issue": "",
"pages": "248--259",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "G. Cui, Q. Lu, W. Li, and Y. Chen. 2009. Automatic acqui- sition of attributes for ontology construction. In Proceed- ings of the 22nd International Conference on Computer Processing of Oriental Languages, pages 248-259, Hong Kong.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Extraction and approximation of numerical attributes from the Web",
"authors": [
{
"first": "D",
"middle": [],
"last": "Davidov",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Rappoport",
"suffix": ""
}
],
"year": 2010,
"venue": "Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics (ACL-10)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "D. Davidov and A. Rappoport. 2010. Extraction and ap- proximation of numerical attributes from the Web. In Pro- ceedings of the 48th Annual Meeting of the Association for Computational Linguistics (ACL-10), Uppsala, Sweden.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "MapReduce: Simplified data processing on large clusters",
"authors": [
{
"first": "J",
"middle": [],
"last": "Dean",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Ghemawat",
"suffix": ""
}
],
"year": 2004,
"venue": "Proceedings of the 6th Symposium on Operating Systems Design and Implementation (OSDI-04)",
"volume": "",
"issue": "",
"pages": "137--150",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "J. Dean and S. Ghemawat. 2004. MapReduce: Simplified data processing on large clusters. In Proceedings of the 6th Symposium on Operating Systems Design and Imple- mentation (OSDI-04), pages 137-150, San Francisco, Cal- ifornia.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Concepts, attributes and arbitrary relations",
"authors": [
{
"first": "N",
"middle": [],
"last": "Guarino",
"suffix": ""
}
],
"year": 1992,
"venue": "Data and Knowledge Engineering",
"volume": "8",
"issue": "",
"pages": "249--261",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "N. Guarino. 1992. Concepts, attributes and arbitrary rela- tions. Data and Knowledge Engineering, 8:249-261.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Good question! Statistical ranking for question generation",
"authors": [
{
"first": "M",
"middle": [],
"last": "Heilman",
"suffix": ""
},
{
"first": "N",
"middle": [],
"last": "Smith",
"suffix": ""
}
],
"year": 2010,
"venue": "Proceedings of the 2010 Conference of the North American Association for Computational Linguistics (NAACL-HLT-10)",
"volume": "",
"issue": "",
"pages": "609--617",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "M. Heilman and N. Smith. 2010. Good question! Statisti- cal ranking for question generation. In Proceedings of the 2010 Conference of the North American Association for Computational Linguistics (NAACL-HLT-10), pages 609- 617, Los Angeles, California.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Semantic class learning from the Web with hyponym pattern linkage graphs",
"authors": [
{
"first": "Z",
"middle": [],
"last": "Kozareva",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Riloff",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Hovy",
"suffix": ""
}
],
"year": 2008,
"venue": "Proceedings of the 46th Annual Meeting of the Association for Computational Linguistics (ACL-08)",
"volume": "",
"issue": "",
"pages": "1048--1056",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Z. Kozareva, E. Riloff, and E. Hovy. 2008. Semantic class learning from the Web with hyponym pattern link- age graphs. In Proceedings of the 46th Annual Meeting of the Association for Computational Linguistics (ACL-08), pages 1048-1056, Columbus, Ohio.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Concept discovery from text",
"authors": [
{
"first": "D",
"middle": [],
"last": "Lin",
"suffix": ""
},
{
"first": "P",
"middle": [],
"last": "Pantel",
"suffix": ""
}
],
"year": 2002,
"venue": "Proceedings of the 19th International Conference on Computational linguistics (COLING-02)",
"volume": "",
"issue": "",
"pages": "1--7",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "D. Lin and P. Pantel. 2002. Concept discovery from text. In Proceedings of the 19th International Confer- ence on Computational linguistics (COLING-02), pages 1-7, Taipei, Taiwan.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Phrase clustering for discriminative learning",
"authors": [
{
"first": "D",
"middle": [],
"last": "Lin",
"suffix": ""
},
{
"first": "X",
"middle": [],
"last": "Wu",
"suffix": ""
}
],
"year": 2009,
"venue": "Proceedings of the 47th Annual Meeting of the Association for Computational Linguistics (ACL-IJCNLP-09)",
"volume": "",
"issue": "",
"pages": "1030--1038",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "D. Lin and X. Wu. 2009. Phrase clustering for discrimina- tive learning. In Proceedings of the 47th Annual Meeting of the Association for Computational Linguistics (ACL- IJCNLP-09), pages 1030-1038, Singapore.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "A computer-aided environment for generating multiple-choice test items",
"authors": [
{
"first": "R",
"middle": [],
"last": "Mitkov",
"suffix": ""
},
{
"first": "L",
"middle": [],
"last": "Ha",
"suffix": ""
}
],
"year": 2006,
"venue": "Natural Language Engineering",
"volume": "12",
"issue": "2",
"pages": "177--194",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "R. Mitkov and L. Ha. 2006. A computer-aided environment for generating multiple-choice test items. Natural Lan- guage Engineering, 12(2):177-194.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Mining knowledge from text using information extraction",
"authors": [
{
"first": "R",
"middle": [],
"last": "Mooney",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Bunescu",
"suffix": ""
}
],
"year": 2005,
"venue": "SIGKDD Explorations",
"volume": "7",
"issue": "1",
"pages": "3--10",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "R. Mooney and R. Bunescu. 2005. Mining knowledge from text using information extraction. SIGKDD Explorations, 7(1):3-10.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Decoding Wikipedia categories for knowledge acquisition",
"authors": [
{
"first": "V",
"middle": [],
"last": "Nastase",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Strube",
"suffix": ""
}
],
"year": 2008,
"venue": "Proceedings of the 23rd National Conference on Artificial Intelligence (AAAI-08)",
"volume": "",
"issue": "",
"pages": "1219--1224",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "V. Nastase and M. Strube. 2008. Decoding Wikipedia cat- egories for knowledge acquisition. In Proceedings of the 23rd National Conference on Artificial Intelligence (AAAI-08), pages 1219-1224, Chicago, Illinois.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "What you seek is what you get: Extraction of class attributes from query logs",
"authors": [
{
"first": "M",
"middle": [],
"last": "Pa\u015fca",
"suffix": ""
},
{
"first": "B",
"middle": [],
"last": "Van Durme",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the 20th International Joint Conference on Artificial Intelligence (IJCAI-07)",
"volume": "",
"issue": "",
"pages": "2832--2837",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "M. Pa\u015fca and B. Van Durme. 2007. What you seek is what you get: Extraction of class attributes from query logs. In Proceedings of the 20th International Joint Conference on Artificial Intelligence (IJCAI-07), pages 2832-2837, Hy- derabad, India.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "The role of query sessions in extracting instance attributes from web search queries",
"authors": [
{
"first": "M",
"middle": [],
"last": "Pa\u015fca",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Alfonseca",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Robledo-Arnuncio",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Martin-Brualla",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Hall",
"suffix": ""
}
],
"year": 2010,
"venue": "Proceedings of the 32nd European Conference on Information Retrieval (ECIR-10)",
"volume": "",
"issue": "",
"pages": "62--74",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "M. Pa\u015fca, E. Alfonseca, E. Robledo-Arnuncio, R. Martin- Brualla, and K. Hall. 2010. The role of query sessions in extracting instance attributes from web search queries. In Proceedings of the 32nd European Conference on Infor- mation Retrieval (ECIR-10), pages 62-74, Milton Keynes, United Kingdom.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "Organizing and searching the World Wide Web of facts -step two: Harnessing the wisdom of the crowds",
"authors": [
{
"first": "M",
"middle": [],
"last": "Pa\u015fca",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the 16th World Wide Web Conference (WWW-07)",
"volume": "",
"issue": "",
"pages": "101--110",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "M. Pa\u015fca. 2007. Organizing and searching the World Wide Web of facts -step two: Harnessing the wisdom of the crowds. In Proceedings of the 16th World Wide Web Con- ference (WWW-07), pages 101-110, Banff, Canada.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "Asking what no one has asked before: Using phrase similarities to generate synthetic web search queries",
"authors": [
{
"first": "M",
"middle": [],
"last": "Pa\u015fca",
"suffix": ""
}
],
"year": 2011,
"venue": "Proceedings of the 20th International Conference on Information and Knowledge Management (CIKM-11)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "M. Pa\u015fca. 2011. Asking what no one has asked before: Us- ing phrase similarities to generate synthetic web search queries. In Proceedings of the 20th International Confer- ence on Information and Knowledge Management (CIKM- 11), Glasgow, United Kingdom.",
"links": null
},
"BIBREF20": {
"ref_id": "b20",
"title": "Web-scale distributional similarity and entity set expansion",
"authors": [
{
"first": "P",
"middle": [],
"last": "Pantel",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Crestan",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Borkovsky",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Popescu",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "Vyas",
"suffix": ""
}
],
"year": 2009,
"venue": "Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing (EMNLP-09)",
"volume": "",
"issue": "",
"pages": "938--947",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "P. Pantel, E. Crestan, A. Borkovsky, A. Popescu, and V. Vyas. 2009. Web-scale distributional similarity and entity set expansion. In Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing (EMNLP-09), pages 938-947, Singapore.",
"links": null
},
"BIBREF21": {
"ref_id": "b21",
"title": "Semi-supervised learning of attribute-value pairs from product descriptions",
"authors": [
{
"first": "K",
"middle": [],
"last": "Probst",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Ghani",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Krema",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Fano",
"suffix": ""
},
{
"first": "Y",
"middle": [],
"last": "Liu",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the 20th International Joint Conference on Artificial Intelligence (IJCAI-07)",
"volume": "",
"issue": "",
"pages": "2838--2843",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "K. Probst, R. Ghani, M. Krema, A. Fano, and Y. Liu. 2007. Semi-supervised learning of attribute-value pairs from product descriptions. In Proceedings of the 20th Interna- tional Joint Conference on Artificial Intelligence (IJCAI- 07), pages 2838-2843, Hyderabad, India.",
"links": null
},
"BIBREF22": {
"ref_id": "b22",
"title": "An unsupervised approach to product attribute extraction",
"authors": [
{
"first": "S",
"middle": [],
"last": "Raju",
"suffix": ""
},
{
"first": "P",
"middle": [],
"last": "Pingali",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "Varma",
"suffix": ""
}
],
"year": 2008,
"venue": "Proceedings of the 31st International Conference on Research and Development in Information Retrieval (SIGIR-08)",
"volume": "",
"issue": "",
"pages": "35--42",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "S. Raju, P. Pingali, and V. Varma. 2008. An unsupervised ap- proach to product attribute extraction. In Proceedings of the 31st International Conference on Research and Devel- opment in Information Retrieval (SIGIR-08), pages 35-42, Singapore.",
"links": null
},
"BIBREF23": {
"ref_id": "b23",
"title": "Wikipedia: The free encyclopedia",
"authors": [
{
"first": "M",
"middle": [],
"last": "Remy",
"suffix": ""
}
],
"year": 2002,
"venue": "Online Information Review",
"volume": "26",
"issue": "6",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "M. Remy. 2002. Wikipedia: The free encyclopedia. Online Information Review, 26(6):434.",
"links": null
},
"BIBREF24": {
"ref_id": "b24",
"title": "Yago: a core of semantic knowledge unifying WordNet and Wikipedia",
"authors": [
{
"first": "F",
"middle": [],
"last": "Suchanek",
"suffix": ""
},
{
"first": "G",
"middle": [],
"last": "Kasneci",
"suffix": ""
},
{
"first": "G",
"middle": [],
"last": "Weikum",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the 16th World Wide Web Conference (WWW-07)",
"volume": "",
"issue": "",
"pages": "697--706",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "F. Suchanek, G. Kasneci, and G. Weikum. 2007. Yago: a core of semantic knowledge unifying WordNet and Wikipedia. In Proceedings of the 16th World Wide Web Conference (WWW-07), pages 697-706, Banff, Canada.",
"links": null
},
"BIBREF25": {
"ref_id": "b25",
"title": "Automatic discovery of attribute words from Web documents",
"authors": [
{
"first": "K",
"middle": [],
"last": "Tokunaga",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Kazama",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Torisawa",
"suffix": ""
}
],
"year": 2005,
"venue": "Proceedings of the 2nd International Joint Conference on Natural Language Processing (IJCNLP-05)",
"volume": "",
"issue": "",
"pages": "106--118",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "K. Tokunaga, J. Kazama, and K. Torisawa. 2005. Automatic discovery of attribute words from Web documents. In Proceedings of the 2nd International Joint Conference on Natural Language Processing (IJCNLP-05), pages 106- 118, Jeju Island, Korea.",
"links": null
},
"BIBREF26": {
"ref_id": "b26",
"title": "Hadoop: the Definitive Guide. O'Reilly Media",
"authors": [
{
"first": "Tom",
"middle": [],
"last": "White",
"suffix": ""
}
],
"year": 2010,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Tom White. 2010. Hadoop: the Definitive Guide. O'Reilly Media, 2nd edition.",
"links": null
},
"BIBREF27": {
"ref_id": "b27",
"title": "An unsupervised framework for extracting and normalizing product attributes from multiple Web sites",
"authors": [
{
"first": "T",
"middle": [],
"last": "Wong",
"suffix": ""
},
{
"first": "W",
"middle": [],
"last": "Lam",
"suffix": ""
},
{
"first": "T",
"middle": [],
"last": "Wong",
"suffix": ""
}
],
"year": 2008,
"venue": "Proceedings of the 31st International Conference on Research and Development in Information Retrieval (SIGIR-08)",
"volume": "",
"issue": "",
"pages": "35--42",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "T. Wong, W. Lam, and T. Wong. 2008. An unsuper- vised framework for extracting and normalizing product attributes from multiple Web sites. In Proceedings of the 31st International Conference on Research and Develop- ment in Information Retrieval (SIGIR-08), pages 35-42, Singapore.",
"links": null
},
"BIBREF28": {
"ref_id": "b28",
"title": "Automatically refining the Wikipedia infobox ontology",
"authors": [
{
"first": "F",
"middle": [],
"last": "Wu",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Weld",
"suffix": ""
}
],
"year": 2008,
"venue": "Proceedings of the 17th World Wide Web Conference (WWW-08)",
"volume": "",
"issue": "",
"pages": "635--644",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "F. Wu and D. Weld. 2008. Automatically refining the Wikipedia infobox ontology. In Proceedings of the 17th World Wide Web Conference (WWW-08), pages 635-644, Beijing, China.",
"links": null
},
"BIBREF29": {
"ref_id": "b29",
"title": "Information extraction from Wikipedia: Moving down the long tail",
"authors": [
{
"first": "F",
"middle": [],
"last": "Wu",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Hoffmann",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Weld",
"suffix": ""
}
],
"year": 2008,
"venue": "Proceedings of the 14th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (KDD-08)",
"volume": "",
"issue": "",
"pages": "731--739",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "F. Wu, R. Hoffmann, and D. Weld. 2008. Information extrac- tion from Wikipedia: Moving down the long tail. In Pro- ceedings of the 14th ACM SIGKDD Conference on Knowl- edge Discovery and Data Mining (KDD-08), pages 731- 739.",
"links": null
},
"BIBREF30": {
"ref_id": "b30",
"title": "Open-domain attribute-value acquisition from semi-structured texts",
"authors": [
{
"first": "N",
"middle": [],
"last": "Yoshinaga",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Torisawa",
"suffix": ""
}
],
"year": 2007,
"venue": "Workshop on Text to Knowledge: The Lexicon/Ontology Interface (OntoLex-2007)",
"volume": "",
"issue": "",
"pages": "55--66",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "N. Yoshinaga and K. Torisawa. 2007. Open-domain attribute-value acquisition from semi-structured texts. In Proceedings of the 6th International Semantic Web Con- ference (ISWC-07), Workshop on Text to Knowledge: The Lexicon/Ontology Interface (OntoLex-2007), pages 55- 66, Busan, South Korea.",
"links": null
}
},
"ref_entries": {
"TABREF1": {
"num": null,
"type_str": "table",
"text": "",
"html": null,
"content": "<table><tr><td>compares precision</td></tr></table>"
}
}
}
} |