{
"paper_id": "I05-1010",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T07:24:42.571378Z"
},
"title": "Automatic Discovery of Attribute Words from Web Documents",
"authors": [
{
"first": "Kosuke",
"middle": [],
"last": "Tokunaga",
"suffix": "",
"affiliation": {},
"email": "kosuke-t@jaist.ac.jp"
},
{
"first": "Jun'ichi",
"middle": [],
"last": "Kazama",
"suffix": "",
"affiliation": {},
"email": "kazama@jaist.ac.jp"
},
{
"first": "Kentaro",
"middle": [],
"last": "Torisawa",
"suffix": "",
"affiliation": {},
"email": "torisawa@jaist.ac.jp"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "We propose a method of acquiring attribute words for a wide range of objects from Japanese Web documents. The method is a simple unsupervised method that utilizes the statistics of words, lexico-syntactic patterns, and HTML tags. To evaluate the attribute words, we also establish criteria and a procedure based on question-answerability about the candidate word. 1 We use C to denote both the class and its class label (the word representing the class). We also use A to denote both the attribute and the word representing it.",
"pdf_parse": {
"paper_id": "I05-1010",
"_pdf_hash": "",
"abstract": [
{
"text": "We propose a method of acquiring attribute words for a wide range of objects from Japanese Web documents. The method is a simple unsupervised method that utilizes the statistics of words, lexico-syntactic patterns, and HTML tags. To evaluate the attribute words, we also establish criteria and a procedure based on question-answerability about the candidate word. 1 We use C to denote both the class and its class label (the word representing the class). We also use A to denote both the attribute and the word representing it.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Knowledge about how we recognize objects is of great practical importance for many NLP tasks. Knowledge about attributes, which tells us from what viewpoints objects are usually understood or described, is one of such type of knowledge. For example, the attributes of car objects will be weight, engine, steering wheel, driving feel, and manufacturer. In other words, attributes are items whose values we want to know when we want to know about the object. More analytically, we tend to regard A as an attribute for objects of class C when A works as if function v = A(o), o \u2208 C where v is necessary to us to identify o (especially to distinguish o from o ( = o) \u2208 C). Therefore, obvious applications of attributes are ones such as summarization [1, 2] and question-answering [3] . Moreover, they can be useful as features in word clustering [4] or machine learning. Although the knowledge base for attributes can be prepared manually (e.g., WordNet [5] ), problems are cost and coverage. To overcome these, we propose a method that automatically acquires attribute knowledge from the Web.",
"cite_spans": [
{
"start": 746,
"end": 749,
"text": "[1,",
"ref_id": "BIBREF0"
},
{
"start": 750,
"end": 752,
"text": "2]",
"ref_id": "BIBREF1"
},
{
"start": 776,
"end": 779,
"text": "[3]",
"ref_id": "BIBREF2"
},
{
"start": 842,
"end": 845,
"text": "[4]",
"ref_id": "BIBREF3"
},
{
"start": 950,
"end": 953,
"text": "[5]",
"ref_id": "BIBREF4"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "To acquire the attributes for a given class, C (e.g., car ), the proposed method first downloads documents that contain class label C (e.g., \"car\") from the Web. 1 We extract the candidates of attribute words from these documents and score them according to the statistics of words, lexico-syntactic patterns, and HTML tags. Highly scored words are output as attributes for the class. Lexico-syntactic patterns and other statistics have been used in other lexical knowledge acquisition systems [3, 4, 6, 7, 8] . We specifically used lexico-syntactic patterns involving the Japanese postposition \"no\" as used in [8] such as \"C no A\" where A is an attribute word, which is almost equivalent to pattern \"A of C\" used in [7] to find part-whole relations. Novel features of our method are its use of Web search engines to focus on documents highly relevant to the class and its use of statistics concerning attribute words and surrounding HTML tags.",
"cite_spans": [
{
"start": 162,
"end": 163,
"text": "1",
"ref_id": "BIBREF0"
},
{
"start": 494,
"end": 497,
"text": "[3,",
"ref_id": "BIBREF2"
},
{
"start": 498,
"end": 500,
"text": "4,",
"ref_id": "BIBREF3"
},
{
"start": 501,
"end": 503,
"text": "6,",
"ref_id": "BIBREF5"
},
{
"start": 504,
"end": 506,
"text": "7,",
"ref_id": "BIBREF6"
},
{
"start": 507,
"end": 509,
"text": "8]",
"ref_id": "BIBREF7"
},
{
"start": 611,
"end": 614,
"text": "[8]",
"ref_id": "BIBREF7"
},
{
"start": 717,
"end": 720,
"text": "[7]",
"ref_id": "BIBREF6"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "One of the difficulties in studying attribute knowledge is that there are no standard definitions of attributes, or criteria for evaluating obtained attributes. In this paper, we propose a simple but effective definition of attributes that matches our motivation and applications, i.e., whether we can ask a question about the attribute and whether there is an answer to that question (question answerability). For example, one can ask as \"Who is the manufacturer of this car?\", and someone might answer \"Honda\", because we want to know the manufacturer when we concerned about cars. We designed a procedure for evaluating attributes based on this idea. As the literature points out [9, 10] , attributes can include many types of relations such as property (e.g., weight ), part-of (e.g., engine), telic (e.g., driving feel ), and agentive (e.g., manufacturer ). However, we ignored type distinctions in this study. First, because attributes are useful even if the type is not known, and second, because defining attributes as one of these types and evaluating them only complicates the evaluation process, making the results unstable. The use of linguistic tests to define attributes is not that new. Woods [11] devised a test on whether we can say \"The A of o is v.\" Although we followed this procedure, we focused more on attributes that are important for our understanding of an object by using question-answerability as our criterion.",
"cite_spans": [
{
"start": 683,
"end": 686,
"text": "[9,",
"ref_id": "BIBREF8"
},
{
"start": 687,
"end": 690,
"text": "10]",
"ref_id": "BIBREF9"
},
{
"start": 1208,
"end": 1212,
"text": "[11]",
"ref_id": "BIBREF10"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Our method is based on the following three observations. 1. Attributes tend to occur in documents that contain the class label and not in other documents. 2. Attributes tend to be emphasized by the use of certain HTML tags or occur as items in HTML itemizations or tables in Web documents. 3. Attributes tend to co-occur with the class label in specific lexico-syntactic patterns involving the postposition \"no.\"",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Basic Observations on Attributes",
"sec_num": "2.1"
},
{
"text": "To acquire the attributes of class C, we first download documents that contain class label C using a Web search engine, according to the first observation. We refer to this set of documents as a local document set (LD(C)). All the nouns appearing in the local document set are regarded as candidates of attribute words. Here, the nouns are words tagged as \"proper nouns\", \"sahen nouns\" (nouns that can become a verb with the suffix \"suru\"), \"location\", or \"unknown\" (e.g., words written in katakana) by a Japanese morphological analyzer, JUMAN [12] . Note that we restricted ourselves to single word attributes in this study. The obtained candidate words are scored in the next step. Table 1 . Lexico-syntactic patterns for attribute acquisition. (We added possible English translations for the patterns in parenthesis).",
"cite_spans": [
{
"start": 544,
"end": 548,
"text": "[12]",
"ref_id": "BIBREF11"
}
],
"ref_spans": [
{
"start": 684,
"end": 691,
"text": "Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Extraction of Candidate Words",
"sec_num": "2.2"
},
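The candidate-extraction step above can be pictured with a short sketch. This is not the authors' code: the POS tag names and the pos_tag() helper are hypothetical placeholders standing in for the JUMAN analyzer and its tag set.

```python
# Illustrative sketch of candidate extraction from LD(C) (not the authors' code).
# pos_tag() is a hypothetical stand-in for a Japanese morphological analyzer
# such as JUMAN; CANDIDATE_POS mirrors the POS classes mentioned in the text.
from typing import Iterable, List, Set, Tuple

CANDIDATE_POS = {"proper_noun", "sahen_noun", "location", "unknown"}

def pos_tag(text: str) -> List[Tuple[str, str]]:
    """Placeholder: return (word, pos) pairs for a Japanese sentence."""
    raise NotImplementedError("plug in a morphological analyzer here")

def extract_candidates(local_docs: Iterable[str]) -> Set[str]:
    """Collect single-word attribute candidates (nouns) from LD(C)."""
    candidates: Set[str] = set()
    for doc in local_docs:
        for word, pos in pos_tag(doc):
            if pos in CANDIDATE_POS:
                candidates.add(word)
    return candidates
```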
{
"text": "C no A ha (A of C [verb]) C no A de (by A of C) C no A e (to A of C) C no A ga (A of C [verb]) C no A made (even/until A of C) C no AA(A of C,) C no A wo ([verb] A of C) C no A kara (from A of C) C no A ni (at/in A of C) C no A yori (from/than A of C)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Extraction of Candidate Words",
"sec_num": "2.2"
},
{
"text": "We rank the candidate words according to a score that reflects the observations described in Sect. 2.1. The overall score takes the following form.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Ranking of Candidate Words",
"sec_num": "2.3"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "V (C, A) = n(C, A) \u2022 f (C, A) \u2022 t(C, A) \u2022 df idf (C, A),",
"eq_num": "(1)"
}
],
"section": "Ranking of Candidate Words",
"sec_num": "2.3"
},
{
"text": "where A is the candidate word to be scored and C is the class. n(C, A) and f (C, A) are scores concerning lexico-syntactic patterns. t(C, A) is a score concerning the statistics of HTML tags to reflect the second observation. Finally, df idf (C, A) is the score related to word statistics. This reflects the first observation. By multiplying these sub-scores, we expect that they will complement each other. We will explain the details on these sub-scores in the following.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Ranking of Candidate Words",
"sec_num": "2.3"
},
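As a rough illustration of how Eq. (1) is applied, the sketch below ranks candidates by the product of the four sub-scores. The sub-score functions are assumed to be supplied as callables (they are sketched in the following subsections); none of this is taken from the authors' implementation.

```python
# Minimal sketch of ranking by Eq. (1): V(C, A) = n(C, A) * f(C, A) * t(C, A) * dfidf(C, A).
# The four sub-score functions are assumed callables; this is not the authors' code.
from typing import Callable, Iterable, List, Tuple

Score = Callable[[str, str], float]

def rank_candidates(C: str, candidates: Iterable[str],
                    n: Score, f: Score, t: Score, dfidf: Score,
                    top_k: int = 50) -> List[Tuple[str, float]]:
    scored = [(A, n(C, A) * f(C, A) * t(C, A) * dfidf(C, A)) for A in candidates]
    scored.sort(key=lambda pair: pair[1], reverse=True)
    return scored[:top_k]  # the top-scored words are output as attributes
```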
{
"text": "As previously mentioned, we use lexico-syntactic patterns including the Japanese postposition \"no\" as clues. The patterns take the form \"C no A P OST \" where P OST is a Japanese postposition or a punctuation mark. 2 The actual patterns used are listed in Table 1 . Score n(C, A) is the number of times C and A co-occur in these patterns in the local document set LD(C).",
"cite_spans": [
{
"start": 214,
"end": 215,
"text": "2",
"ref_id": "BIBREF1"
}
],
"ref_spans": [
{
"start": 255,
"end": 262,
"text": "Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Ranking of Candidate Words",
"sec_num": "2.3"
},
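A simplified surface-level sketch of n(C, A) is given below. It matches the "C no A POST" patterns of Table 1 directly on the raw text of LD(C). The postposition list is a rendering of Table 1 in Japanese characters, and the whole function is our assumption about one way to implement the count, not the authors' code (note also that f(C, A) in the paper is computed over parsed dependency structures, not by surface matching).

```python
import re
from typing import Iterable

# Japanese postpositions / punctuation corresponding to Table 1
# (ha, de, e, ga, made, comma, wo, kara, ni, yori), written as unicode escapes.
POSTPOSITIONS = ["\u306f", "\u3067", "\u3078", "\u304c", "\u307e\u3067",
                 "\u3001", "\u3092", "\u304b\u3089", "\u306b", "\u3088\u308a"]

def n_score(C: str, A: str, local_docs: Iterable[str]) -> int:
    """Count occurrences of 'C no A POST' in the local document set LD(C)."""
    post_alt = "|".join(map(re.escape, POSTPOSITIONS))
    # "\u306e" is the postposition "no" that links C and A.
    pattern = re.compile(re.escape(C) + "\u306e" + re.escape(A) + "(?:" + post_alt + ")")
    return sum(len(pattern.findall(doc)) for doc in local_docs)
```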
{
"text": "Score f (C, A) requires more explanation. Roughly, f (C, A) is the number of times C and A co-occur in the patterns without the last postposition (i.e., pattern \"C no A\") collected from 33 years of parsed newspaper articles. 3 Note that pattern matching was done against the parsed dependency structures. 4 The reason this score was used in addition to n(C, A) was to obtain more reliable scores by increasing the number of documents to be matched. This may sound contradictory to the fact that the Web is the largest corpus in the world. However, we found that we could not obtain all the documents that contained the class label because existing commercial Web search engines return URLs for a very small fraction of matched documents (usually up to about 1,000 documents). Although we could use hit counts for the patterns, we did not do this to avoid overloading the search engine (each class has about 20,000 candidate words).",
"cite_spans": [
{
"start": 225,
"end": 226,
"text": "3",
"ref_id": "BIBREF2"
},
{
"start": 305,
"end": 306,
"text": "4",
"ref_id": "BIBREF3"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Ranking of Candidate Words",
"sec_num": "2.3"
},
{
"text": "Score t(C, A) is the number of times A appears in LD(C) surrounded by HTML tags. More precisely, we count the number of times A appears in the form: \"<tag1 >A<tag2 >\" where the number of characters between HTML tags (i.e., the length of A) is 20 at maximum. The tags (<tag1 > and <tag2 >) can be either a start tag (e.g., <A>) or an end tag (e.g., </A>). This score is intended to give high values for words that are emphasized or occur in itemizations or tables. For example, in the HTML document in Fig. 1 , the words \" (Thai-curry)\", \" (ingredient)\", \" (spice)\", \" (coriander, cumin)\", and \" (recipe)\" are counted. Finally, df idf (C, A), which reflects the first observation, is calculated as:",
"cite_spans": [],
"ref_spans": [
{
"start": 501,
"end": 507,
"text": "Fig. 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Ranking of Candidate Words",
"sec_num": "2.3"
},
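The tag-statistics score t(C, A) described above can be approximated with a simple regular expression. This is an illustrative sketch with assumed names, not the authors' code; a lookahead is used so that one tag can close a span and open the next, as happens in itemizations.

```python
import re
from typing import Iterable

# A span of at most 20 non-tag characters enclosed by two HTML tags
# (each tag may be a start tag or an end tag); the second tag is matched
# with a lookahead so adjacent items in lists and tables are all counted.
TAGGED_SPAN = re.compile(r"<[^<>]+>([^<>]{1,20})(?=<[^<>]+>)")

def t_score(A: str, local_docs: Iterable[str]) -> int:
    """Count how often A appears as '<tag1>A<tag2>' in LD(C)."""
    return sum(
        1
        for doc in local_docs
        for span in TAGGED_SPAN.findall(doc)
        if span.strip() == A
    )
```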
{
"text": "df idf (C, A) = df (A, LD(C)) \u2022 idf (A), idf(A) = log |G| df (A,G) .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Ranking of Candidate Words",
"sec_num": "2.3"
},
{
"text": "df (A, X) denotes the number of documents where A appears in documents X. G is a large set of randomly collected Web documents, which we call the global document set. We derived this score from a similar score, which was used in [14] to measure the association between a hypernym and hyponyms.",
"cite_spans": [
{
"start": 229,
"end": 233,
"text": "[14]",
"ref_id": "BIBREF13"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Ranking of Candidate Words",
"sec_num": "2.3"
},
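The following sketch shows one way to compute df idf(C, A) from the two document sets. The substring membership test and the guard against df(A, G) = 0 are simplifying assumptions on our part (proper word-boundary handling would require tokenization); it is not the authors' implementation.

```python
import math
from typing import Sequence

def df(A: str, docs: Sequence[str]) -> int:
    """Number of documents in docs in which the word A appears.
    (Simplification: substring test instead of tokenized matching.)"""
    return sum(1 for doc in docs if A in doc)

def dfidf(A: str, local_docs: Sequence[str], global_docs: Sequence[str]) -> float:
    """dfidf(C, A) = df(A, LD(C)) * idf(A), with idf(A) = log(|G| / df(A, G))."""
    df_global = max(df(A, global_docs), 1)  # guard against division by zero (assumption)
    idf = math.log(len(global_docs) / df_global)
    return df(A, local_docs) * idf
```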
{
"text": "This section presents the evaluation criteria based on question-answerability (QA tests). Based on the criteria, we designed an evaluation procedure where the evaluators were asked to answer either by yes or no to four tests at maximum, i.e., a hyponymy test (Sect. 3.4), a QA test (Sect. 3.1) and a suffix augmented QA test (Sect. 3.2) followed by a generality test (Sect. 3.3).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation Criteria",
"sec_num": "3"
},
{
"text": "By definitions we used, attributes are what we want to know about the object. Therefore, if A is an attribute of objects of class C, we can arrange questions (consisting of A and C) that require the values for A as the answer. Then someone should be able to answer the questions. For example, we can ask \"Who is the director of this movie?\" because director is an attribute of movie. The answer might be someone such as \"Stanley Kubrick.\" We designed the QA test shown in Fig. 2 to assess the correctness of attribute A for class C based on this criterion. Several points should be noted. First, since the value for the attribute is actually defined for the object instance (i.e., v = A(o), o \u2208 C), we should qualify class label C using \"kono (this)\" to refer to an object instance of class C. Second, since we cannot know what question is possible for A beforehand, we generate all the question types listed in Fig. 2 and ask whether any of them are acceptable. Third, the question should be natural as well as grammatically correct. Naturalness was explained to the evaluators as positively determining whether the question can be their first choice in usual conversations. In our point of view, attributes should be important items for people in describing objects. We assumed that attributes that conformed to the naturalness criterion would be such important attributes. For example, stapler is not an attribute of company in our sense, although almost all companies own stapler s. Our naturalness criterion can reflect this observation since the question \"What is the stapler of this company?\" is unnatural as a first question when talking about a company, and therefore we can successfully conclude that stapler is not an attribute. Note that Woods' linguistic test [11] (i.e., whether \"the attribute of an object is a value\" can be stated or not) cannot reject stapler since it does not have the naturalness requirement (e.g., we can say \"the stapler of [used by] SONY is Stapler-X\"). 5 In addition, note that such importances can be assessed more easily in the QA test, since questioners basically ask what they think is important at least at the time of utterance. However, we cannot expect such an implication even though the declarative sentence is acceptable.",
"cite_spans": [
{
"start": 1773,
"end": 1777,
"text": "[11]",
"ref_id": "BIBREF10"
},
{
"start": 1993,
"end": 1994,
"text": "5",
"ref_id": "BIBREF4"
}
],
"ref_spans": [
{
"start": 472,
"end": 478,
"text": "Fig. 2",
"ref_id": null
},
{
"start": 912,
"end": 918,
"text": "Fig. 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "Question-Answerability Test",
"sec_num": "3.1"
},
{
"text": "Finally, the answer to the question does not necessarily need to be written in language. For example, values for attributes such as map, picture, and blueprint cannot be written as language expressions but can be represented by other media. Such attributes are not rare since we obtain attributes from the Web.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Question-Answerability Test",
"sec_num": "3.1"
},
{
"text": "Some attributes that are obtained can fail the QA test even if they are correct, especially when the surface form is different from the one they actually mean. This often occurs since Japanese is very elliptic and our method is restricted to single word attributes. For example, the word seito (students) can be used to represent the attribute seito suu (number of students) as in the sentence below. These attributes whose parts are elided (e.g., seito representing seito suu) are also useful since they are actually used in sentences as in the above example. Therefore, they should be assessed as correct attributes in some way. Although the most appropriate question for seito representing seito suu is (6) in Fig. 2 , it is unfortunately ungrammatical since ikutu cannot be used for the number of persons. Therefore, seito representing seito suu will fail the QA test. 6 In Japanese, most of the elided parts can be restored by adding appropriate suffixes (as \"suu\" (number of) in the previous example) or by adding \"no\" + nominalized adjectives. Thus, when the attribute word failed the first QA test, we asked the evaluators to re-do the QA test by choosing an appropriate suffix or a nominalized adjective from the list of allowed augmentations and adding it to the end of the evaluated word. Figure 3 lists the allowed augmentations. 7, 8 ",
"cite_spans": [
{
"start": 873,
"end": 874,
"text": "6",
"ref_id": "BIBREF5"
},
{
"start": 1342,
"end": 1344,
"text": "7,",
"ref_id": "BIBREF6"
},
{
"start": 1345,
"end": 1346,
"text": "8",
"ref_id": "BIBREF7"
}
],
"ref_spans": [
{
"start": 713,
"end": 719,
"text": "Fig. 2",
"ref_id": null
},
{
"start": 1300,
"end": 1308,
"text": "Figure 3",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Suffix Augmented QA Test",
"sec_num": "3.2"
},
{
"text": "Although our primal aim was to acquire the attributes for a given class, i.e., , to find attributes that are common to all the instances of the class, we found, in preliminary experiments, that some uncommon (but interesting) attributes were assessed as correct according to the QA test depending on the evaluator. An example is subtitle for the class movie. Strictly speaking, subtitle is not an attribute of all movies, since all movies do not necessarily have subtitles. For example, only foreign films have subtitles in Japan. However, we think this attribute is also useful in practice for people who have a keen interest in foreign films. Thus, the evaluators were asked whether the attribute was common for most instances of the class when the attribute was judged to be correct in the QA test. We call attributes that passed this generality test general attributes, and those that failed but passed the QA test relaxed attributes (note that general attributes is a subset of relaxed attributes). We compare the accuracies for the relaxed and general attributes in the experiments. 6 Seito (representing students) might pass the QA test with question type (2) in Fig. 2. However, this is not always the case since some evaluators will judge the question to be unnatural. 7 Postposition \"no (of)\" before the suffix is also allowed to be added if it makes the question more natural. 8 The problem here might not occur if we used many more question types in the first QA test. However, we did not do this to keep the first QA test simple. With the same motivation, we kept the list of allowed suffixes short (only general and important suffixes). The uncovered cases were treated by adding nominalized adjectives.",
"cite_spans": [
{
"start": 1089,
"end": 1090,
"text": "6",
"ref_id": "BIBREF5"
},
{
"start": 1278,
"end": 1279,
"text": "7",
"ref_id": "BIBREF6"
},
{
"start": 1388,
"end": 1389,
"text": "8",
"ref_id": "BIBREF7"
}
],
"ref_spans": [
{
"start": 1170,
"end": 1174,
"text": "Fig.",
"ref_id": null
}
],
"eq_spans": [],
"section": "Generality Test",
"sec_num": "3.3"
},
{
"text": "Finally, we should note that we designed the evaluation procedure so that the evaluators could be asked whether candidate A is a hyponym of C before the QA tests. If A is a hyponym of C, we can skip all subsequent tests since A cannot be an attribute of C. We added this test because the output of the system often contains hyponyms and these tend to cause confusion in the QA tests since expression \"C no A\" is natural even when A is a hyponym of C (e.g., \"anime no Dragon Ball (Dragon Ball [of/the] anime)\").",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Hyponymy Test",
"sec_num": "3.4"
},
{
"text": "We first selected 32 word classes from 1,589 classes acquired from the Web with an automatic hypernym-hyponym acquisition method [14] . Here, we regarded the hypernym as the class label. Since our purpose was just to evaluate our method for classes from the Web, we selected classes that were obtained successfully. We randomly chose the 22 classes listed in Table 2 for human evaluation from these 32 classes. 9 The hyponyms were used to help the evaluators to disambiguate the meaning of class labels (if ambiguity existed).",
"cite_spans": [
{
"start": 129,
"end": 133,
"text": "[14]",
"ref_id": "BIBREF13"
},
{
"start": 411,
"end": 412,
"text": "9",
"ref_id": "BIBREF8"
}
],
"ref_spans": [
{
"start": 359,
"end": 366,
"text": "Table 2",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Experimental Setting",
"sec_num": "4.1"
},
{
"text": "To collect LD(C), we used the Web search engine goo (http://www.goo.ne.jp). The size of LD(C) was 857 documents (URLs) on class average. There were about 20, 000 candidate words on class average. As global document set G required for the calculation of df idf (C, A), we used 1.0\u00d710 6 randomly downloaded Web documents. We output the top 50 attributes for each class ranked with our proposed method and with alternative methods that were used for comparison. We gathered outputs for all the methods, removing duplication (i.e., taking the set union) to achieve efficient evaluation, and re-sorted them randomly to ensure that the assessment was unbiased. Four human evaluators assessed these gathered attributes class-by-class in four days using a GUI tool implementing the evaluation procedure described in Sect. 3. There were a total of 3, 678 evaluated attributes. Using the evaluation results, we re-constructed the evaluations for the top 50 for each method. The kappa value [15] , which indicates inter-evaluator agreement, was 0.533 for the general attribute case and 0.593 for the relaxed attribute case. According to [15] , these kappa values indicate \"moderate\" agreement. Figure 4 has accuracy graphs for the proposed method for relaxed attributes.",
"cite_spans": [
{
"start": 980,
"end": 984,
"text": "[15]",
"ref_id": "BIBREF14"
},
{
"start": 1126,
"end": 1130,
"text": "[15]",
"ref_id": "BIBREF14"
}
],
"ref_spans": [
{
"start": 1183,
"end": 1191,
"text": "Figure 4",
"ref_id": null
}
],
"eq_spans": [],
"section": "Experimental Setting",
"sec_num": "4.1"
},
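For reference, inter-evaluator agreement of the kind reported above can be computed as in the sketch below. The paper does not state which kappa variant was used; this sketch assumes Fleiss' kappa over the four evaluators' binary (correct/incorrect) judgments and is not the evaluation tool used in the experiments.

```python
from collections import Counter
from typing import List, Sequence

def fleiss_kappa(ratings: Sequence[Sequence[str]]) -> float:
    """Fleiss' kappa; ratings[i] holds the category label given to item i by each rater."""
    n_items = len(ratings)
    n_raters = len(ratings[0])
    category_totals: Counter = Counter()
    per_item_agreement: List[float] = []
    for item in ratings:
        counts = Counter(item)
        category_totals.update(counts)
        # proportion of agreeing rater pairs for this item
        per_item_agreement.append(
            (sum(c * c for c in counts.values()) - n_raters) / (n_raters * (n_raters - 1))
        )
    p_bar = sum(per_item_agreement) / n_items                     # observed agreement
    p_e = sum((total / (n_items * n_raters)) ** 2                 # chance agreement
              for total in category_totals.values())
    return (p_bar - p_e) / (1 - p_e)

# Example: four evaluators judging two candidate attributes
# fleiss_kappa([["ok", "ok", "ng", "ok"], ["ng", "ng", "ng", "ok"]])
```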
{
"text": "The graph on the left shows per-evaluator precision when the top n (represented by x axis) attributes were output. The precision is the average over all classes. Although we cannot calculate the actual recall, the x axis corresponds to approximate recall. We can see that ranking with the proposed method has a positive correlation with human evaluation, although the assessments varied greatly depending on the evaluator. The graph on the right shows curves for average (with standard deviation), 3-consensus, and 4-consensus precision. 3consensus (4-consensus) is precision where the attribute is considered correct by at least three (four) evaluators. Figure 5 has graphs for the general attribute case the same as for the relaxed case. Although there is a positive correlation between ranking with the proposed method and human evaluators, the precision was, not surprisingly, lower than that for the relaxed case. In addition, the lower kappa value (0.533 compared to 0.593 for the relaxed case) indicated that the generality test was harder than the QA tests.",
"cite_spans": [],
"ref_spans": [
{
"start": 655,
"end": 663,
"text": "Figure 5",
"ref_id": null
}
],
"eq_spans": [],
"section": "Accuracy of Proposed Method",
"sec_num": "4.2"
},
{
"text": "The accuracy of the proposed method was encouraging. Although we cannot easily determine which indicator is appropriate, if we use the majority rule (3- We calculated the change in precision \"per evaluator\", and then calculated the averaged change, i.e., the change averaged over evaluators. Figure 6 plots the averaged change and standard deviations. The effect of n(C, A) is represented by \"Proposed -pattern (web)\", that of f (C, A) by \"Proposed -pattern (news)\", that of t(C, A) by \"Proposed -tag\", and that of df idf (C, A) by \"Proposeddfidf\". In the relaxed attribute case, we can see that most of the scores were effective at almost all ranks regardless of the evaluator (negative difference means positive effect). The effect of f (C, A) and t(C, A) was especially remarkable. Although n(C, A) has a similar curve to f (C, A), the effect is weaker. This may be caused by the difference in the number of documents available (As we previously described, we currently cannot obtain a large number of documents from the Web). The effect df idf (C, A) had was two-fold. This contributed positively at lower ranks but it contributed negatively at higher ranks (around the top 1-5). In the general attribute case, the positive effect became harder to observe although the tendency was similar to the relaxed case. However, we can see that f (C, A) still contributed greatly even in this case. The effect of t(C, A), on the other hand, seems to have weakened greatly.",
"cite_spans": [],
"ref_spans": [
{
"start": 292,
"end": 300,
"text": "Figure 6",
"ref_id": null
}
],
"eq_spans": [],
"section": "Accuracy of Proposed Method",
"sec_num": "4.2"
},
{
"text": "If we have a hypernym-hyponym knowledge base, we can also collect the local document set by using the hyponyms in the class as the keywords for the search engine instead of using the class label (hypernym). In this experiment, we compared the proposed method with this alternative. We collected about the same number of documents for the alternative method as for the proposed method to focus on the quality of collected documents. We used hyponyms with the alternative method instead of class label C in patterns for n(C, A) (thus n(Hs, A) to be precise). f (C, A) was unchanged. Figure 7 plots the results in the same way as for the previous analysis (i.e., difference from the proposed method). We can see that the class label is better than hyponyms for collecting local documents at least in the current setting. ",
"cite_spans": [],
"ref_spans": [
{
"start": 581,
"end": 589,
"text": "Figure 7",
"ref_id": "FIGREF2"
}
],
"eq_spans": [],
"section": "Effect of Hypernym",
"sec_num": "4.4"
},
{
"text": "Several studies have attempted to acquire attributes or attribute-value pairs [1, 3, 7, 8, 16] . Yoshida [1] proposed a method of integrating tables on the Web. Although his method consequently acquired attributes, he did not evaluate the accuracy of attributes. Yoshida et al. [16] proposed a method of identifying attribute-value pairs in Web documents. However, since this method only identified the attributes obtained with the method in [1] , the coverage might be bounded by the coverage of tables for attributes. Moreover, these methods did not utilize the statistics for words or lexico-syntactic patterns as ours did. Takahashi et al. [8] extracted triples (object, attribute, value) from newspaper articles using lexico-syntactic patterns and statistical scores. However, they focused only on proper nouns and selected the attribute candidates manually. Freishmann et al. [3] extracted attribute-value pairs with a high degree of precision by filtering the candidates extracted with lexico-syntactic patterns by using a model learned with supervised learning. Although this approach is promising, their method was limited to person names and we must prepare training data to apply the method to other types of objects.",
"cite_spans": [
{
"start": 78,
"end": 81,
"text": "[1,",
"ref_id": "BIBREF0"
},
{
"start": 82,
"end": 84,
"text": "3,",
"ref_id": "BIBREF2"
},
{
"start": 85,
"end": 87,
"text": "7,",
"ref_id": "BIBREF6"
},
{
"start": 88,
"end": 90,
"text": "8,",
"ref_id": "BIBREF7"
},
{
"start": 91,
"end": 94,
"text": "16]",
"ref_id": "BIBREF15"
},
{
"start": 105,
"end": 108,
"text": "[1]",
"ref_id": "BIBREF0"
},
{
"start": 278,
"end": 282,
"text": "[16]",
"ref_id": "BIBREF15"
},
{
"start": 442,
"end": 445,
"text": "[1]",
"ref_id": "BIBREF0"
},
{
"start": 644,
"end": 647,
"text": "[8]",
"ref_id": "BIBREF7"
},
{
"start": 882,
"end": 885,
"text": "[3]",
"ref_id": "BIBREF2"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "5.1"
},
{
"text": "Clues based on QA tests. The current ranking, Eq. (1), does not exploit the observation behind the criteria in Sect. 3. Only the lexico-syntactic patterns \"C no A\" slightly reflect the criteria. Higher accuracy might be achieved by using patterns that directly reflect the QA tests, e.g., statistics from FAQ lists. The hyponym tests in Sect. 3.4 can also be reflected if we use a hyponymy database. In addition, it is not surprising that the proposed method was not efficient at acquiring general attributes since the score was not meant for that (although the use of class labels might be a contributing factor, ambiguous class labels cause problems at the same time). The hyponym database might be exploited to measure the generality of attributes.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Future Directions",
"sec_num": "5.2"
},
{
"text": "Full use of the Web. The current method cannot use all Web documents due to limitations with search engines. The more Web documents we have, the more useful the score n(C, A). We are currently planning to prepare our own non-restricted Web repository. Using this, we would also like to elaborate on the comparison described in Sect. 4.4 between the use of hypernyms (class labels) and hyponyms (instance words) in collecting the local document set.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Future Directions",
"sec_num": "5.2"
},
{
"text": "Assessment of Coverage. Currently, the actual recall with the proposed method is unknown. It will be important to estimate how many attributes are needed for practical applications, e.g., by manually analyzing the use of pattern \"C no A\" exhaustively for a certain class, C. In addition, since we selected classes that were successfully obtained with a hyponymy acquisition method, we cannot deny the possibility that the proposed method has been evaluated for the classes for which reliable statistics can easily be obtained. Thus, the evaluation of more difficult (e.g., more infrequent) classes will be an important future work.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Future Directions",
"sec_num": "5.2"
},
{
"text": "Type Acquisition. What types of questions and what types of suffix augmentations are possible for a given attribute (i.e., the type of attribute value) might also be useful, e.g., in value extraction and in determining type of the attribute (in the sense of \"property or part-of\"). This was left for the evaluators to chose arbitrarily in this study. We would like to extract such knowledge from the Web using similar techniques such as word statistics and lexico-syntactic patterns.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Future Directions",
"sec_num": "5.2"
},
{
"text": "We presented a method of acquiring attributes that utilizes statistics on words, lexico-syntactic patterns, and HTML tags. We also proposed criteria and an evaluation procedure based on question-answerability. Using the procedure, we conducted experiments with four human evaluators. The results revealed that our method could obtain attributes with a high degree of precision.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "6"
},
{
"text": "Note that there are actually no spaces between words in Japanese. The spaces are for easier understanding.3 Yomiuri newspaper 1987-2001, Mainichi newspaper 1991-1999, and Nikkei newspaper 1983-1990; 3.01 GB in total. We used a Japanese dependency parser[13].4 The differences from n(C, A) were introduced to reuse the existing parsed corpus.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "Stapler might be an important attribute of companies for stationery sellers. However, we focus on attributes that are important for most people in most situations.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "This selection was due to time/cost limitations.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [
{
"text": "consensus in our case) employed in [7] , the proposed method obtained relaxed attributes with 0.852 precision and general attributes with 0.727 precision for the top 20 outputs. Table 3 lists the top 20 attributes obtained with the proposed method for several classes. The numeral before (after) \"/\" is the number of evaluators who judged the attribute as correct as a relaxed (general) attribute.We can see that many interesting attributes were obtained.",
"cite_spans": [
{
"start": 35,
"end": 38,
"text": "[7]",
"ref_id": "BIBREF6"
}
],
"ref_spans": [
{
"start": 178,
"end": 185,
"text": "Table 3",
"ref_id": null
}
],
"eq_spans": [],
"section": "annex",
"sec_num": null
},
{
"text": "In this analysis, we assessed the effect that sub-scores in Eq. (1) had on the acquisition accuracy by observing the decrease in precision when we removed each score from Eq. (1). First, we could observe a positive effect for most scores in terms of the precision averaged over evaluators. Moreover, interestingly, the tendency of the effect was very similar for all evaluators, even though the assessments varied greatly depending on the evaluator as the previous experiment showed. Due to space limitations, we will only present the latter analysis here.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Effect of Scores",
"sec_num": "4.3"
}
],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Extracting attributes and their values from web pages",
"authors": [
{
"first": "M",
"middle": [],
"last": "Yoshida",
"suffix": ""
}
],
"year": 2002,
"venue": "Proc. of the ACL 2002 Student Research Workshop",
"volume": "",
"issue": "",
"pages": "72--77",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yoshida, M.: Extracting attributes and their values from web pages. In: Proc. of the ACL 2002 Student Research Workshop. (2002) 72-77",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Integrating tables on the world wide web",
"authors": [
{
"first": "M",
"middle": [],
"last": "Yoshida",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Torisawa",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Tsujii",
"suffix": ""
}
],
"year": 2004,
"venue": "Transactions of the Japanese Society for Artificial Intelligence",
"volume": "19",
"issue": "",
"pages": "548--560",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yoshida, M., Torisawa, K., Tsujii, J.: Integrating tables on the world wide web. Transactions of the Japanese Society for Artificial Intelligence 19 (2004) 548-560",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Offline strategies for online question answering: Answering questions before they are asked",
"authors": [
{
"first": "M",
"middle": [],
"last": "Fleischman",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Hovy",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Echihabi",
"suffix": ""
}
],
"year": 2003,
"venue": "Proc. of ACL 2003",
"volume": "",
"issue": "",
"pages": "1--7",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Fleischman, M., Hovy, E., Echihabi, A.: Offline strategies for online question answering: Answering questions before they are asked. In: Proc. of ACL 2003. (2003) 1-7",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Attribute-based and value-based clustering: An evaluation",
"authors": [
{
"first": "A",
"middle": [],
"last": "Almuhareb",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Poesio",
"suffix": ""
}
],
"year": 2004,
"venue": "Proc. of EMNLP",
"volume": "",
"issue": "",
"pages": "158--165",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Almuhareb, A., Poesio, M.: Attribute-based and value-based clustering: An eval- uation. In: Proc. of EMNLP 2004. (2004) 158-165",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "WordNet: An electronic lexical database",
"authors": [
{
"first": "C",
"middle": [],
"last": "Fellbaum",
"suffix": ""
}
],
"year": 1998,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Fellbaum, C., ed.: WordNet: An electronic lexical database. The MIT Press (1998)",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Automatic acquisition of hyponyms from large text corpora",
"authors": [
{
"first": "M",
"middle": [
"A"
],
"last": "Hearst",
"suffix": ""
}
],
"year": 1992,
"venue": "Proc. of COLING '92",
"volume": "",
"issue": "",
"pages": "539--545",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hearst, M.A.: Automatic acquisition of hyponyms from large text corpora. In: Proc. of COLING '92. (1992) 539-545",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Finding parts in very large corpora",
"authors": [
{
"first": "M",
"middle": [],
"last": "Berland",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Charniak",
"suffix": ""
}
],
"year": 1999,
"venue": "Proc. of ACL '99",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Berland, M., Charniak, E.: Finding parts in very large corpora. In: Proc. of ACL '99. (1999)",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Automatic extraction of attribute relations from text",
"authors": [
{
"first": "T",
"middle": [],
"last": "Takahashi",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Inui",
"suffix": ""
},
{
"first": "Y",
"middle": [],
"last": "Matsumoto",
"suffix": ""
}
],
"year": 2004,
"venue": "",
"volume": "",
"issue": "",
"pages": "19--24",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Takahashi, T., Inui, K., Matsumoto, Y.: Automatic extraction of attribute relations from text (in Japanese). IPSJ, SIG-NLP. NL-164 (2004) 19-24",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Concepts, attributes and arbitrary relations: some linguistic and ontological criteria for structuring knowledge base",
"authors": [
{
"first": "N",
"middle": [],
"last": "Guarino",
"suffix": ""
}
],
"year": 1992,
"venue": "Data and Knowledge Engineering",
"volume": "",
"issue": "",
"pages": "249--261",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Guarino, N.: Concepts, attributes and arbitrary relations: some linguistic and on- tological criteria for structuring knowledge base. Data and Knowledge Engineering (1992) 249-261",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "The Generative Lexicon",
"authors": [
{
"first": "J",
"middle": [],
"last": "Pustejovsky",
"suffix": ""
}
],
"year": 1995,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Pustejovsky, J.: The Generative Lexicon. The MIT Press (1995)",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "What's in a Link: Foundations for Semantic Networks",
"authors": [
{
"first": "W",
"middle": [
"A"
],
"last": "Woods",
"suffix": ""
}
],
"year": 1975,
"venue": "Representation and Understanding: Studies in Cognitive Science",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Woods, W.A.: What's in a Link: Foundations for Semantic Networks. In: Repre- sentation and Understanding: Studies in Cognitive Science. Academic Press (1975)",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Japanese morphological analysis system JUMAN version 3",
"authors": [
{
"first": "S",
"middle": [],
"last": "Kurohashi",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Nagao",
"suffix": ""
}
],
"year": 1999,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Kurohashi, S., Nagao, M.: Japanese morphological analysis system JUMAN version 3.61 manual (1999)",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "A hybrid Japanese parser with hand-crafted grammar and statistics",
"authors": [
{
"first": "H",
"middle": [],
"last": "Kanayama",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Torisawa",
"suffix": ""
},
{
"first": "Y",
"middle": [],
"last": "Mitsuishi",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Tsujii",
"suffix": ""
}
],
"year": 2000,
"venue": "Proc. of COLING",
"volume": "",
"issue": "",
"pages": "411--417",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Kanayama, H., Torisawa, K., Mitsuishi, Y., Tsujii, J.: A hybrid Japanese parser with hand-crafted grammar and statistics. In: Proc. of COLING 2000. (2000) 411-417",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Acquiring hyponymy relations from web documents",
"authors": [
{
"first": "K",
"middle": [],
"last": "Shinzato",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Torisawa",
"suffix": ""
}
],
"year": 2004,
"venue": "Proc. of HLT-NAACL04",
"volume": "",
"issue": "",
"pages": "73--80",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Shinzato, K., Torisawa, K.: Acquiring hyponymy relations from web documents. In: Proc. of HLT-NAACL04. (2004) 73-80",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "The measurement of observer agreement for categorial data",
"authors": [
{
"first": "J",
"middle": [
"R"
],
"last": "Landis",
"suffix": ""
},
{
"first": "G",
"middle": [
"G"
],
"last": "Koch",
"suffix": ""
}
],
"year": 1977,
"venue": "Biometrics",
"volume": "33",
"issue": "",
"pages": "159--174",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Landis, J.R., Koch, G.G.: The measurement of observer agreement for categorial data. Biometrics 33 (1977) 159-174",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Chapter 10 (Extracting Attributes and Their Values from Web Pages)",
"authors": [
{
"first": "M",
"middle": [],
"last": "Yoshida",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Torisawa",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Tsujii",
"suffix": ""
}
],
"year": 2003,
"venue": "Web Document Analysis. World Scientific",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yoshida, M., Torisawa, K., Tsujii, J.: Chapter 10 (Extracting Attributes and Their Values from Web Pages). In: Web Document Analysis. World Scientific (2003)",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"text": "number of students of this school is 500.) e.g., \"height of\" \"prettiness of\") Allowed augmentation",
"uris": null,
"type_str": "figure",
"num": null
},
"FIGREF1": {
"text": "Accuracy of relaxed attributes Accuracy of general attributes Effect of scores. Left: relaxed attribute. Right: general attribute.",
"uris": null,
"type_str": "figure",
"num": null
},
"FIGREF2": {
"text": "Effect of hypernyms. Left: relaxed case. Right: general case.",
"uris": null,
"type_str": "figure",
"num": null
},
"TABREF1": {
"content": "<table><tr><td>5.</td><td>C</td><td>A</td><td>? (kono C no A ha dore?/Which is the A of this C?)</td></tr><tr><td>6.</td><td>C</td><td>A</td><td>? (kono C no A ha ikutu?/How many is the A of this C?)</td></tr><tr><td>7.</td><td>C</td><td>A</td><td>? (kono C no A ha dou?/How much is the A of this C?)</td></tr><tr><td/><td/><td/><td>Fig. 2. Question-answerability Test</td></tr></table>",
"type_str": "table",
"html": null,
"text": "Are any of the following questions grammatically correct, natural, and answerable? 1. C A ? (kono C no A ha nani?/What is the A of this C?) 2. C A ? (kono C no A ha dare?/Who is the A of this C?) 3. C A ? (kono C no A ha itu?/When is the A of this C?) 4. C A ? (kono C no A ha doko?/Where is the A of this C?)",
"num": null
},
"TABREF2": {
"content": "<table><tr><td>(city),</td><td/><td colspan=\"2\">(museum),</td><td colspan=\"2\">(national holiday),</td><td>(police),</td><td>(facility),</td><td>(university),</td></tr><tr><td colspan=\"2\">(newspaper),</td><td colspan=\"2\">(garbage),</td><td>(shrine),</td><td>(bird),</td><td colspan=\"2\">(hospital),</td><td>(plant),</td><td>(river),</td></tr><tr><td colspan=\"3\">(elementary school),</td><td colspan=\"2\">(music tune),</td><td>(library),</td><td colspan=\"2\">(branch office),</td><td>(web site),</td></tr><tr><td>(town),</td><td colspan=\"2\">(sensor),</td><td colspan=\"2\">(training),</td><td>(car)</td><td/></tr></table>",
"type_str": "table",
"html": null,
"text": "Classes used in evaluation",
"num": null
}
}
}
} |