{
"paper_id": "O09-1006",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T08:10:45.130506Z"
},
"title": "Query Formulation by Selecting Good Terms",
"authors": [
{
"first": "Chia-Jung",
"middle": [],
"last": "\u674e\u4f73\u84c9",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Yi-Chun",
"middle": [],
"last": "Lee",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Ruey-Cheng",
"middle": [],
"last": "Lin",
"suffix": "",
"affiliation": {},
"email": "rueycheng@gmail.com"
},
{
"first": "",
"middle": [],
"last": "Chen",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Pei-Sen",
"middle": [],
"last": "\u5289\u57f9\u68ee",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "",
"middle": [],
"last": "Liu",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "\u912d\u535c\u58ec",
"middle": [],
"last": "Pu",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Jen",
"middle": [],
"last": "Cheng",
"suffix": "",
"affiliation": {},
"email": "pjcheng@csie.ntu.edu.tw"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "It is difficult for users to formulate appropriate queries for search. In this paper, we propose an approach to query term selection by measuring the effectiveness of a query term in IR systems based on its linguistic and statistical properties in document collections. Two query formulation algorithms are presented for improving IR performance. Experiments on NTCIR-4 and NTCIR-5 ad-hoc IR tasks demonstrate that the algorithms can significantly improve the retrieval performance by 9.2% averagely, compared to the performance of the original queries given in the benchmarks. Experiments also show that our method can be applied to query expansion and works satisfactorily in selection of good expansion terms.",
"pdf_parse": {
"paper_id": "O09-1006",
"_pdf_hash": "",
"abstract": [
{
"text": "It is difficult for users to formulate appropriate queries for search. In this paper, we propose an approach to query term selection by measuring the effectiveness of a query term in IR systems based on its linguistic and statistical properties in document collections. Two query formulation algorithms are presented for improving IR performance. Experiments on NTCIR-4 and NTCIR-5 ad-hoc IR tasks demonstrate that the algorithms can significantly improve the retrieval performance by 9.2% averagely, compared to the performance of the original queries given in the benchmarks. Experiments also show that our method can be applied to query expansion and works satisfactorily in selection of good expansion terms.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Users are often supposed to give effective queries so that the return of an information retrieval (IR) system is anticipated to cater to their information needs. One major challenge they face is what terms should be generated when formulating the queries. The general assumption of previous work [14] is that nouns or noun phrases are more informative than other parts of speech (POS), and longer queries could provide more information about the underlying information need. However, are the query terms that the users believe to be well-performing really effective in IR?",
"cite_spans": [
{
"start": 296,
"end": 300,
"text": "[14]",
"ref_id": "BIBREF10"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1."
},
{
"text": "Consider the following description of the information need of a user, which is an example description query in NTCIR-4: Find articles containing the reasons for NBA Star Michael Jordan's retirement and what effect it had on the Chicago Bulls. Removing stop words is a common way to form a query such as \"contain, reason, NBA Star, Michael Jordan, retirement, effect, had, Chicago Bulls\", which scores a mean average precision (MAP) of 0.1914. It appears obviously that terms contain and had carry relatively less information about the topic. Thus, we take merely nouns into account and generate another query, \"reason, NBA Star, Michael Jordan, retirement, effect, Chicago Bulls\", which achieves a better MAP of 0.2095. When carefully analyzing these terms, one could find that the meaning of Michael Jordan is more precise than that of NBA Star, and hence we improve MAP by 14% by removing NBA Star. Yet interestingly, the performance of removing Michael Jordan is not as worse as we think it would be. This might be resulted from that Michael Jordan is a famous NBA Star in Chicago Bulls. However, what if other terms such as reason and effect are excluded? There is no explicit clue to help users determine what terms are effective in an IR system, especially when they lack experience of searching documents in a specific domain. Without comprehensively understanding the document collection to be retrieved, it is difficult for users to generate appropriate queries. As the effectiveness of a term in IR depends on not only how much information it carries in a query (subjectivity from users) but also what documents there are in a collection (objectivity from corpora), it is, therefore, important to measure the effectiveness of query terms in an automatic way. Such measurement is useful in selection of effective and ineffective query terms, which can benefit many IR applications such as query formulation and query expansion.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1."
},
{
"text": "Conventional methods of retrieval models, query reformulation and expansion [13] attempt to learn a weight for each query term, which in some sense corresponds to the importance of the query term. Unfortunately, such methods could not explain what properties make a query term effective for search. Our work resembles some previous works with the aim of selecting effective terms. [1,3] focus on discovering key concepts from noun phrases in verbose queries with different weightings. Our work focuses on how to formulate appropriate queries by selecting effective terms or dropping ineffective ones. No weight assignments are needed and thus conventional retrieval models could be easily incorporated. [4] uses a supervised learning method for selecting good expansion terms from a number of candidate terms generated by pseudo-relevance feedback technique. However, we differ in that, (1) [4] selects specific features so as to emphasize more on the relation between original query and expansion terms without consideration of linguistic features, and (2) our approach does not introduce extra terms for query formulation. Similarly, [10] attempts to predict which words in query should be deleted based on query logs. Moreover, a number of works [2, 5, 6, 7, 9, 15, 16, 18, 19, 20] pay attention to predict the quality or difficulty of queries, and [11, 12] try to find optimal sub-queries by using maximum spanning tree with mutual information as the weight of each edge. However, their focus is to evaluate performance of a whole query whereas we consider units at the level of terms.",
"cite_spans": [
{
"start": 76,
"end": 80,
"text": "[13]",
"ref_id": "BIBREF9"
},
{
"start": 703,
"end": 706,
"text": "[4]",
"ref_id": "BIBREF0"
},
{
"start": 891,
"end": 894,
"text": "[4]",
"ref_id": "BIBREF0"
},
{
"start": 1136,
"end": 1140,
"text": "[10]",
"ref_id": "BIBREF6"
},
{
"start": 1253,
"end": 1255,
"text": "5,",
"ref_id": "BIBREF1"
},
{
"start": 1256,
"end": 1258,
"text": "6,",
"ref_id": "BIBREF2"
},
{
"start": 1259,
"end": 1261,
"text": "7,",
"ref_id": "BIBREF3"
},
{
"start": 1262,
"end": 1264,
"text": "9,",
"ref_id": "BIBREF5"
},
{
"start": 1265,
"end": 1268,
"text": "15,",
"ref_id": "BIBREF11"
},
{
"start": 1269,
"end": 1272,
"text": "16,",
"ref_id": "BIBREF12"
},
{
"start": 1273,
"end": 1276,
"text": "18,",
"ref_id": "BIBREF14"
},
{
"start": 1277,
"end": 1280,
"text": "19,",
"ref_id": "BIBREF15"
},
{
"start": 1281,
"end": 1284,
"text": "20]",
"ref_id": "BIBREF16"
},
{
"start": 1352,
"end": 1356,
"text": "[11,",
"ref_id": "BIBREF7"
},
{
"start": 1357,
"end": 1360,
"text": "12]",
"ref_id": "BIBREF8"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1."
},
{
"text": "Given a set of possible query terms that a user may use to search documents relevant to a topic, the goal of this paper is to formulate appropriate queries by selecting effective terms from the set. Since exhaustively examining all candidate subsets is not feasible in a large scale, we reduce the problem to a simplified one that iteratively selects effective query terms from the set. We are interested in realizing (1) what characteristic of a query term makes it effective or ineffective in search, and (2) whether or not the effective query terms (if we are able to predict) can improve IR performance. We propose an approach to automatically measure the effectiveness of query terms in IR, wherein a regression model learned from training data is applied to conduct the prediction of term effectiveness of testing data. Based on the measurement, two algorithms are presented, which formulate queries by selecting effective terms and dropping ineffective terms from the given set, respectively.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1."
},
{
"text": "The merit of our approach is that we consider various aspects that may influence retrieval performance, including linguistic properties of a query term and statistical relationships between terms in a document collection such as co-occurrence and context dependency. Their impacts on IR have been carefully examined. Moreover, we have conducted extensive experiments on NTCIR-4 and NTCIR-5 ad-hoc IR tasks to evaluate the performance of the proposed approach. Based on term effectiveness prediction and two query formulation algorithms, our method significantly improve MAP by 9.2% on average, compared to the performance of the original queries given in the benchmarks.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1."
},
{
"text": "In the rest of this paper, we describe the proposed approach to term selection and query formulation in Section 2. The experimental results of retrieval performance are presented in Sections 3. Finally, in Section 4, we give our discussion and conclusions.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1."
},
{
"text": "When a user desires to retrieve information from document repositories to know more about a topic, many possible terms may come into the mind to form various queries. We call such set of the possible terms query term space T={t 1 , \u2026, t n }. A query typically consists of a subset of T. Each query term t i \uf0ce T is expected to convey some information about the user information need. It is, therefore, reasonable to assume that each query term will have different degree of effectiveness in retrieving relevant documents. To explore the impact of one query term on retrieval performance, we start the discussion with a degeneration process, which is defined as a mapping function taking the set of terms T as input and producing set {T\u2212{t 1 }, T\u2212{t 2 },\u2026,T\u2212{t n }} as output. Mathematically, the mapping function is defined as:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Term Selection Approach for Query Formulation 2.1 Observation",
"sec_num": "2."
},
{
"text": "DeGen(T) = {T \u2212 {x}|x \uf0ce T}.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Term Selection Approach for Query Formulation 2.1 Observation",
"sec_num": "2."
},
{
"text": "By applying the degeneration process to the given n terms in T, we can construct a set of n queries \u2206q = {\u2206q 1 , \u2206q 2 ,\u2026, \u2206q i ,\u2026, \u2206q n }, where \u2206q i = {t 1 , \u2026 , t i\u22121 , t i+1 , \u2026 , t n } stands for a query by removing t i from original terms T.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Term Selection Approach for Query Formulation 2.1 Observation",
"sec_num": "2."
},
{
"text": "Suppose query term space T well summaries the description of the user information need.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Term Selection Approach for Query Formulation 2.1 Observation",
"sec_num": "2."
},
{
"text": "Intuitively, we believe that the removal of a term (especially an important one) from T may result in a loss of information harming retrieval effectiveness. To realize how much such information loss may influence IR performance, we conduct an experiment on NTCIR-4 description queries. For each query, we construct its query term space T by dropping stop words. T is treated as a hypothetical user information need. The remaining terms in the description queries are individually, one at a time, selected to be removed to obtain \u2206q. Three formulas are used to measure the impact of the removing terms and defined as:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Term Selection Approach for Query Formulation 2.1 Observation",
"sec_num": "2."
},
{
"text": "T) pf(T))/pf( - ) q (pf( min (T) g i \u0394q \u0394qi \uf044 \uf03d \uf0ce min T) pf(T))/pf( - ) q (pf( max (T) g i \u0394q \u0394qi \uf044 \uf03d \uf0ce max \uf0e5 \uf044 \uf03d i i T) pf(T))/pf( - ) q (pf( |T| (T) g 1 avg",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Term Selection Approach for Query Formulation 2.1 Observation",
"sec_num": "2."
},
{
"text": "where pf(x) is a performance measurement for query x, g(T) computes the ratio of performance variation, which measures the maximum, minimum and average performance gain due to the removal of one of the terms from T, and |T| is the number of query terms in T.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Term Selection Approach for Query Formulation 2.1 Observation",
"sec_num": "2."
},
{
"text": "We use Okapi as the retrieval model and mean average precision (MAP) as our performance measurement for pf(x) in this experiment.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Term Selection Approach for Query Formulation 2.1 Observation",
"sec_num": "2."
},
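{
"text": "A minimal sketch (Python) of this degeneration measurement, assuming a hypothetical pf(terms) helper that runs the given terms as a query through the retrieval model (Okapi here) and returns its MAP:\ndef degeneration_gains(T, pf):\n    # T is the query term space; pf(x) returns MAP for query x (assumed helper)\n    base = pf(T)\n    gains = [(pf([t for t in T if t != t_i]) - base) / base for t_i in T]\n    # g_max, g_min and g_avg over the n degenerated queries\n    return max(gains), min(gains), sum(gains) / len(T)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Term Selection Approach for Query Formulation 2.1 Observation",
"sec_num": "2."
},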
{
"text": "The experimental results are shown in Figure 1 . When ",
"cite_spans": [
{
"start": 49,
"end": 53,
"text": "When",
"ref_id": null
}
],
"ref_spans": [
{
"start": 38,
"end": 46,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Term Selection Approach for Query Formulation 2.1 Observation",
"sec_num": "2."
},
{
"text": "When a user desires to retrieve information from document repositories to know more about a topic, many possible terms may come into her mind to form various queries. We call such set of the possible terms query term space T={t1, \u2026, tn}. A query typically consists of a subset of T. Each query term ti \uf0ce T is expected to convey some information about the user's information need. It is, therefore, reasonable to assume that each query term will have different degree of effectiveness in documents retrieval. Suppose Q denotes all subsets of T, that is, Q=Power Set(T) and |Q|=2 n . The problem is to choose the best subset \u2206q* among all candidates Q such that the performance gain between the retrieval performance of T and \u2206q (\u2206q \u2208 Q ) is maximized:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Problem Specification",
"sec_num": "2.2"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "\u2206 * = \u2206 \u2208 {( \u2212 \u2206 )/ ( )} .",
"eq_num": "(1)"
}
],
"section": "Problem Specification",
"sec_num": "2.2"
},
{
"text": "where pf(x) denotes a function measuring retrieval performance with x as the query. The higher the score pf(x) is, the better the retrieval performance can be achieved.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Problem Specification",
"sec_num": "2.2"
},
{
"text": "An intuitive way to solve the problem is to exhaustively examine all candidate subset members in Q and design a method to decide which the best \u2206q* is. However, since an exhaustive search is not appropriate for applications in a large scale, we reduce the problem to a simplified one that chooses the most effective query term ti (ti\u2208T) such that the performance gain between T and T-{ti} is maximized:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Problem Specification",
"sec_num": "2.2"
},
{
"text": "* = \u2208 {( \u2212 ( \u2212 { }))/ ( )} .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Problem Specification",
"sec_num": "2.2"
},
{
"text": "(2)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Problem Specification",
"sec_num": "2.2"
},
{
"text": "Once the best ti* is selected, \u2206q* could be approximated by iteratively selecting effective terms from T. Similarly, the simplified problem could be to choose the most ineffective terms from T such that the performance gain is minimized. Then \u2206q* will be approximated by iteratively removing ineffective or noisy terms from T.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Problem Specification",
"sec_num": "2.2"
},
{
"text": "Our goals are: (1) to find a function r: T \u2192R, which ranks {t1, \u2026, tn} based on their effectiveness in performance gain (MAP is used for the performance measurement in this paper), where the effective terms are selected as candidate query terms, and (2) to formulate a query from the candidates selected by function r.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Problem Specification",
"sec_num": "2.2"
},
{
"text": "To rank term ti in a given query term space T based on function r, we use a regression model to compute r directly, which predicts a real value from some observed features of ti. The regression function r: T \u2192R is generated by learning from each ti with the examples in form of <f(ti), ( \u2212 ( \u2212 { }))/ ( )> for all queries in the training corpus, where f(ti) is the feature vector of ti, which will be described in Section 2.5.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Effective Term Selection",
"sec_num": "2.3"
},
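{
"text": "A sketch of how such training examples could be assembled; pf and extract_features are assumed helpers (the retrieval model and the feature extractor of Section 2.5, respectively):\ndef build_training_set(queries, pf, extract_features):\n    X, y = [], []\n    for T in queries:  # each T is one training query term space\n        base = pf(T)\n        for t_i in T:\n            X.append(extract_features(t_i, T))  # feature vector f(t_i)\n            y.append((base - pf([t for t in T if t != t_i])) / base)  # label (pf(T) - pf(T - {t_i}))/pf(T)\n    return X, y",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Effective Term Selection",
"sec_num": "2.3"
},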
{
"text": "The regression model we adopt is Support Vector Regression (SVR), which is a regression analysis technique based on SVM [17] . The aim of SVR is to find the most appropriate hyperplane w which is able to predict the distribution of data points accurately. Thus, r can be interpreted as a function that seeks the least dissimilarity between ground truth y i = (pf T \u2212 pf(T \u2212 {t i }))/pf(T) and predicted value r(t i ), and r is required to be in the form of w f(t i )+b. Finding function r is therefore equivalent to solving the convex optimization problem:",
"cite_spans": [
{
"start": 120,
"end": 124,
"text": "[17]",
"ref_id": "BIBREF13"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Effective Term Selection",
"sec_num": "2.3"
},
{
"text": ", , ,1 , ,2 1 2 2 + ( ,1 + ,2 ).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Effective Term Selection",
"sec_num": "2.3"
},
{
"text": "(3)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Effective Term Selection",
"sec_num": "2.3"
},
{
"text": "subject to:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Effective Term Selection",
"sec_num": "2.3"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "\u2200 \u2208 y i \u2212 (w f(t i )+b) \u2265 + ,1",
"eq_num": "(4)"
}
],
"section": "Effective Term Selection",
"sec_num": "2.3"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "\u2200 : ,1 , ,2 \u2265 0 (w f(t i )+b) \u2212 y i \u2265 + ,2 .",
"eq_num": "(5)"
}
],
"section": "Effective Term Selection",
"sec_num": "2.3"
},
{
"text": "where C determines the tradeoff between the flatness of r and the amount up to which deviations larger than \u03b5 are tolerated, \u03b5 is the maximum acceptable difference between the predicted and actual values we wish to maintain, and ,1 and ,2 are slack variables that cope with otherwise infeasible constraints of the optimization problem. We use the SVR implementation of LIBSVM [8] to solve the optimization problem.",
"cite_spans": [
{
"start": 376,
"end": 379,
"text": "[8]",
"ref_id": "BIBREF4"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Effective Term Selection",
"sec_num": "2.3"
},
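{
"text": "For illustration, an epsilon-SVR of this form can be fit with scikit-learn, whose SVR class wraps LIBSVM; the C and epsilon values below are placeholders, not the settings tuned in our experiments:\nimport numpy as np\nfrom sklearn.svm import SVR\n\n# X: one row of features f(t_i) per term; y: performance-gain labels\nX = np.random.rand(100, 12)  # dummy stand-ins for illustration\ny = np.random.rand(100)\nr = SVR(kernel='rbf', C=1.0, epsilon=0.1)  # solves the optimization problem above\nr.fit(X, y)\nscores = r.predict(X)  # r(t_i): predicted term effectiveness",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Effective Term Selection",
"sec_num": "2.3"
},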
{
"text": "Ranking terms in query term space T={t 1 , \u2026, t n } according to their effectiveness is then equivalent to applying regression function to each t i ; hence, we are able to sort terms t i \uf0ce T into an ordering sequence of effectiveness or ineffectiveness by r(t i ).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Effective Term Selection",
"sec_num": "2.3"
},
{
"text": "Algorithms Generation and Reduction, as shown in Fig. 2 , formulate queries by greedily selecting effective terms or dropping ineffective terms from space T based on function r.",
"cite_spans": [],
"ref_spans": [
{
"start": 49,
"end": 55,
"text": "Fig. 2",
"ref_id": "FIGREF1"
}
],
"eq_spans": [],
"section": "Generation and Reduction",
"sec_num": "2.4"
},
{
"text": "When formulating a query from query term space T, the Generation algorithm computes a measure of effectiveness r(t i ) for each term t i \uf0ce T, includes the most effective term t i * and repeats the process until k terms are chosen (where k is a empirical value given by users). Note that T is changed during the selection process, and thus statistical features should be re-estimated according to new T. The selection of the best candidate term ensures that the current selected term t i * is the most informative one among those that are not selected yet.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Generation and Reduction",
"sec_num": "2.4"
},
{
"text": "Compared to generation, the Reduction algorithm always selects the most ineffective term from current T in each iteration. Since users may introduce noisy terms in query term space T, Reduction aims to remove such ineffective terms and will repeat the process until |T|-k terms are chosen.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Generation and Reduction",
"sec_num": "2.4"
},
{
"text": "Input: T={t 1, t 2, \u2026,t n } (query term space) k (# of terms to be selected) \u2206q\u2190{ }",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Algorithm Generation Algorithm Reduction",
"sec_num": null
},
{
"text": "for i = 1 to k do * \u2190 \u2208 { } \u2206q\u2190 \u2206q \u222a { * } T\u2190 T \u2212{ * } end Output \u2206q",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Algorithm Generation Algorithm Reduction",
"sec_num": null
},
{
"text": "Input: T={t 1, t 2, \u2026,t n } (query term space) k (# of terms to be selected) \u2206q\u2190{ t 1, t 2, \u2026,t n } Linguistic Features: Terms with certain linguistic properties are often viewed semantics-bearing and informative for search. Linguistic features of query terms are mainly inclusive of parts of speech (POS) and named entities (NE). In our experiment, the POS features comprise noun, verb, adjective, and adverb, the NE features include person names, locations, organizations, and time, and other linguistic features contain acronym, size (i.e., number of words in a term) and phrase, all of which have shown their importance in many IR applications. The values of these linguistic features are binary except the size feature. POS and NE are labeled manually for high quality of training data, and can be tagged automatically for purpose of efficiency alternatively.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Algorithm Generation Algorithm Reduction",
"sec_num": null
},
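{
"text": "A sketch of the linguistic part of the feature vector; the tag names and encoding are our own illustration, assuming POS and NE labels are already available for each term:\ndef linguistic_features(term, pos_tags, ne_tags):\n    # binary POS/NE indicators plus term size (number of words)\n    words = term.split()\n    return {\n        'noun': int('N' in pos_tags), 'verb': int('V' in pos_tags),\n        'adj': int('ADJ' in pos_tags), 'adv': int('ADV' in pos_tags),\n        'person': int('PER' in ne_tags), 'loc': int('LOC' in ne_tags),\n        'org': int('ORG' in ne_tags), 'time': int('TIME' in ne_tags),\n        'acronym': int(term.isupper()),\n        'size': len(words),\n        'phrase': int(len(words) > 1),\n    }",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Algorithm Generation Algorithm Reduction",
"sec_num": null
},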
{
"text": "for i = 1 to n-k do * \u2190 \u2208 { } \u2206q\u2190 \u2206q \u2212 { * } T\u2190 T \u2212{ * } end Output \u2206q",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Algorithm Generation Algorithm Reduction",
"sec_num": null
},
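{
"text": "A direct Python rendering of the two algorithms; r_score(t, T) stands for the learned function r applied to term t in the context of the current T, so statistical features are re-estimated as T shrinks:\ndef generation(T, k, r_score):\n    # greedily pick the k most effective terms\n    T, dq = list(T), []\n    for _ in range(k):\n        best = max(T, key=lambda t: r_score(t, T))\n        dq.append(best)\n        T.remove(best)  # T changes, so features are re-estimated next round\n    return dq\n\ndef reduction(T, k, r_score):\n    # greedily drop the |T|-k most ineffective (noisy) terms\n    T = list(T)\n    for _ in range(len(T) - k):\n        worst = min(T, key=lambda t: r_score(t, T))\n        T.remove(worst)\n    return T",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Algorithm Generation Algorithm Reduction",
"sec_num": null
},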
{
"text": "Statistical features of term t i refer to the statistical information about the term in a document collection. This information could be about the term itself such as term frequency (TF) and inverse document frequency (IDF), or the relationship between the term and other terms in space T. We present two methods for estimating such term relationship. The first method depends on co-occurrences of terms t i and t j (t j \uf0ce T, t i \u2260t j ) and co-occurrences of terms t i and T-{t i } in the document collection. The former is called term-term co-occur feature while the latter is called term-topic co-occur feature. The second method extracts so-called context vectors as features from the search results of t i , t j , and T-{t i }, respectively. The term-term context feature computes the similarity between the context vectors of t i and t j while the term-topic context feature computes the similarity between context vectors of t i and T-{t i }.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Statistical Features:",
"sec_num": null
},
{
"text": "The features are used to measure whether query term t i itself could be replaced with another term t j (or remaining terms T-{t i }) in T and how much the intension is. The term without substitutes is supposed to be important in T.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Term-term & term-topic co-occur features:",
"sec_num": null
},
{
"text": "Point-wise mutual information (PMI), Chi-square statistics (X 2 ), and log-likelihood ratio (LLR) are used to measure co-occurrences between t i and Z, which is either t j or T-{t i } in this paper. Suppose that N is the number of documents in the collection, a is the number of documents containing both t i and Z, denoted as a = #d(t i ,Z). Similarly, we denote b",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Term-term & term-topic co-occur features:",
"sec_num": null
},
{
"text": "= #d(t i ,~Z) c = #d(~t i ,Z) and d = #d(~t i ,~Z) i.e., Z=N-a-b-c.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Term-term & term-topic co-occur features:",
"sec_num": null
},
{
"text": "PMI is a measure of how much term t i tells us about Z.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Term-term & term-topic co-occur features:",
"sec_num": null
},
{
"text": "PMI t i , Z = log[p(t i , Z)/p t i p(Z)] \u2248 log[a \u00d7 N/ a + b (a + c)] (6)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Term-term & term-topic co-occur features:",
"sec_num": null
},
{
"text": "X 2 compares the observed frequencies with frequencies expected for independence.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Term-term & term-topic co-occur features:",
"sec_num": null
},
{
"text": "\u03c7 2 t i , Z = N \u00d7 a \u00d7 d \u2212 b \u00d7 c 2 /[ a + b a + c b + d (c + d)] (7)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Term-term & term-topic co-occur features:",
"sec_num": null
},
{
"text": "LLR is a statistical test for making a decision between two hypotheses of dependency or independency based on the value of this ratio. We make use of average, minimum, and maximum metrics to diagnose term-term co-occur features over all possible pairs of (t i ,t j ), for any \u2260 :",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Term-term & term-topic co-occur features:",
"sec_num": null
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "= 1 | | ( \u2200 \u2208 , \u2260 , ),",
"eq_num": "(9)"
}
],
"section": "Term-term & term-topic co-occur features:",
"sec_num": null
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "= max \u2200 \u2208 , \u2260 X , & = min \u2200 \u2208 , \u2260 X( , )",
"eq_num": "(10)"
}
],
"section": "Term-term & term-topic co-occur features:",
"sec_num": null
},
{
"text": "where X is PMI, LLR or X 2 . Moreover, given T={t 1 , \u2026, t n } as a training query term space, we sort all terms t i according to their , , or , and their rankings varied from 1 to n are treated the additional features.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Term-term & term-topic co-occur features:",
"sec_num": null
},
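{
"text": "The three co-occurrence measures and their avg/max/min aggregates can be sketched as follows; since eq. (8) is not reproduced above, the llr function uses the standard log-likelihood ratio for a 2x2 contingency table as our assumption:\nimport math\n\ndef pmi(a, b, c, d):  # a, b, c, d as defined above; N = a+b+c+d\n    N = a + b + c + d\n    return math.log(a * N / ((a + b) * (a + c)))\n\ndef chi_square(a, b, c, d):\n    N = a + b + c + d\n    return N * (a * d - b * c) ** 2 / ((a + b) * (a + c) * (b + d) * (c + d))\n\ndef llr(a, b, c, d):\n    # standard 2x2 log-likelihood ratio (our assumption for eq. (8))\n    N = a + b + c + d\n    def part(o, e):\n        return o * math.log(o / e) if o > 0 else 0.0\n    return 2 * (part(a, (a + b) * (a + c) / N) + part(b, (a + b) * (b + d) / N)\n                + part(c, (c + d) * (a + c) / N) + part(d, (c + d) * (b + d) / N))\n\ndef cooccur_aggregates(t_i, T, counts, measure):\n    # counts(t_i, t_j) -> (a, b, c, d); measure is pmi, chi_square or llr\n    vals = [measure(*counts(t_i, t_j)) for t_j in T if t_j != t_i]\n    return sum(vals) / len(T), max(vals), min(vals)  # X_avg, X_max, X_min per eqs. (9)-(10)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Term-term & term-topic co-occur features:",
"sec_num": null
},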
{
"text": "The term-topic co-occur features are nearly identical to the term-term co-occur features with an exception that term-topic co-occur features are used in measuring the relationship between t i and query topic T-{ }. The co-occur features can be quickly computed from the indices of IR systems with caches.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Term-term & term-topic co-occur features:",
"sec_num": null
},
{
"text": "The co-occurrence features are reliable for estimating the relationship between high-frequency query terms. Unfortunately, term t i is probably not co-occurring with T-{t i } in the document collection at all. The context features are hence helpful for low-frequency query terms that share common contexts in search results.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Term-term & term-topic context features:",
"sec_num": null
},
{
"text": "More specifically, we generate the context vectors from the search results of t i and t j (or T-{t i }), respectively. The context vector is composed of a list of pairs <document ID, relevance score>, which can be obtained from the search results returned by IR systems. The relationship between t i and t j (or T-{t i }) is captured by the cosine similarity between their context vectors. Note that to extract the context features, we are required to retrieve documents. The retrieval performance may affect the quality of the context features and the process is time-consuming. We conduct extensive experiments on NTCIR-4 and NTCIR-5 English-English ad-hoc IR tasks. Table 1 shows the statistics of the data collections. We evaluate our methods with description queries, whose average length is 14.9 query terms. Both queries and documents are stemmed with the Porter stemmer and stop words are removed. The remaining query terms for each query topic form a query term space T. Three retrieval models, the vector space model (TFIDF), the language model (Indri) and the probabilistic model (Okapi), are constructed using Lemur Toolkit [21] , for examining the robustness of our methods across different frameworks. MAP is used as evaluation metric for top 1000 documents retrieved.",
"cite_spans": [
{
"start": 1136,
"end": 1140,
"text": "[21]",
"ref_id": null
}
],
"ref_spans": [
{
"start": 669,
"end": 676,
"text": "Table 1",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Term-term & term-topic context features:",
"sec_num": null
},
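{
"text": "A sketch of the context features, assuming a hypothetical search(q) helper that returns the <document ID, relevance score> pairs for query q:\nimport math\n\ndef context_feature(results_i, results_j):\n    # cosine similarity between two context vectors of (doc_id, score) pairs\n    v, w = dict(results_i), dict(results_j)\n    dot = sum(v[d] * w[d] for d in v.keys() & w.keys())\n    norm = math.sqrt(sum(x * x for x in v.values())) * math.sqrt(sum(x * x for x in w.values()))\n    return dot / norm if norm else 0.0",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Term-term & term-topic context features:",
"sec_num": null
},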
{
"text": "To ensure the quality of the training dataset, we remove the poorly-performing queries whose average precision is below 0.02. As different retrieval models have different MAP on the same queries, there are different numbers of training and test instances in different models. We up-sample the positive instances by repeating them up to the same number as the negative ones. Table 2 summarizes the settings for training instances.",
"cite_spans": [],
"ref_spans": [
{
"start": 374,
"end": 381,
"text": "Table 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "Experiment Settings",
"sec_num": "3.1"
},
{
"text": "We use 5-fold cross validation for training and testing our regression function r. To avoid inside test due to up-sampling, we ensure that all the instances in the training set are different from those of the test set. The 2 statistics ( 2 \u2208[0, 1]) is used to evaluate the prediction accuracy of our regression function r:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Performance of Regression Function",
"sec_num": "3.2"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "2 = ( \u2212 y ) 2 ( \u2212 y ) 2 ,",
"eq_num": "(11)"
}
],
"section": "Performance of Regression Function",
"sec_num": "3.2"
},
{
"text": "where R 2 explains the variation between true label =( \u2212 ( \u2212 { }))/ ( ) and fit value y =wf(t i )+b for each testing query term t i \u2208T, as explained in Section 2.2. y is the mean of the ground truth. [14] gives unequal importance to words with different POS. Our modified content load (m-Cl) sets weight of a noun as 1 and the weights of adjectives, verbs, and participles as 0.147 for IR.",
"cite_spans": [
{
"start": 200,
"end": 204,
"text": "[14]",
"ref_id": "BIBREF10"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Performance of Regression Function",
"sec_num": "3.2"
},
{
"text": "Our m-SCS extends the simplified clarity score (SCS) [9] as a feature by calculating the relative entropy between query terms and collection language models (unigram distributions).",
"cite_spans": [
{
"start": 53,
"end": 56,
"text": "[9]",
"ref_id": "BIBREF5"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Performance of Regression Function",
"sec_num": "3.2"
},
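{
"text": "Eq. (11), as reconstructed above, takes the variation of the fitted values around the mean of the ground truth over the total variation; a direct numpy rendering:\nimport numpy as np\n\ndef r_squared(y_true, y_pred):\n    # eq. (11): explained variation over total variation\n    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)\n    y_bar = y_true.mean()\n    return np.sum((y_pred - y_bar) ** 2) / np.sum((y_true - y_bar) ** 2)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Performance of Regression Function",
"sec_num": "3.2"
},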
{
"text": "It can be seen that our function r is quite independent of retrieval models. The performance of the statistical features is better than that of the linguistic features because the statistical features reflect the statistical relationship between query terms in the document collections.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Performance of Regression Function",
"sec_num": "3.2"
},
{
"text": "Combining both outperforms each one, which reveals both features are complementary. The improvement by m-Cl and m-SCS is not clear due to their similarity to the other features.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Performance of Regression Function",
"sec_num": "3.2"
},
{
"text": "Combining all features achieves the best R 2 value 0.945 in average, which guarantees us a large portion of explainable variation in y and hence our regression model r is reliable.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Performance of Regression Function",
"sec_num": "3.2"
},
{
"text": "Yet another interesting aspect of this study is to find out a set of key features that play important roles in document retrieval, that is, the set of features that explain most of the variance of function r. This task can usually be done in ways fully-addressed in regression diagnostics and subset selection, each with varying degrees of complexity. One common method is to apply correlation analysis over the response and each predictor, and look for highly-correlated predictor-response pairs.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Correlation between Feature and MAP",
"sec_num": "3.3"
},
{
"text": "Three standard correlation coefficients are involved, including Pearson's product-moment correlation coefficient, Kendall's tau, and Spearman's rho. The results are given in Fig. 3 , where x-coordinate denotes features and y-coordinate denotes the value of correlation coefficient. From Fig. 3 , two context features, \"cosine\" and \"cosineinc\", are found to be positively-and highly-correlated (\u03c1>0.5) with MAP, under Pearson's coefficient. The correlation between the term-term context feature (cosine) and MAP even climbs up to 0.8.",
"cite_spans": [],
"ref_spans": [
{
"start": 174,
"end": 180,
"text": "Fig. 3",
"ref_id": null
},
{
"start": 287,
"end": 293,
"text": "Fig. 3",
"ref_id": null
}
],
"eq_spans": [],
"section": "Correlation between Feature and MAP",
"sec_num": "3.3"
},
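{
"text": "All three coefficients are available in scipy.stats; a sketch of the per-feature analysis, where X holds one column per feature and y is the per-term response (e.g., MAP gain):\nimport numpy as np\nfrom scipy.stats import pearsonr, kendalltau, spearmanr\n\ndef feature_correlations(X, y, names):\n    # returns (Pearson, Kendall tau, Spearman rho) for each feature column\n    return {name: (pearsonr(X[:, j], y)[0],\n                   kendalltau(X[:, j], y)[0],\n                   spearmanr(X[:, j], y)[0])\n            for j, name in enumerate(names)}",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Correlation between Feature and MAP",
"sec_num": "3.3"
},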
{
"text": "For any query term, high context feature value indicates high deviation in the result set caused by removal of the term from the query topic. The findings suggest that the drastic changes incurred in document ranking by removal of a term can be a good predictor. The tradeoff is the high cost in feature computation because a retrieval processing is required.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Correlation between Feature and MAP",
"sec_num": "3.3"
},
{
"text": "The co-occurrence features such as PMI, LLR, and \u03c7 2 also behave obviously correlated to MAP. The minimum value of LLR correlates more strongly to MAP than the maximum one does, which means that the independence between query terms is a useful feature.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Correlation between Feature and MAP",
"sec_num": "3.3"
},
{
"text": "In the linguistic side, we find that two features \"size\" and \"phrase\" show positive, medium-degree correlation (0.3<\u03c1<0.5) with MAP. Intuitively, a longer term might naturally be more useful as a query term than a shorter one is; this may not always be the case, but generally it is believed a shorter term is less informative due to the ambiguity it encompasses.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Fig. 3. Three correlation values between features and MAP on Okapi retrieval model",
"sec_num": null
},
{
"text": "The same rationale also applies to \"phrase\", because terms of noun phrases usually refer to a real-world event, such as \"911 attack\" and \"4th of July\", which might turn out to be the key of the topic. We also notice that some features, such as \"noun\" and \"verb\", pose positive influence to MAP than others do, which shows high concordance to a common thought in NLP that nouns and verbs are more informative than other type of words. To our surprises, NE features such as \"person\", \"geo\", \"org\" and \"time\" do not show as high concordance as the others. This might be resulted from that the training data is not sufficient enough. Features \"idf\" and \"m-SCS\" whose correlation is highly notable have positive impacts. It supports that the statistical features have higher correlation values than the linguistics ones.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Fig. 3. Three correlation values between features and MAP on Okapi retrieval model",
"sec_num": null
},
{
"text": "In this section, we devise experiments for testing the proposed query formulation algorithms. two parts: the first part is a 5-fold cross-validation on NTCIR-4 dataset, and in the second part we train the models on NTCIR-4 and test them on NTCIR-5. As both parts differ only in assignment of the training/test data, we will stick with the details for the first half (cross-validation) in the following text.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation on Information Retrieval",
"sec_num": "3.4"
},
{
"text": "The result is given in Table 4 . Evaluation results on NTCIR-4 and NTCIR-5 are presented in the upper-and lower-half of the table, respectively. We offer two baseline methods in the experiments: \"BL1\" puts together all the query terms into one query string, while \"BL2\" only consider nouns as query terms since nouns are claimed to be more informative in several previous works. Besides, the upper bound UB is presented in the benchmark: for each topic, we permute all sub queries and discover the sub-query with the highest MAP. As term selection can also be treated as a classification problem, we use the same features of our regression function r to train two SVM classifiers, Gen-C and Red-C. Gen-C selects terms classified as \"effective\" while Red-C removes terms classified as \"ineffective\". Gen-R and",
"cite_spans": [],
"ref_spans": [
{
"start": 23,
"end": 30,
"text": "Table 4",
"ref_id": "TABREF6"
}
],
"eq_spans": [],
"section": "Evaluation on Information Retrieval",
"sec_num": "3.4"
},
{
"text": "Red-R denote our Generation and Reduction algorithms, respectively. The retrieval results",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation on Information Retrieval",
"sec_num": "3.4"
},
{
"text": "are presented in terms of MAP. Gain ratios in MAP with respect to the two baseline methods are given in average results. We use two-tailed t-distribution in the significance test for each method (against the BL1) by viewing AP values obtained in all query session as data points, with p<0.01 marked ** and p<0.05 marked *. Table 4 , the MAP difference between two baseline methods is small. This might be because some nouns are still noisy for IR. The four generation and reduction methods significantly outperform the baseline methods. We improve the baseline methods by 5.60% to 11.9% in the cross-validation runs and on NTCIR-5 data. This result shows the robustness and reliability of the proposed algorithms. Furthermore, all the methods show significant improvements when applied to certain retrieval models, such as Indri and TFIDF;",
"cite_spans": [],
"ref_spans": [
{
"start": 323,
"end": 330,
"text": "Table 4",
"ref_id": "TABREF6"
}
],
"eq_spans": [],
"section": "Evaluation on Information Retrieval",
"sec_num": "3.4"
},
{
"text": "performance gain with Okapi model is less significant on NTCIR-5 data, especially when reduction algorithm is called for. The regression methods generally achieve better MAP than the classification methods. This is because the regression methods always select the most informative terms or drop the most ineffective terms among those that are not selected yet.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation on Information Retrieval",
"sec_num": "3.4"
},
{
"text": "The encouraging evaluation results show that, despite the additional costs on iterative processing, the performance of the proposed algorithms is effective across different benchmark collections, and based on a query term space T, the algorithms are capable of suggesting better ways to form a query.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation on Information Retrieval",
"sec_num": "3.4"
},
{
"text": "[4] proposed a method for selecting Good Expansion Terms (GET) based on an SVM classifier. Our approach is also applicable to selection of query expansion terms. Given the same set of candidate expansion terms which are generated by conventional approaches such as TF and IDF, GET-C runs the Gen-C method whereas GET-R runs the Gen-R on the expansion set (with the NTCIR-4 5-fold cross validation regression model). Table 5 shows the MAP results of the two methods and the baseline method (BL), which adds all expansion terms to original queries. From Table 5 , GET-R outperforms GET-C under different retrieval models and data sets, and both methods improve MAP by 1.76% to 3.44% compared to the baseline. Moreover, though extra terms are introduced for query formulation, we can see that certain MAP results in Table 4 still outperform those in Table 5 (marked italic). It is therefore inferred that, it is still important to filter out noisy terms in original query even though good expansion terms are selected. Finally, note that we use the NTCIR-4 5-fold cross validation regression model, which is trained to fit the target performance gain in NTCIR-4 dataset, rather than instances in the query expansion terms set. However, results in Table 5 show that this model works satisfactorily in selection of good expansion terms, which ensures that our approach is robust in different environments and applications such as query expansion. We further investigate the impact of various ranking schemes based on our proposed algorithms. The ranking scheme in the Generation algorithm (or the Reduction algorithm)",
"cite_spans": [],
"ref_spans": [
{
"start": 416,
"end": 423,
"text": "Table 5",
"ref_id": "TABREF7"
},
{
"start": 552,
"end": 559,
"text": "Table 5",
"ref_id": "TABREF7"
},
{
"start": 813,
"end": 820,
"text": "Table 4",
"ref_id": "TABREF6"
},
{
"start": 847,
"end": 854,
"text": "Table 5",
"ref_id": "TABREF7"
},
{
"start": 1244,
"end": 1251,
"text": "Table 5",
"ref_id": "TABREF7"
}
],
"eq_spans": [],
"section": "Evaluation on Information Retrieval",
"sec_num": "3.4"
},
{
"text": "refers to an internal ranking mechanism that decides which term shall be included in (or discarded away). Three types of ranking schemes are tested based on our regression function r. \"max-order\" always returns the term that is most likely to contribute relevance to a query topic, \"min-order\" returns the term that is most likely to bring in noise, and \"random-order\" returns a randomly-chosen term. Figure 4 shows the MAP curve for each scheme by connecting the dots at (1, MAP (1) ), \u2026 , (n, MAP (n) ), where MAP (i) is the MAP obtained at iteration i. It tells that the performance curves in the generation process share an interesting tendency: the curves keep going up in first few iterations, while after the maximum (locally to each method) is reached, they begin to go down rapidly. The findings might informally establish the validity of our assumption that a longer query topic might encompass more noise terms. The same \"up-and-down\" pattern does not look so obvious in the reduction process; however, if we take the derivative of the curve at each iteration i (i.e., the performance gain/loss ratio), we might find it resembles the pattern we have discovered. We may also find that, in the generation process, different ranking schemes come with varying degrees of MAP gains. The ranking scheme \"max-order\" constantly provides the largest performance boost, as opposed to the other two schemes. In the reduction process, \"max-order\" also offers the most drastically performance drop than the other two schemes do.",
"cite_spans": [],
"ref_spans": [
{
"start": 401,
"end": 409,
"text": "Figure 4",
"ref_id": "FIGREF3"
}
],
"eq_spans": [],
"section": "Evaluation on Information Retrieval",
"sec_num": "3.4"
},
{
"text": "Generally, in the generation process, the best MAP value for each setting might take place somewhere between iteration n/2 to 2n/3, given n is the size of the query topic. ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation on Information Retrieval",
"sec_num": "3.4"
},
{
"text": "In this paper, we propose an approach to measure and predict the impact of query terms, based on the discovery of linguistic, co-occurrence, and contextual features, which are analyzed by their correlation with MAP. Experimental results show that our query formulation approach significantly improves retrieval performance.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussions and Conclusions",
"sec_num": "4."
},
{
"text": "The proposed method is robust and the experimental results are consistent on different retrieval models and document collections. In addition, an important aspect of this paper is that we are able to capture certain characteristics of query terms that are highly effective for IR. Aside from intuitive ideas that informative terms are often lengthy and tagged nouns as their POS category, we have found that the statistical features are more likely to decide the effectiveness of query terms than linguistics ones do. We also observe that context features are mostly correlated to MAP and thus are most powerful for term difficulty prediction.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussions and Conclusions",
"sec_num": "4."
},
{
"text": "However, such post-retrieval features require much higher cost than the pre-retrieval features, in terms of time and space.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussions and Conclusions",
"sec_num": "4."
},
{
"text": "The proposed approach actually selects local optimal query term during each iteration of generation or reduction. The reason for this greedy algorithm is that it is inappropriate to exhaustively enumerate all sub-queries for online applications such as search engines. Further, it is challenging to automatically determine the value of parameter k in our algorithms, which is selected to optimize the MAP of each query topic. Also, when applying our approach to web applications, we need web corpus to calculate the statistical features for training models. ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussions and Conclusions",
"sec_num": "4."
}
],
"back_matter": [],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Selecting good expansion terms for pseudo-relevance feedback",
"authors": [
{
"first": "G",
"middle": [],
"last": "Cao",
"suffix": ""
},
{
"first": "J",
"middle": [
"Y"
],
"last": "Nie",
"suffix": ""
},
{
"first": "J",
"middle": [
"F"
],
"last": "Gao",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Robertson",
"suffix": ""
}
],
"year": 2008,
"venue": "31st annual international ACM SIGIR conference on Research and development in information retrieval",
"volume": "",
"issue": "",
"pages": "243--250",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Cao, G., Nie, J. Y., Gao, J. F., & Robertson, S.: Selecting good expansion terms for pseudo-relevance feedback. In: 31st annual international ACM SIGIR conference on Research and development in information retrieval, pp. 243--250 (2008)",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "SIGIR WORKSHOP REPORT: Predicting Query Difficulty -Methods and Applications. WORKSHOP SESSION: SIGIR",
"authors": [
{
"first": "D",
"middle": [],
"last": "Carmel",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Yom-Tov",
"suffix": ""
},
{
"first": "I",
"middle": [],
"last": "Soboroff",
"suffix": ""
}
],
"year": 2005,
"venue": "",
"volume": "",
"issue": "",
"pages": "25--28",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Carmel, D., Yom-Tov, E., Soboroff, I.: SIGIR WORKSHOP REPORT: Predicting Query Difficulty -Methods and Applications. WORKSHOP SESSION: SIGIR, pp. 25--28 (2005)",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "What makes a query difficult?",
"authors": [
{
"first": "D",
"middle": [],
"last": "Carmel",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Yom-Tov",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Darlow",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Pelleg",
"suffix": ""
}
],
"year": 2006,
"venue": "29th annual international ACM SIGIR",
"volume": "",
"issue": "",
"pages": "390--397",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Carmel, D., Yom-Tov, E., Darlow, A., Pelleg, D.: What makes a query difficult? In: 29th annual international ACM SIGIR, pp. 390--397 (2006)",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Automatic Query Refinement using Lexical Affinities with Maximal Information Gain",
"authors": [
{
"first": "D",
"middle": [],
"last": "Carmel",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Farchi",
"suffix": ""
},
{
"first": "Y",
"middle": [],
"last": "Petruschka",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Soffer",
"suffix": ""
}
],
"year": 2002,
"venue": "25th annual international ACM SIGIR",
"volume": "",
"issue": "",
"pages": "283--290",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Carmel, D., Farchi, E., Petruschka, Y., Soffer, A.: Automatic Query Refinement using Lexical Affinities with Maximal Information Gain. In: 25th annual international ACM SIGIR, pp. 283--290 (2002)",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "LIBSVM",
"authors": [
{
"first": "C",
"middle": [
"C"
],
"last": "Chang",
"suffix": ""
},
{
"first": "C",
"middle": [
"J"
],
"last": "Lin",
"suffix": ""
}
],
"year": 2001,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Chang, C. C., Lin, C. J.: LIBSVM: http://www.csie.ntu.edu.tw/~cjlin/libsvm (2001)",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Inferring query performance using pre-retrieval predictors",
"authors": [
{
"first": "B",
"middle": [],
"last": "He",
"suffix": ""
},
{
"first": "I",
"middle": [],
"last": "Ounis",
"suffix": ""
}
],
"year": 2004,
"venue": "11th International Conference of String Processing and Information Retrieval",
"volume": "",
"issue": "",
"pages": "43--54",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "He, B., Ounis, I.: Inferring query performance using pre-retrieval predictors. In: 11th International Conference of String Processing and Information Retrieval, pp. 43--54 (2004)",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Query Word Deletion Prediction",
"authors": [
{
"first": "R",
"middle": [],
"last": "Jones",
"suffix": ""
},
{
"first": "D",
"middle": [
"C"
],
"last": "Fain",
"suffix": ""
}
],
"year": 2003,
"venue": "26th annual international ACM SIGIR",
"volume": "",
"issue": "",
"pages": "435--436",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jones, R., Fain, D. C.: Query Word Deletion Prediction. In: 26th annual international ACM SIGIR, pp. 435--436 (2003)",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Effective and efficient user interaction for long queries",
"authors": [
{
"first": "G",
"middle": [],
"last": "Kumaran",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Allan",
"suffix": ""
}
],
"year": 2008,
"venue": "31st annual international ACM SIGIR",
"volume": "",
"issue": "",
"pages": "11--18",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Kumaran, G., Allan, J.: Effective and efficient user interaction for long queries. In: 31st annual international ACM SIGIR, pp. 11--18 (2008)",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Adapting information retrieval systems to user queries",
"authors": [
{
"first": "G",
"middle": [],
"last": "Kumaran",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Allan",
"suffix": ""
}
],
"year": 2008,
"venue": "Information Processing and Management",
"volume": "",
"issue": "",
"pages": "1838--1862",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Kumaran, G., Allan, J.: Adapting information retrieval systems to user queries. In: Information Processing and Management, pp. 1838-1862 (2008)",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "A New Method of Weighting Query Terms for Ad-hoc Retrieval",
"authors": [
{
"first": "K",
"middle": [
"L"
],
"last": "Kwok",
"suffix": ""
}
],
"year": 1996,
"venue": "19th annual international ACM SIGIR",
"volume": "",
"issue": "",
"pages": "187--195",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Kwok, K., L.: A New Method of Weighting Query Terms for Ad-hoc Retrieval. In: 19th annual international ACM SIGIR, pp. 187--195 (1996)",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Examining the Content Load of Part of Speech Blocks for Information Retrieval",
"authors": [
{
"first": "C",
"middle": [],
"last": "Lioma",
"suffix": ""
},
{
"first": "I",
"middle": [],
"last": "Ounis",
"suffix": ""
}
],
"year": 2006,
"venue": "COLING/ACL 2006 Main Conference Poster Sessions",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Lioma, C., Ounis, I.: Examining the Content Load of Part of Speech Blocks for Information Retrieval. In: COLING/ACL 2006 Main Conference Poster Sessions (2006)",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Linguistic and Statistical Analysis of the CLEF Topics",
"authors": [
{
"first": "T",
"middle": [],
"last": "Mandl",
"suffix": ""
},
{
"first": "C",
"middle": [],
"last": "Womser-Hacker",
"suffix": ""
}
],
"year": 2002,
"venue": "Third Workshop of the Cross-Language Evaluation Forum CLEF",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mandl,T., Womser-Hacker, C.: Linguistic and Statistical Analysis of the CLEF Topics. In: Third Workshop of the Cross-Language Evaluation Forum CLEF (2002)",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Tanguy, L: ACM SIGIR 2005 Workshop on Predicting Query Difficulty -Methods and Applications",
"authors": [
{
"first": "J",
"middle": [],
"last": "Mothe",
"suffix": ""
},
{
"first": "L",
"middle": [],
"last": "Tanguy",
"suffix": ""
}
],
"year": 2005,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mothe, J., Tanguy, L: ACM SIGIR 2005 Workshop on Predicting Query Difficulty - Methods and Applications (2005)",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Statistical Learning Theory",
"authors": [
{
"first": "V",
"middle": [
"N"
],
"last": "Vapnik",
"suffix": ""
}
],
"year": 1998,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Vapnik, V. N.: Statistical Learning Theory. John Wiley & Sons (1998)",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Juru at TREC 2004: Experiments with Prediction of Query Difficulty",
"authors": [
{
"first": "E",
"middle": [],
"last": "Yom-Tov",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Fine",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Carmel",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Darlow",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Amitay",
"suffix": ""
}
],
"year": 2004,
"venue": "13th Text Retrieval Conference",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yom-Tov, E., Fine, S., Carmel, D., Darlow, A., Amitay, E.: Juru at TREC 2004: Experiments with Prediction of Query Difficulty. In: 13th Text Retrieval Conference (2004)",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Query Performance Prediction in Web Search Environments",
"authors": [
{
"first": "Y",
"middle": [],
"last": "Zhou",
"suffix": ""
},
{
"first": "W",
"middle": [
"B"
],
"last": "Croft",
"suffix": ""
}
],
"year": 2007,
"venue": "30th Annual International ACM SIGIR Conference",
"volume": "",
"issue": "",
"pages": "543--550",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Zhou, Y., and Croft, W. B.: Query Performance Prediction in Web Search Environments. In: 30th Annual International ACM SIGIR Conference, pp. 543--550 (2007)",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Ranking Robustness: A Novel Framework to Predict Query Performance",
"authors": [
{
"first": "Y",
"middle": [],
"last": "Zhou",
"suffix": ""
},
{
"first": "W",
"middle": [
"B"
],
"last": "Croft",
"suffix": ""
}
],
"year": 2006,
"venue": "15th ACM international conference on Information and knowledge management",
"volume": "",
"issue": "",
"pages": "567--574",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Zhou, Y., Croft, W. B.: Ranking Robustness: A Novel Framework to Predict Query Performance. In: 15th ACM international conference on Information and knowledge management, pp. 567--574 (2006)",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"num": null,
"type_str": "figure",
"uris": null,
"text": "MAP gain by removing terms from original NTCIR-4 description queries."
},
"FIGREF1": {
"num": null,
"type_str": "figure",
"uris": null,
"text": "The Generation Algorithm and the Reduction Algorithm2.5 Features Used for Term SelectionLinguistic and statistical features provide important clues for selection of good query terms from viewpoints of users and collections, and we use them to train function r."
},
"FIGREF3": {
"num": null,
"type_str": "figure",
"uris": null,
"text": "MAP curves based on regression model for description queries of NTCIR-4 on Indri, TFIDF, and Okapi models, each with three selection order. X coordinate is # of query terms; Y coordinate is MAP."
},
"FIGREF4": {
"num": null,
"type_str": "figure",
"uris": null,
"text": "Allan, J., Callan, J., Croft, W. B., Ballesteros, L., Broglio, J., Xu, J., Shu, H.: INQUERY at TREC-5. In: Fifth Text REtrieval Conference (TREC-5), pp. 119--132 (1997) [2] Amati, G., Carpineto, C., Romano, G.: Query Difficulty, Robustness, and Selective Application of Query Expansion. In: 26th European Conference on IR Research, UK (2004) [3] Bendersky M., Croft, W. B.: Discovering key concepts in verbose queries. In: 31st annual international ACM SIGIR conference on Research and development in information retrieval,"
},
"TABREF0": {
"text": "we remove one term from each of the 50 topics {T}, in average, 46 topics have negative influence, i.e., g avg (T)<0. This means",
"num": null,
"type_str": "table",
"content": "<table><tr><td>that deleting one term from T mostly leads to a negative impact on MAP, compared to</td></tr><tr><td>original T. On the other hand, g max (T)>0 shows that at least the removal of one term</td></tr><tr><td>positively improves MAP. By removing such terms we can obtain better performance. The</td></tr><tr><td>phenomenon appears in 35 out of 50 topics, which is statistically suggestive that there exists</td></tr><tr><td>noisy terms in most of user-constructed queries. In short, removing different terms from each</td></tr><tr><td>topic T causes MAP variation in different levels. Some query terms are highly</td></tr><tr><td>information-bearing, while others might hurt MAP. It is worth mentioned that we conduct the</td></tr><tr><td>same experiment with the Indri and TFIDF retrieval models using the Lemur toolkit [21]. The</td></tr><tr><td>results are quite consistent over different models. This characteristic makes it possible for the</td></tr><tr><td>effectiveness of a query term on IR to be learned and applied to query formulation.</td></tr></table>",
"html": null
},
"TABREF2": {
"text": "Adopted dataset after data clean. Number of each setting is shown in each row for",
"num": null,
"type_str": "table",
"content": "<table><tr><td>NTCIR-4 and NTCIR-5</td><td/><td/></tr><tr><td/><td/><td>NTCIR-4</td><td>NTCIR-5</td></tr><tr><td/><td/><td><desc></td><td><desc></td></tr><tr><td/><td>#(query topics)</td><td>58</td><td>47</td></tr><tr><td/><td>#(distinct terms)</td><td>865</td><td>623</td></tr><tr><td/><td>#(terms/query)</td><td>14.9</td><td>13.2</td></tr><tr><td colspan=\"4\">Table 2. Number of training instances. (x : y) shows the number of positive and negative</td></tr><tr><td colspan=\"3\">MAP gain instances are x and y, respectively</td></tr><tr><td/><td>Indri</td><td>TFIDF</td><td>Okapi</td></tr><tr><td>Original</td><td colspan=\"2\">674(156:518) 702(222:480)</td><td>687(224:463</td></tr><tr><td colspan=\"2\">Upsample 1036(518:51</td><td>960(480:480)</td><td>) 926(463:463</td></tr><tr><td>Train</td><td colspan=\"2\">8) 828(414:414) 768(384:384)</td><td>) 740(370:370</td></tr><tr><td>Test</td><td colspan=\"2\">208(104:104) 192(96:96)</td><td>) 186 (93:93)</td></tr></table>",
"html": null
},
"TABREF3": {
"text": "R 2 of regression model r with multiple combinations of training features.",
"num": null,
"type_str": "table",
"content": "<table><tr><td>L:</td></tr></table>",
"html": null
},
"TABREF4": {
"text": "",
"num": null,
"type_str": "table",
"content": "<table/>",
"html": null
},
"TABREF5": {
"text": "The benchmark collections are NTCIR-4 and NTCIR-5. The experiments can be divided into",
"num": null,
"type_str": "table",
"content": "<table><tr><td>0 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>acronym</td><td>noun</td><td>verb</td><td>adj pearson adv person</td><td>org</td><td>geo</td><td>time kendall size</td><td>phrase</td><td>llr</td><td>llrmin spearman llrmax llrmin_r</td><td>llrmax_r</td><td>pmi</td><td>pmiinc</td><td>pmimin</td><td>pmimax</td><td>pmimin_r</td><td>pmimax_r</td><td>x2</td><td>x2inc</td><td>x2min</td><td>x2max</td><td>x2min_r</td><td>x2_max_r</td><td>tf</td><td>idf</td><td>cosine</td><td>cosineinc</td><td>cosine_min</td><td>cosine_max</td><td>cosine_min_r</td><td>cosine_max_r</td><td>m_Cl</td><td>m_SCS</td></tr></table>",
"html": null
},
"TABREF6": {
"text": "MAP of baseline and multiple proposed methods on NTCIR-4 <desc> regression model. (+x, +y) shows the improvement percentage of MAP corresponding to BL1 and BL2. TFIDF and Okapi models have PRF involved, Indri model does not. Best MAP of each retrieval model is marked bold for both collections.",
"num": null,
"type_str": "table",
"content": "<table><tr><td colspan=\"2\">Settings Metho</td><td>Indri</td><td>TFIDF</td><td>Okapi</td><td>Avg.</td></tr><tr><td>NTCIR-</td><td>d UB</td><td>0.2233</td><td>0.3052</td><td>0.3234</td><td>0.2839</td></tr><tr><td>4</td><td>BL1</td><td>0.1742</td><td>0.2660</td><td>0.2718</td><td>0.2373</td></tr><tr><td><desc></td><td>BL2</td><td>0.1773</td><td>0.2622</td><td>0.2603</td><td>0.2332</td></tr><tr><td>Queries</td><td>Gen-C</td><td>0.1949</td><td>0.2823</td><td>0.2946</td><td>0.2572(+8.38%,+10.2)</td></tr><tr><td/><td>Gen-R</td><td>** 0.1954</td><td>** 0.2861</td><td>** 0.2875</td><td>%) 0.2563(+8.00%,+9.90)</td></tr><tr><td/><td>Red-C</td><td>** 0.1911*</td><td>** 0.2755</td><td>* 0.2854</td><td>%) 0.2506(+5.60%,+7.46)</td></tr><tr><td/><td>Red-R</td><td>* 0.1974</td><td>** 0.2773</td><td>** 0.2797</td><td>%) 0.2514(+5.94%,+7.80)</td></tr><tr><td>NTCIR-</td><td>UB</td><td>** 0.1883</td><td>** 0.2245</td><td>0.2420</td><td>%) 0.2182</td></tr><tr><td>5</td><td>BL1</td><td>0.1523</td><td>0.1988</td><td>0.1997</td><td>0.1836</td></tr><tr><td><desc></td><td>BL2</td><td>0.1543</td><td>0.2035</td><td>0.1969</td><td>0.1849</td></tr><tr><td>Queries</td><td>Gen-C</td><td>0.1699</td><td>0.2117*</td><td>0.2213</td><td>0.2009(+9.42%,+8.65)</td></tr><tr><td/><td>Gen-R</td><td>** 0.1712</td><td>0.2221</td><td>* 0.2232</td><td>%) 0.2055(+11.9%,+11.1)</td></tr><tr><td/><td>Red-C</td><td>** 0.1645</td><td>* 0.2194</td><td>* 0.2084</td><td>%) 0.1974(+7.51%,+6.76)</td></tr><tr><td/><td>Red-R</td><td>** 0.1749</td><td>* 0.2034</td><td>0.2160</td><td>%) 0.1981(+7.89%,+7.13)</td></tr><tr><td>From</td><td/><td>**</td><td>**</td><td>*</td><td>%)</td></tr></table>",
"html": null
},
"TABREF7": {
"text": "MAP of query expansion based on GET-C and GET-R model. (%) shows the improvement percentage of MAP to BL. Significance test is tested against the baseline results.",
"num": null,
"type_str": "table",
"content": "<table><tr><td>Settings</td><td>Method</td><td>Indri</td><td>TFIDF</td><td>Okapi</td><td>Avg.</td></tr><tr><td>NTCIR-4</td><td>BL</td><td>0.2470</td><td>0.2642</td><td>0.2632</td><td>0.2581</td></tr><tr><td><desc></td><td>GET-C</td><td>0.2472**</td><td>0.2810**</td><td>0.2728**</td><td>0.2670</td></tr><tr><td/><td>GET-R</td><td>0.2610**</td><td>0.2860**</td><td>0.2899**</td><td>(+3.44%) 0.2789</td></tr><tr><td>NTCIR-5</td><td>BL</td><td>0.1795</td><td>0.1891</td><td>0.1913</td><td>(+8.05%) 0.1866</td></tr><tr><td><desc></td><td>GET-C</td><td>0.1868</td><td>0.1904</td><td>0.1927</td><td>0.1899</td></tr><tr><td/><td>GET-R</td><td>0.1880*</td><td>0.1918*</td><td>0.1945*</td><td>(+1.76%) 0.1914</td></tr><tr><td/><td/><td/><td/><td/><td>(+2.57%)</td></tr></table>",
"html": null
}
}
}
} |