{
"paper_id": "I08-1042",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T07:40:22.268212Z"
},
"title": "Heterogeneous Automatic MT Evaluation Through Non-Parametric Metric Combinations",
"authors": [
{
"first": "Jes\u00fas",
"middle": [],
"last": "Gim\u00e9nez",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "LSI Department Universitat Polit\u00e8cnica de Catalunya Jordi Girona",
"location": {
"addrLine": "Salgado 1-3",
"postCode": "E-08034",
"settlement": "Barcelona"
}
},
"email": "jgimenez@lsi.upc.edu"
},
{
"first": "Llu\u00eds",
"middle": [],
"last": "M\u00e0rquez",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "LSI Department Universitat Polit\u00e8cnica de Catalunya Jordi Girona",
"location": {
"addrLine": "Salgado 1-3",
"postCode": "E-08034",
"settlement": "Barcelona"
}
},
"email": "lluism@lsi.upc.edu"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "Combining different metrics into a single measure of quality seems the most direct and natural way to improve over the quality of individual metrics. Recently, several approaches have been suggested (Kulesza and Shieber, 2004; Liu and Gildea, 2007; Albrecht and Hwa, 2007a). Although based on different assumptions, these approaches share the common characteristic of being parametric. Their models involve a number of parameters whose weight must be adjusted. As an alternative, in this work, we study the behaviour of non-parametric schemes, in which metrics are combined without having to adjust their relative importance. Besides, rather than limiting to the lexical dimension, we work on a wide set of metrics operating at different linguistic levels (e.g., lexical, syntactic and semantic). Experimental results show that non-parametric methods are a valid means of putting different quality dimensions together, thus tracing a possible path towards heterogeneous automatic MT evaluation.",
"pdf_parse": {
"paper_id": "I08-1042",
"_pdf_hash": "",
"abstract": [
{
"text": "Combining different metrics into a single measure of quality seems the most direct and natural way to improve over the quality of individual metrics. Recently, several approaches have been suggested (Kulesza and Shieber, 2004; Liu and Gildea, 2007; Albrecht and Hwa, 2007a). Although based on different assumptions, these approaches share the common characteristic of being parametric. Their models involve a number of parameters whose weight must be adjusted. As an alternative, in this work, we study the behaviour of non-parametric schemes, in which metrics are combined without having to adjust their relative importance. Besides, rather than limiting to the lexical dimension, we work on a wide set of metrics operating at different linguistic levels (e.g., lexical, syntactic and semantic). Experimental results show that non-parametric methods are a valid means of putting different quality dimensions together, thus tracing a possible path towards heterogeneous automatic MT evaluation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Automatic evaluation metrics have notably accelerated the development cycle of MT systems in the last decade. There exist a large number of metrics based on different similarity criteria. By far, the most widely used metric in recent literature is BLEU (Papineni et al., 2001) . Other well-known metrics are WER (Nie\u00dfen et al., 2000) , NIST (Doddington, 2002) , GTM (Melamed et al., 2003) , ROUGE (Lin and Och, 2004a) , METEOR (Banerjee and Lavie, 2005) , and TER (Snover et al., 2006) , just to name a few. All these metrics take into account information at the lexical level 1 , and, therefore, their reliability depends very strongly on the heterogeneity/representativity of the set of reference translations available (Culy and Riehemann, 2003) . In order to overcome this limitation several authors have suggested taking advantage of paraphrasing support (Zhou et al., 2006; Kauchak and Barzilay, 2006; Owczarzak et al., 2006) . Other authors have tried to exploit information at deeper linguistic levels. For instance, we may find metrics based on full constituent parsing (Liu and Gildea, 2005) , and on dependency parsing (Liu and Gildea, 2005; Amig\u00f3 et al., 2006; Mehay and Brew, 2007; Owczarzak et al., 2007) . We may find also metrics at the level of shallow-semantics, e.g., over semantic roles and named entities (Gim\u00e9nez and M\u00e0rquez, 2007) , and at the properly semantic level, e.g., over discourse representations (Gim\u00e9nez, 2007) .",
"cite_spans": [
{
"start": 253,
"end": 276,
"text": "(Papineni et al., 2001)",
"ref_id": "BIBREF25"
},
{
"start": 312,
"end": 333,
"text": "(Nie\u00dfen et al., 2000)",
"ref_id": "BIBREF22"
},
{
"start": 336,
"end": 340,
"text": "NIST",
"ref_id": null
},
{
"start": 341,
"end": 359,
"text": "(Doddington, 2002)",
"ref_id": "BIBREF8"
},
{
"start": 362,
"end": 365,
"text": "GTM",
"ref_id": null
},
{
"start": 366,
"end": 388,
"text": "(Melamed et al., 2003)",
"ref_id": "BIBREF21"
},
{
"start": 391,
"end": 396,
"text": "ROUGE",
"ref_id": null
},
{
"start": 397,
"end": 417,
"text": "(Lin and Och, 2004a)",
"ref_id": "BIBREF16"
},
{
"start": 427,
"end": 453,
"text": "(Banerjee and Lavie, 2005)",
"ref_id": "BIBREF4"
},
{
"start": 464,
"end": 485,
"text": "(Snover et al., 2006)",
"ref_id": "BIBREF27"
},
{
"start": 722,
"end": 748,
"text": "(Culy and Riehemann, 2003)",
"ref_id": "BIBREF7"
},
{
"start": 860,
"end": 879,
"text": "(Zhou et al., 2006;",
"ref_id": "BIBREF28"
},
{
"start": 880,
"end": 907,
"text": "Kauchak and Barzilay, 2006;",
"ref_id": "BIBREF12"
},
{
"start": 908,
"end": 931,
"text": "Owczarzak et al., 2006)",
"ref_id": "BIBREF23"
},
{
"start": 1079,
"end": 1101,
"text": "(Liu and Gildea, 2005)",
"ref_id": "BIBREF18"
},
{
"start": 1130,
"end": 1152,
"text": "(Liu and Gildea, 2005;",
"ref_id": "BIBREF18"
},
{
"start": 1153,
"end": 1172,
"text": "Amig\u00f3 et al., 2006;",
"ref_id": "BIBREF3"
},
{
"start": 1173,
"end": 1194,
"text": "Mehay and Brew, 2007;",
"ref_id": "BIBREF20"
},
{
"start": 1195,
"end": 1218,
"text": "Owczarzak et al., 2007)",
"ref_id": "BIBREF24"
},
{
"start": 1326,
"end": 1353,
"text": "(Gim\u00e9nez and M\u00e0rquez, 2007)",
"ref_id": "BIBREF10"
},
{
"start": 1429,
"end": 1444,
"text": "(Gim\u00e9nez, 2007)",
"ref_id": "BIBREF11"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "However, none of current metrics provides, in isolation, a global measure of quality. Indeed, all metrics focus on partial aspects of quality. The main problem of relying on partial metrics is that we may obtain biased evaluations, which may lead us to derive inaccurate conclusions. For instance, Callison-Burch et al. (2006) and Koehn and Monz (2006) have recently reported several problematic cases related to the automatic evaluation of systems oriented towards maximizing different quality aspects. Corroborating the findings by Culy and Riehemann (2003) , they showed that BLEU overrates SMT systems with respect to other types of systems, such as rule-based, or human-aided. The reason is that SMT systems are likelier to match the sublanguage (e.g., lexical choice and order) represented by the set of reference translations. We argue that, in order to perform more robust, i.e., less biased, automatic MT evaluations, different quality dimensions should be jointly taken into account.",
"cite_spans": [
{
"start": 298,
"end": 326,
"text": "Callison-Burch et al. (2006)",
"ref_id": "BIBREF5"
},
{
"start": 331,
"end": 352,
"text": "Koehn and Monz (2006)",
"ref_id": "BIBREF13"
},
{
"start": 534,
"end": 559,
"text": "Culy and Riehemann (2003)",
"ref_id": "BIBREF7"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "A natural solution to this challenge consists in combining the scores conferred by different metrics, ideally covering a heterogeneous set of quality aspects. In the last few years, several approaches to metric combination have been suggested (Kulesza and Shieber, 2004; Liu and Gildea, 2007; Albrecht and Hwa, 2007a) . In spite of working on a limited set of quality aspects, mostly lexical features, these approaches have provided effective means of combining different metrics into a single measure of quality. All these methods implement a parametric combination scheme. Their models involve a number of parameters whose weight must be adjusted (see further details in Section 2).",
"cite_spans": [
{
"start": 243,
"end": 270,
"text": "(Kulesza and Shieber, 2004;",
"ref_id": "BIBREF14"
},
{
"start": 271,
"end": 292,
"text": "Liu and Gildea, 2007;",
"ref_id": "BIBREF19"
},
{
"start": 293,
"end": 317,
"text": "Albrecht and Hwa, 2007a)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "As an alternative path towards heterogeneous MT evaluation, in this work, we explore the possibility of relying on non-parametric combination schemes, in which metrics are combined without having to adjust their relative importance (see Section 3). We have studied their ability to integrate a wide set of metrics operating at different linguistic levels (e.g., lexical, syntactic and semantic) over several evaluation scenarios (see Section 4). We show that nonparametric schemes offer a valid means of putting different quality dimensions together, effectively yielding a significantly improved evaluation quality, both in terms of human likeness and human acceptability. We have also verified that these methods port well across test beds.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Approaches to metric combination require two important ingredients:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "Combination Scheme, i.e., how to combine several metric scores into a single score. As pointed out in Section 1, we distinguish between parametric and non-parametric schemes.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "Meta-Evaluation Criterion, i.e., how to evaluate the quality of a metric combination. The two most prominent meta-evaluation criteria are:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "\u2022 Human Acceptability: Metrics are evaluated in terms of their ability to capture the degree of acceptability to humans of automatic translations, i.e., their ability to emulate human assessors. The underlying assumption is that 'good' translations should be acceptable to human evaluators. Human acceptability is usually measured on the basis of correlation between automatic metric scores and human assessments of translation quality 2 . \u2022 Human Likeness: Metrics are evaluated in terms of their ability to capture the features which distinguish human from automatic translations. The underlying assumption is that 'good' translations should resemble human translations. Human likeness is usually measured on the basis of discriminative power (Lin and Och, 2004b; Amig\u00f3 et al., 2005 ).",
"cite_spans": [
{
"start": 745,
"end": 765,
"text": "(Lin and Och, 2004b;",
"ref_id": "BIBREF17"
},
{
"start": 766,
"end": 784,
"text": "Amig\u00f3 et al., 2005",
"ref_id": "BIBREF2"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "In the following, we describe the most relevant approaches to metric combination suggested in recent literature. All are parametric, and most of them are based on machine learning techniques. We distinguish between approaches relying on human likeness and approaches relying on human acceptability.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "The first approach to metric combination based on human likeness was that by Corston-Oliver et al. (2001) who used decision trees to distinguish between human-generated ('good') and machinegenerated ('bad') translations. They focused on evaluating only the well-formedness of automatic translations (i.e., subaspects of fluency), obtaining high levels of classification accuracy. Kulesza and Shieber (2004) extended the approach by Corston-Oliver et al. (2001) to take into account other aspects of quality further than fluency alone. Instead of decision trees, they trained Support Vector Machine (SVM) classifiers. They used features inspired by well-known metrics such as BLEU, NIST, WER, and PER. Metric quality was evaluated both in terms of classification accuracy and correlation with human assessments at the sentence level.",
"cite_spans": [
{
"start": 77,
"end": 105,
"text": "Corston-Oliver et al. (2001)",
"ref_id": "BIBREF6"
},
{
"start": 380,
"end": 406,
"text": "Kulesza and Shieber (2004)",
"ref_id": "BIBREF14"
},
{
"start": 432,
"end": 460,
"text": "Corston-Oliver et al. (2001)",
"ref_id": "BIBREF6"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Approaches based on Human Likeness",
"sec_num": "2.1"
},
{
"text": "A significant improvement with respect to standard individual metrics was reported. Gamon et al. (2005) presented a similar approach which, in addition, had the interesting property that the set of human and automatic translations could be independent, i.e., human translations were not required to correspond, as references, to the set of automatic translations.",
"cite_spans": [
{
"start": 84,
"end": 103,
"text": "Gamon et al. (2005)",
"ref_id": "BIBREF9"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Approaches based on Human Likeness",
"sec_num": "2.1"
},
{
"text": "Quirk 2004applied supervised machine learning algorithms (e.g., perceptrons, SVMs, decision trees, and linear regression) to approximate human quality judgements instead of distinguishing between human and automatic translations. Similarly to the work by Gamon et al. (2005) their approach does not require human references.",
"cite_spans": [
{
"start": 255,
"end": 274,
"text": "Gamon et al. (2005)",
"ref_id": "BIBREF9"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Approaches based on Human Acceptability",
"sec_num": "2.2"
},
{
"text": "More recently, Albrecht and Hwa (2007a; 2007b ) re-examined the SVM classification approach by Kulesza and Shieber (2004) and, inspired by the work of Quirk (2004) , suggested a regression-based learning approach to metric combination, with and without human references. The regression model learns a continuous function that approximates human assessments in training examples.",
"cite_spans": [
{
"start": 15,
"end": 39,
"text": "Albrecht and Hwa (2007a;",
"ref_id": "BIBREF0"
},
{
"start": 40,
"end": 45,
"text": "2007b",
"ref_id": "BIBREF1"
},
{
"start": 95,
"end": 121,
"text": "Kulesza and Shieber (2004)",
"ref_id": "BIBREF14"
},
{
"start": 151,
"end": 163,
"text": "Quirk (2004)",
"ref_id": "BIBREF26"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Approaches based on Human Acceptability",
"sec_num": "2.2"
},
{
"text": "As an alternative to methods based on machine learning techniques, Liu and Gildea (2007) suggested a simpler approach based on linear combinations of metrics. They followed a Maximum Correlation Training, i.e., the weight for the contribution of each metric to the overall score was adjusted so as to maximize the level of correlation with human assessments at the sentence level.",
"cite_spans": [
{
"start": 67,
"end": 88,
"text": "Liu and Gildea (2007)",
"ref_id": "BIBREF19"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Approaches based on Human Acceptability",
"sec_num": "2.2"
},
{
"text": "As expected, all approaches based on human acceptability have been shown to outperform that of Kulesza and Shieber (2004) in terms of human acceptability. However, no results in terms of human likeness have been provided, thus leaving these comparative studies incomplete.",
"cite_spans": [
{
"start": 95,
"end": 121,
"text": "Kulesza and Shieber (2004)",
"ref_id": "BIBREF14"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Approaches based on Human Acceptability",
"sec_num": "2.2"
},
{
"text": "In this section, we provide a brief description of the QARLA framework (Amig\u00f3 et al., 2005) , which is, to our knowledge, the only existing non-parametric approach to metric combination. QARLA is nonparametric because, rather than assigning a weight to the contribution of each metric, the evaluation of a given automatic output a is addressed through a set of independent probabilistic tests (one per metric) in which the goal is to falsify the hypothesis that a is a human reference. The input for QARLA is a set of test cases A (i.e., automatic translations), a set of similarity metrics X, and a set of models R (i.e., human references) for each test case. With such a testbed, QARLA provides the two essential ingredients required for metric combination:",
"cite_spans": [
{
"start": 71,
"end": 91,
"text": "(Amig\u00f3 et al., 2005)",
"ref_id": "BIBREF2"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Non-Parametric Combination Schemes",
"sec_num": "3"
},
{
"text": "Combination Scheme Metrics are combined inside the QUEEN measure. QUEEN operates under the unanimity principle, i.e., the assumption that a 'good' translation must be similar to all human references according to all metrics. QUEEN X (a) is defined as the probability, over R \u00d7 R \u00d7 R, that, for every metric in X, the automatic translation a is more similar to a human reference r than two other references, r and r , to each other. Formally:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Non-Parametric Combination Schemes",
"sec_num": "3"
},
{
"text": "QUEEN X,R (a) = P rob(\u2200x \u2208 X : x(a, r) \u2265 x(r , r ))",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Non-Parametric Combination Schemes",
"sec_num": "3"
},
{
"text": "where x(a, r) stands for the similarity between a and r according to the metric x. Thus, QUEEN allows us to combine different similarity metrics into a single measure, without having to adjust their relative importance. Besides, QUEEN offers two other important advantages which make it really suitable for metric combination: (i) it is robust against metric redundancy, i.e., metrics covering similar aspects of quality, and (ii) it is not affected by the scale properties of metrics. The main drawback of the QUEEN measure is that it requires at least three human references, when in most cases only a single reference translation is available.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Non-Parametric Combination Schemes",
"sec_num": "3"
},
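To make the QUEEN definition above concrete, here is a minimal Python sketch that estimates QUEEN by enumerating reference triples from R x R x R exactly as in the formula; whether identical references should be excluded from the triples is a detail left to the original QARLA definition. The `word_overlap` metric and all names are illustrative assumptions, not part of the paper or the IQMT package.

```python
import itertools
from typing import Callable, Dict, Sequence

# A metric here is any sentence-level similarity x(candidate, reference) -> float.
Metric = Callable[[str, str], float]

def queen(a: str, references: Sequence[str], metrics: Dict[str, Metric]) -> float:
    """Estimate QUEEN_{X,R}(a): the probability, over reference triples
    (r, r', r'') drawn from R x R x R, that every metric x in X satisfies
    x(a, r) >= x(r', r'')."""
    triples = list(itertools.product(references, repeat=3))
    hits = sum(
        1 for r, r1, r2 in triples
        if all(x(a, r) >= x(r1, r2) for x in metrics.values())
    )
    return hits / len(triples)

def word_overlap(candidate: str, reference: str) -> float:
    # Toy lexical similarity (Jaccard over word sets), for illustration only.
    c, r = set(candidate.split()), set(reference.split())
    return len(c & r) / max(len(c | r), 1)

refs = ["the cat sat on the mat",
        "a cat was sitting on the mat",
        "the cat sat upon the mat"]
print(queen("the cat sat on a mat", refs, {"overlap": word_overlap}))
```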
{
"text": "Metric quality is evaluated using the KING measure of human likeness. All human references are assumed to be equally optimal and, while they are likely to be different, the best similarity metric is the one that identifies and uses the features that are common to all human references, grouping them and separating them from automatic translations. Based on QUEEN, KING represents the probability that a human reference does not receive a lower score than the score attained by any automatic translation. Formally:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Meta-evaluation Criterion",
"sec_num": null
},
{
"text": "KING_{A,R}(X) = Prob(\u2200a \u2208 A : QUEEN_{X,R\u2212{r}}(r) \u2265 QUEEN_{X,R\u2212{r}}(a))",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Meta-evaluation Criterion",
"sec_num": null
},
{
"text": "KING operates, therefore, on the basis of discriminative power. The closest measure to KING is ORANGE (Lin and Och, 2004b) , which is, however, not intended for the purpose of metric combination.",
"cite_spans": [
{
"start": 102,
"end": 122,
"text": "(Lin and Och, 2004b)",
"ref_id": "BIBREF17"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Meta-evaluation Criterion",
"sec_num": null
},
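A companion sketch for KING under the same assumptions: it reuses the `queen()` function from the snippet above and estimates, for a single test case, the probability over held-out references r that r scores at least as high as every automatic translation when QUEEN is computed against the remaining references R - {r}. How KING is aggregated over a whole test collection is not shown.

```python
from typing import Callable, Dict, Sequence

def king(automatic: Sequence[str], references: Sequence[str],
         metrics: Dict[str, Callable[[str, str], float]]) -> float:
    """Estimate KING_{A,R}(X) for one test case: the probability, over human
    references r, that QUEEN_{X,R-{r}}(r) >= QUEEN_{X,R-{r}}(a) for every
    automatic translation a in A."""
    hits = 0
    for i, held_out in enumerate(references):
        rest = [r for j, r in enumerate(references) if j != i]
        q_ref = queen(held_out, rest, metrics)  # queen() from the sketch above
        if all(q_ref >= queen(a, rest, metrics) for a in automatic):
            hits += 1
    return hits / len(references)
```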
{
"text": "Apart from being non-parametric, QARLA exhibits another important feature which differentiates it form other approaches; besides considering the similarity between automatic translations and human references, QARLA also takes into account the distribution of similarities among human references.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Meta-evaluation Criterion",
"sec_num": null
},
{
"text": "However, QARLA is not well suited to port from human likeness to human acceptability. The reason is that QUEEN is, by definition, a very restrictive measure -a 'good' translation must be similar to all human references according to all metrics. Thus, as the number of metrics increases, it becomes easier to find a metric which does not satisfy the QUEEN assumption. This causes QUEEN values to get close to zero, which turns correlation with human assessments into an impractical meta-evaluation measure.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Meta-evaluation Criterion",
"sec_num": null
},
{
"text": "We have simulated a non-parametric scheme based on human acceptability by working on uniformly averaged linear combinations (ULC) of metrics. Our approach is similar to that of Liu and Gildea (2007) except that in our case all the metrics in the combination are equally important 3 . In other words, ULC is indeed a particular case of a parametric scheme, in which the contribution of each metric is not adjusted. Formally:",
"cite_spans": [
{
"start": 177,
"end": 198,
"text": "Liu and Gildea (2007)",
"ref_id": "BIBREF19"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Meta-evaluation Criterion",
"sec_num": null
},
{
"text": "ULC X (a, R) = 1 |X| x\u2208X x(a, R)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Meta-evaluation Criterion",
"sec_num": null
},
{
"text": "where X is the metric set, and x(a, R) is the similarity between the automatic translation a and the set of references R, for the given test case, according to the metric x. Since correlation with human assessments at the system level is vaguely informative (it is often estimated on very few system samples), we ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Meta-evaluation Criterion",
"sec_num": null
},
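The ULC formula reduces to a plain average of metric scores. A minimal sketch follows, assuming each metric scores a candidate against the full reference set; as the paper's footnote cautions, a uniform average only makes sense if the combined metrics operate on comparable score ranges, so any normalisation is left to the caller.

```python
from typing import Callable, Dict, Sequence

# Each metric scores a candidate translation against the full reference set.
SetMetric = Callable[[str, Sequence[str]], float]

def ulc(a: str, references: Sequence[str], metrics: Dict[str, SetMetric]) -> float:
    """Uniformly averaged linear combination: every metric in X contributes
    with the same weight 1/|X|."""
    return sum(x(a, references) for x in metrics.values()) / len(metrics)
```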
{
"text": "In this section, we study the behavior of the two combination schemes presented in Section 3 in the context of four different evaluation scenarios.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experimental Work",
"sec_num": "4"
},
{
"text": "We use the test beds from the 2004 and 2005 NIST MT Evaluation Campaigns (Le and Przybocki, 2005) 4 . Both campaigns include two different translations exercises: Arabic-to-English ('AE') and Chinese-to-English ('CE'). Human assessments of adequacy and fluency are available for a subset of sentences, each evaluated by two different human judges. See, in Table 1 , a brief numerical description including the number of human references and system outputs available, as well as the number of sentences per output, and the number of system outputs and sentences per system assessed. For metric computation, we have used the IQMT v2.1, which includes metrics at different linguistic levels (lexical, shallow-syntactic, syntactic, shallowsemantic, and semantic). A detailed description may be found in (Gim\u00e9nez, 2007) 5 .",
"cite_spans": [
{
"start": 73,
"end": 99,
"text": "(Le and Przybocki, 2005) 4",
"ref_id": null
},
{
"start": 799,
"end": 814,
"text": "(Gim\u00e9nez, 2007)",
"ref_id": "BIBREF11"
}
],
"ref_spans": [
{
"start": 356,
"end": 363,
"text": "Table 1",
"ref_id": "TABREF1"
}
],
"eq_spans": [],
"section": "Experimental Settings",
"sec_num": "4.1"
},
{
"text": "Prior to studying the effects of metric combination, we study the isolated behaviour of individual metrics. We have selected a set of metric representatives from each linguistic level. The first observation is that the two metaevaluation criteria provide very similar metric quality rankings for a same test bed. This seems to indicate that there is a relationship between the two meta-evaluation criteria employed. We have confirmed this intuition by computing the Pearson correlation coefficient between values in columns 1 to 4 and their counterparts in columns 5 to 8. There exists a high correlation (R = 0.79).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluating Individual Metrics",
"sec_num": "4.2"
},
{
"text": "A second observation is that metric quality varies significantly from task to task. This is due to the significant differences among the test beds employed. These are related to three main aspects: language pair, translation domain, and system typology. For instance, notice that most metrics exhibit a lower quality in the case of the 'AE 05 ' test bed. The reason is that, while in the rest of test beds all systems are statistical, the 'AE 05 ' test bed presents the particularity of providing automatic translations produced by heterogeneous MT systems (i.e., systems belonging to different paradigms) 6 . The fact that most systems are statistical also explains why, in general, lexical metrics exhibit a higher quality. However, highest levels of quality are not in all cases attained by metrics at the lexical level (see highlighted values). In fact, there is only one metric, 'ROUGEW ' (based on lexical matching), which is consistently among the top-scoring in all test beds according to both meta-evaluation criteria. The underlying cause is simple: current metrics do not provide a global measure of quality, but account only for partial aspects of it. Apart from evincing the importance of the meta-evaluation process, these results strongly suggest the need for conducting heterogeneous MT evaluations. ",
"cite_spans": [
{
"start": 606,
"end": 607,
"text": "6",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluating Individual Metrics",
"sec_num": "4.2"
},
{
"text": "In that respect, we study the applicability of the two combination strategies presented. Optimal metric sets are determined by maximizing over the corresponding meta-evaluation measure (KING or R snt ). However, because exploring all possible combinations was not viable, we have used a simple algorithm which performs an approximate search. First, individual metrics are ranked according to their quality. Then, following that order, metrics are added to the optimal set only if in doing so the global quality increases. Since no training is required it has not been necessary to keep a held-out portion of the data for test (see Section 4.4 for further discussion).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Finding Optimal Metric Combinations",
"sec_num": "4.3"
},
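The approximate search described in the preceding paragraph amounts to greedy forward selection. A minimal sketch, assuming a caller-supplied `combined_quality` function that scores a candidate metric set with the chosen meta-evaluation measure (KING or R snt); all names are illustrative.

```python
from typing import Callable, Dict, List

def greedy_metric_selection(
    individual_quality: Dict[str, float],
    combined_quality: Callable[[List[str]], float],
) -> List[str]:
    """Rank metrics by their individual quality, then add each one to the
    set only if doing so increases the quality of the combination."""
    ranked = sorted(individual_quality, key=individual_quality.get, reverse=True)
    selected: List[str] = []
    best = float("-inf")
    for metric in ranked:
        score = combined_quality(selected + [metric])
        if score > best:
            selected.append(metric)
            best = score
    return selected
```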
{
"text": "Optimal metric sets are displayed in Table 3 . Inside each set, metrics are sorted in decreasing quality order. The 'Optimal Combination' line in Table 2 shows the quality attained by these sets, combined under QUEEN in the case of KING optimization, and under ULC in the case of optimizing over R snt . In most cases optimal sets consist of metrics operating at different linguistic levels, mostly at the lexical and syntactic levels. This is coherent with the findings in Section 4.2. Metrics at the semantic level are selected only in two cases, corresponding to the R snt optimization in 'AE 04 ' and 'CE 04 ' test beds. Also in two cases, corresponding to the KING optimization in 'AE 04 ' and 'CE 05 ' test beds, it has not been possible to find any metric combination which outperforms the best individual metric. This is not a discouraging result. After all, in these cases, the best metric alone achieves already a very high quality (0.79 and 0.70, respectively). The fact that a single feature suffices to discern between manual and automatic translations indicates that MT systems are easily distinguishable, possibly because of their low quality and/or because they are all based on the same translation paradigm.",
"cite_spans": [],
"ref_spans": [
{
"start": 37,
"end": 44,
"text": "Table 3",
"ref_id": "TABREF4"
},
{
"start": 146,
"end": 153,
"text": "Table 2",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Finding Optimal Metric Combinations",
"sec_num": "4.3"
},
{
"text": "It can be argued that metric set optimization is itself a training process; each metric would have an associated binary parameter controlling whether it is selected or not. For that reason, in Table 4 , we have analyzed the portability of optimal metric sets (i) across test beds and (ii) across combination strategies. As to portability across test beds (i.e., across language pairs and years), the reader must focus on the cells for which the meta-evaluation criterion guiding the metric set optimization matches the criterion used in the evaluation, i.e., the top-left and bottom-right 16-cell quadrangles. The fact that the 4 values in each subcolumn are in a very similar range confirms that optimal metric sets port well across test beds. We have also studied the portability of optimal metric sets across combination strategies. In other words, although QUEEN and ULC are thought to operate on metric combinations respectively optimized on the basis of human likeness and human acceptability, we have studied the effects of applying either measure over metric combinations optimized on the basis of the alternative metaevaluation criterion. In this case, the reader must compare top-left vs. bottom-left (KING) and topright vs. bottom-right (R snt ) 16-cell quadrangles. It can be clearly seen that optimal metric sets, in general, do not port well across meta-evaluation criteria, particularly from human likeness to human acceptability. However, interestingly, in the case of 'AE 05 ' (i.e., heterogeneous systems), the optimal metric set ports well from human acceptability to human likeness. We speculate that system heterogeneity has contributed positively for the sake of robustness.",
"cite_spans": [],
"ref_spans": [
{
"start": 193,
"end": 200,
"text": "Table 4",
"ref_id": null
}
],
"eq_spans": [],
"section": "Portability",
"sec_num": "4.4"
},
{
"text": "As an alternative to current parametric combination techniques, we have presented two different meth- Table 4 : Portability of combination strategies ods: a genuine non-parametric method based on human likeness, and a parametric method based human acceptability in which the parameter weights are set equiprobable. We have shown that both strategies may yield a significantly improved quality by combining metrics at different linguistic levels. Besides, we have shown that these methods generalize well across test beds. Thus, a valid path towards heterogeneous automatic MT evaluation has been traced. We strongly believe that future MT evaluation campaigns should benefit from these results specially for the purpose of comparing systems based on different paradigms. These techniques could also be used to build better MT systems by allowing system developers to perform more accurate error analyses and less biased adjustments of system parameters.",
"cite_spans": [],
"ref_spans": [
{
"start": 102,
"end": 109,
"text": "Table 4",
"ref_id": null
}
],
"eq_spans": [],
"section": "Conclusions",
"sec_num": "5"
},
{
"text": "As an additional result, we have found that there is a tight relationship between human acceptability and human likeness. This result, coherent with the findings by Amig\u00f3 et al. (2006) , suggests that the two criteria are interchangeable. This would be a point in favour of combination schemes based on human likeness, since human assessments -which are expensive to acquire, subjective and not reusableare not required. We also interpret this result as an indication that human assessors probably behave in many cases in a discriminative manner. For each test case, assessors would inspect the source sentence and the set of human references trying to identify the features which 'good' translations should comply with, for instance regarding adequacy and fluency. Then, they would evaluate automatic translations roughly according to the number and relevance of the features they share and the ones they do not.",
"cite_spans": [
{
"start": 165,
"end": 184,
"text": "Amig\u00f3 et al. (2006)",
"ref_id": "BIBREF3"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions",
"sec_num": "5"
},
{
"text": "For future work, we plan to study the integration of finer features as well as to conduct a rigorous comparison between parametric and non-parametric combination schemes. This may involve reproducing the works by Kulesza and Shieber (2004) and Albrecht and Hwa (2007a) . This would also allow us to evaluate their approaches in terms of both human likeness and human acceptability, and not only on the latter criterion as they have been evaluated so far.",
"cite_spans": [
{
"start": 213,
"end": 239,
"text": "Kulesza and Shieber (2004)",
"ref_id": "BIBREF14"
},
{
"start": 244,
"end": 268,
"text": "Albrecht and Hwa (2007a)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions",
"sec_num": "5"
},
{
"text": "ROUGE and METEOR may consider morphological variations. METEOR may also look up for synonyms in WordNet.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "Usually adequacy, fluency, or a combination of the two.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "That would be assuming that all metrics operate in the same range of values, which is not always the case.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "http://www.nist.gov/speech/tests/ summaries/2005/mt05.htm 5 The IQMT Framework may be freely downloaded from http://www.lsi.upc.edu/\u02dcnlp/IQMT.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "Specifically, all systems are statistical except one which is human-aided.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [
{
"text": "This research has been funded by the Spanish Ministry of Education and Science, project OpenMT (TIN2006-15307-C03-02). Our NLP group has been recognized as a Quality Research Group (2005 SGR-00130) by DURSI, the Research Department of the Catalan Government. We are thankful to Enrique Amig\u00f3, for his generous help and valuable comments. We are also grateful to the NIST MT Evaluation Campaign organizers, and participants who agreed to share their system outputs and human assessments for the purpose of this research.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acknowledgements",
"sec_num": null
}
],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "A Reexamination of Machine Learning Approaches for Sentence-Level MT Evaluation",
"authors": [
{
"first": "Joshua",
"middle": [],
"last": "Albrecht",
"suffix": ""
},
{
"first": "Rebecca",
"middle": [],
"last": "Hwa",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of ACL",
"volume": "",
"issue": "",
"pages": "880--887",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Joshua Albrecht and Rebecca Hwa. 2007a. A Re- examination of Machine Learning Approaches for Sentence-Level MT Evaluation. In Proceedings of ACL, pages 880-887.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Regression for Sentence-Level MT Evaluation with Pseudo References",
"authors": [
{
"first": "Joshua",
"middle": [],
"last": "Albrecht",
"suffix": ""
},
{
"first": "Rebecca",
"middle": [],
"last": "Hwa",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of ACL",
"volume": "",
"issue": "",
"pages": "296--303",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Joshua Albrecht and Rebecca Hwa. 2007b. Regression for Sentence-Level MT Evaluation with Pseudo Refer- ences. In Proceedings of ACL, pages 296-303.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "QARLA: a Framework for the Evaluation of Automatic Sumarization",
"authors": [
{
"first": "Enrique",
"middle": [],
"last": "Amig\u00f3",
"suffix": ""
},
{
"first": "Julio",
"middle": [],
"last": "Gonzalo",
"suffix": ""
},
{
"first": "Anselmo",
"middle": [],
"last": "Pe\u00f1as",
"suffix": ""
},
{
"first": "Felisa",
"middle": [],
"last": "Verdejo",
"suffix": ""
}
],
"year": 2005,
"venue": "Proceedings of the 43th Annual Meeting of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Enrique Amig\u00f3, Julio Gonzalo, Anselmo Pe\u00f1as, and Fe- lisa Verdejo. 2005. QARLA: a Framework for the Evaluation of Automatic Sumarization. In Proceed- ings of the 43th Annual Meeting of the Association for Computational Linguistics.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "MT Evaluation: Human-Like vs. Human Acceptable",
"authors": [
{
"first": "Enrique",
"middle": [],
"last": "Amig\u00f3",
"suffix": ""
},
{
"first": "Jes\u00fas",
"middle": [],
"last": "Gim\u00e9nez",
"suffix": ""
},
{
"first": "Julio",
"middle": [],
"last": "Gonzalo",
"suffix": ""
},
{
"first": "Llu\u00eds",
"middle": [],
"last": "M\u00e0rquez",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of COLING-ACL06",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Enrique Amig\u00f3, Jes\u00fas Gim\u00e9nez, Julio Gonzalo, and Llu\u00eds M\u00e0rquez. 2006. MT Evaluation: Human-Like vs. Hu- man Acceptable. In Proceedings of COLING-ACL06.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "METEOR: An Automatic Metric for MT Evaluation with Improved Correlation with Human Judgments",
"authors": [
{
"first": "Satanjeev",
"middle": [],
"last": "Banerjee",
"suffix": ""
},
{
"first": "Alon",
"middle": [],
"last": "Lavie",
"suffix": ""
}
],
"year": 2005,
"venue": "Proceedings of ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Satanjeev Banerjee and Alon Lavie. 2005. METEOR: An Automatic Metric for MT Evaluation with Im- proved Correlation with Human Judgments. In Pro- ceedings of ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Re-evaluating the Role of BLEU in Machine Translation Research",
"authors": [
{
"first": "Chris",
"middle": [],
"last": "Callison",
"suffix": ""
},
{
"first": "-",
"middle": [],
"last": "Burch",
"suffix": ""
},
{
"first": "Miles",
"middle": [],
"last": "Osborne",
"suffix": ""
},
{
"first": "Philipp",
"middle": [],
"last": "Koehn",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of EACL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Chris Callison-Burch, Miles Osborne, and Philipp Koehn. 2006. Re-evaluating the Role of BLEU in Ma- chine Translation Research. In Proceedings of EACL.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "A Machine Learning Approach to the Automatic Evaluation of Machine Translation",
"authors": [
{
"first": "Simon",
"middle": [],
"last": "Corston",
"suffix": ""
},
{
"first": "-",
"middle": [],
"last": "Oliver",
"suffix": ""
},
{
"first": "Michael",
"middle": [],
"last": "Gamon",
"suffix": ""
},
{
"first": "Chris",
"middle": [],
"last": "Brockett",
"suffix": ""
}
],
"year": 2001,
"venue": "Proceedings of ACL",
"volume": "",
"issue": "",
"pages": "140--147",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Simon Corston-Oliver, Michael Gamon, and Chris Brockett. 2001. A Machine Learning Approach to the Automatic Evaluation of Machine Translation. In Proceedings of ACL, pages 140-147.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "The Limits of N-gram Translation Evaluation Metrics",
"authors": [
{
"first": "Christopher",
"middle": [],
"last": "Culy",
"suffix": ""
},
{
"first": "Susanne",
"middle": [
"Z"
],
"last": "Riehemann",
"suffix": ""
}
],
"year": 2003,
"venue": "Proceedings of MT-SUMMIT IX",
"volume": "",
"issue": "",
"pages": "1--8",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Christopher Culy and Susanne Z. Riehemann. 2003. The Limits of N-gram Translation Evaluation Metrics. In Proceedings of MT-SUMMIT IX, pages 1-8.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Automatic Evaluation of Machine Translation Quality Using N-gram Co-Occurrence Statistics",
"authors": [
{
"first": "George",
"middle": [],
"last": "Doddington",
"suffix": ""
}
],
"year": 2002,
"venue": "Proceedings of the 2nd IHLT",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "George Doddington. 2002. Automatic Evaluation of Machine Translation Quality Using N-gram Co- Occurrence Statistics. In Proceedings of the 2nd IHLT.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Sentence-Level MT evaluation without reference translations: beyond language modeling",
"authors": [
{
"first": "Michael",
"middle": [],
"last": "Gamon",
"suffix": ""
},
{
"first": "Anthony",
"middle": [],
"last": "Aue",
"suffix": ""
},
{
"first": "Martine",
"middle": [],
"last": "Smets",
"suffix": ""
}
],
"year": 2005,
"venue": "Proceedings of EAMT",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Michael Gamon, Anthony Aue, and Martine Smets. 2005. Sentence-Level MT evaluation without refer- ence translations: beyond language modeling. In Pro- ceedings of EAMT.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Linguistic Features for Automatic Evaluation of Heterogeneous MT Systems",
"authors": [
{
"first": "Jes\u00fas",
"middle": [],
"last": "Gim\u00e9nez",
"suffix": ""
},
{
"first": "Llu\u00eds",
"middle": [],
"last": "M\u00e0rquez",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the ACL Workshop on Statistical Machine Translation",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jes\u00fas Gim\u00e9nez and Llu\u00eds M\u00e0rquez. 2007. Linguistic Features for Automatic Evaluation of Heterogeneous MT Systems. In Proceedings of the ACL Workshop on Statistical Machine Translation.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "IQMT v 2.1. Technical Manual",
"authors": [
{
"first": "Jes\u00fas",
"middle": [],
"last": "Gim\u00e9nez",
"suffix": ""
}
],
"year": 2007,
"venue": "TALP Research Center. LSI Department",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jes\u00fas Gim\u00e9nez. 2007. IQMT v 2.1. Technical Manual. Technical report, TALP Research Center. LSI Department. http://www.lsi.upc.edu/\u02dcnlp/IQMT/- IQMT.v2.1.pdf.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Paraphrasing for Automatic Evaluation",
"authors": [
{
"first": "David",
"middle": [],
"last": "Kauchak",
"suffix": ""
},
{
"first": "Regina",
"middle": [],
"last": "Barzilay",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of NLH-NAACL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "David Kauchak and Regina Barzilay. 2006. Paraphras- ing for Automatic Evaluation. In Proceedings of NLH- NAACL.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Manual and Automatic Evaluation of Machine Translation between European Languages",
"authors": [
{
"first": "Philipp",
"middle": [],
"last": "Koehn",
"suffix": ""
},
{
"first": "Christof",
"middle": [],
"last": "Monz",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of the Workshop on Statistical Machine Translation",
"volume": "",
"issue": "",
"pages": "102--121",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Philipp Koehn and Christof Monz. 2006. Manual and Automatic Evaluation of Machine Translation between European Languages. In Proceedings of the Workshop on Statistical Machine Translation, pages 102-121.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "A learning approach to improving sentence-level MT evaluation",
"authors": [
{
"first": "Alex",
"middle": [],
"last": "Kulesza",
"suffix": ""
},
{
"first": "Stuart",
"middle": [
"M"
],
"last": "Shieber",
"suffix": ""
}
],
"year": 2004,
"venue": "Proceedings of the 10th International Conference on Theoretical and Methodological Issues in Machine Translation",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Alex Kulesza and Stuart M. Shieber. 2004. A learning approach to improving sentence-level MT evaluation. In Proceedings of the 10th International Conference on Theoretical and Methodological Issues in Machine Translation.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "NIST 2005 machine translation evaluation official results",
"authors": [
{
"first": "Audrey",
"middle": [],
"last": "Le",
"suffix": ""
},
{
"first": "Mark",
"middle": [],
"last": "Przybocki",
"suffix": ""
}
],
"year": 2005,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Audrey Le and Mark Przybocki. 2005. NIST 2005 ma- chine translation evaluation official results. Technical report, NIST, August.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Automatic Evaluation of Machine Translation Quality Using Longest Common Subsequence and Skip-Bigram Statics",
"authors": [
{
"first": "Chin-Yew",
"middle": [],
"last": "Lin",
"suffix": ""
},
{
"first": "Franz Josef",
"middle": [],
"last": "Och",
"suffix": ""
}
],
"year": 2004,
"venue": "Proceedings of ACL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Chin-Yew Lin and Franz Josef Och. 2004a. Auto- matic Evaluation of Machine Translation Quality Us- ing Longest Common Subsequence and Skip-Bigram Statics. In Proceedings of ACL.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "ORANGE: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
"authors": [
{
"first": "Chin-Yew",
"middle": [],
"last": "Lin",
"suffix": ""
},
{
"first": "Franz Josef",
"middle": [],
"last": "Och",
"suffix": ""
}
],
"year": 2004,
"venue": "Proceedings of COLING",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Chin-Yew Lin and Franz Josef Och. 2004b. ORANGE: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation. In Proceedings of COLING.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "Syntactic Features for Evaluation of Machine Translation",
"authors": [
{
"first": "Ding",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Daniel",
"middle": [],
"last": "Gildea",
"suffix": ""
}
],
"year": 2005,
"venue": "Proceedings of ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Ding Liu and Daniel Gildea. 2005. Syntactic Features for Evaluation of Machine Translation. In Proceed- ings of ACL Workshop on Intrinsic and Extrinsic Eval- uation Measures for Machine Translation and/or Sum- marization.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "Source-Language Features and Maximum Correlation Training for Machine Translation Evaluation",
"authors": [
{
"first": "Ding",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Daniel",
"middle": [],
"last": "Gildea",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the 2007 Meeting of the North American chapter of the Association for Computational Linguistics (NAACL-07)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Ding Liu and Daniel Gildea. 2007. Source-Language Features and Maximum Correlation Training for Ma- chine Translation Evaluation. In Proceedings of the 2007 Meeting of the North American chapter of the As- sociation for Computational Linguistics (NAACL-07).",
"links": null
},
"BIBREF20": {
"ref_id": "b20",
"title": "BLEUATRE: Flattening Syntactic Dependencies for MT Evaluation",
"authors": [
{
"first": "Dennis",
"middle": [],
"last": "Mehay",
"suffix": ""
},
{
"first": "Chris",
"middle": [],
"last": "Brew",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the 11th Conference on Theoretical and Methodological Issues in Machine Translation (TMI)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Dennis Mehay and Chris Brew. 2007. BLEUATRE: Flattening Syntactic Dependencies for MT Evaluation. In Proceedings of the 11th Conference on Theoreti- cal and Methodological Issues in Machine Translation (TMI).",
"links": null
},
"BIBREF21": {
"ref_id": "b21",
"title": "Precision and Recall of Machine Translation",
"authors": [
{
"first": "I",
"middle": [],
"last": "",
"suffix": ""
},
{
"first": "Dan",
"middle": [],
"last": "Melamed",
"suffix": ""
},
{
"first": "Ryan",
"middle": [],
"last": "Green",
"suffix": ""
},
{
"first": "Joseph",
"middle": [
"P"
],
"last": "Turian",
"suffix": ""
}
],
"year": 2003,
"venue": "Proceedings of HLT/NAACL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "I. Dan Melamed, Ryan Green, and Joseph P. Turian. 2003. Precision and Recall of Machine Translation. In Proceedings of HLT/NAACL.",
"links": null
},
"BIBREF22": {
"ref_id": "b22",
"title": "Evaluation Tool for Machine Translation: Fast Evaluation for MT Research",
"authors": [
{
"first": "Sonja",
"middle": [],
"last": "Nie\u00dfen",
"suffix": ""
},
{
"first": "Franz",
"middle": [
"Josef"
],
"last": "Och",
"suffix": ""
},
{
"first": "Gregor",
"middle": [],
"last": "Leusch",
"suffix": ""
},
{
"first": "Hermann",
"middle": [],
"last": "Ney",
"suffix": ""
}
],
"year": 2000,
"venue": "Proceedings of the 2nd LREC",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sonja Nie\u00dfen, Franz Josef Och, Gregor Leusch, and Her- mann Ney. 2000. Evaluation Tool for Machine Trans- lation: Fast Evaluation for MT Research. In Proceed- ings of the 2nd LREC.",
"links": null
},
"BIBREF23": {
"ref_id": "b23",
"title": "Contextual Bitext-Derived Paraphrases in Automatic MT Evaluation",
"authors": [
{
"first": "Karolina",
"middle": [],
"last": "Owczarzak",
"suffix": ""
},
{
"first": "Declan",
"middle": [],
"last": "Groves",
"suffix": ""
},
{
"first": "Josef",
"middle": [],
"last": "Van Genabith",
"suffix": ""
},
{
"first": "Andy",
"middle": [],
"last": "Way",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas (AMTA)",
"volume": "",
"issue": "",
"pages": "148--155",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Karolina Owczarzak, Declan Groves, Josef Van Gen- abith, and Andy Way. 2006. Contextual Bitext- Derived Paraphrases in Automatic MT Evaluation. In Proceedings of the 7th Conference of the Associa- tion for Machine Translation in the Americas (AMTA), pages 148-155.",
"links": null
},
"BIBREF24": {
"ref_id": "b24",
"title": "Dependency-Based Automatic Evaluation for Machine Translation",
"authors": [
{
"first": "Karolina",
"middle": [],
"last": "Owczarzak",
"suffix": ""
},
{
"first": "Josef",
"middle": [],
"last": "Van Genabith",
"suffix": ""
},
{
"first": "Andy",
"middle": [],
"last": "Way",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of SSST, NAACL-HLT/AMTA Workshop on Syntax and Structure in Statistical Translation",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Karolina Owczarzak, Josef van Genabith, and Andy Way. 2007. Dependency-Based Automatic Evalua- tion for Machine Translation. In Proceedings of SSST, NAACL-HLT/AMTA Workshop on Syntax and Struc- ture in Statistical Translation.",
"links": null
},
"BIBREF25": {
"ref_id": "b25",
"title": "Bleu: a method for automatic evaluation of machine translation, RC22176, IBM",
"authors": [
{
"first": "Kishore",
"middle": [],
"last": "Papineni",
"suffix": ""
},
{
"first": "Salim",
"middle": [],
"last": "Roukos",
"suffix": ""
},
{
"first": "Todd",
"middle": [],
"last": "Ward",
"suffix": ""
},
{
"first": "Wei-Jing",
"middle": [],
"last": "Zhu",
"suffix": ""
}
],
"year": 2001,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2001. Bleu: a method for automatic evalu- ation of machine translation, RC22176, IBM. Techni- cal report, IBM T.J. Watson Research Center.",
"links": null
},
"BIBREF26": {
"ref_id": "b26",
"title": "Training a Sentence-Level Machine Translation Confidence Metric",
"authors": [
{
"first": "Chris",
"middle": [],
"last": "Quirk",
"suffix": ""
}
],
"year": 2004,
"venue": "Proceedings of LREC",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Chris Quirk. 2004. Training a Sentence-Level Ma- chine Translation Confidence Metric. In Proceedings of LREC.",
"links": null
},
"BIBREF27": {
"ref_id": "b27",
"title": "A Study of Translation Edit Rate with Targeted Human Annotation",
"authors": [
{
"first": "Matthew",
"middle": [],
"last": "Snover",
"suffix": ""
},
{
"first": "Bonnie",
"middle": [],
"last": "Dorr",
"suffix": ""
},
{
"first": "Richard",
"middle": [],
"last": "Schwartz",
"suffix": ""
},
{
"first": "Linnea",
"middle": [],
"last": "Micciulla",
"suffix": ""
},
{
"first": "John",
"middle": [],
"last": "Makhoul",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of AMTA",
"volume": "",
"issue": "",
"pages": "223--231",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Matthew Snover, Bonnie Dorr, Richard Schwartz, Lin- nea Micciulla, , and John Makhoul. 2006. A Study of Translation Edit Rate with Targeted Human Anno- tation. In Proceedings of AMTA, pages 223-231.",
"links": null
},
"BIBREF28": {
"ref_id": "b28",
"title": "Re-evaluating Machine Translation Results with Paraphrase Support",
"authors": [
{
"first": "Liang",
"middle": [],
"last": "Zhou",
"suffix": ""
},
{
"first": "Chin-Yew",
"middle": [],
"last": "Lin",
"suffix": ""
},
{
"first": "Eduard",
"middle": [],
"last": "Hovy",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of EMNLP",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Liang Zhou, Chin-Yew Lin, and Eduard Hovy. 2006. Re-evaluating Machine Translation Results with Para- phrase Support. In Proceedings of EMNLP.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"text": "Opt.K(AE.04) = {SP-NISTp} Opt.K(CE.04) = {ROUGEW , SP-NISTp, ROUGEL} Opt.K(AE.05) = {METEORwnsyn, SP-NISTp, DP-Or-*} Opt.K(CE.05) = {SP-NISTp} Opt.R(AE.04) = {ROUGEW , ROUGEL, CP-Oc-*, METEORwnsyn, DP-Or-*, DP-O l -*, GTM.e2, DR-Or-*, CP-STM} Opt.R(CE.04) = {ROUGEL, CP-Oc-*, ROUGEW , SP-Op-*, METEORwnsyn, DP-Or-*, GTM.e2, 1-WER, DR-Or-*} Opt.R(AE.05) = {DP-Or-*, ROUGEW } Opt.R(CE.05) = {ROUGEW , ROUGEL, DP GTM.e2,",
"type_str": "figure",
"num": null,
"uris": null
},
"TABREF1": {
"type_str": "table",
"text": "",
"num": null,
"content": "<table><tr><td>: Description of the test beds</td></tr><tr><td>evaluate metric quality in terms of correlation with</td></tr><tr><td>human assessments at the sentence level (R snt ). We</td></tr><tr><td>use the sum of adequacy and fluency to simulate a</td></tr><tr><td>global assessment of quality.</td></tr></table>",
"html": null
},
"TABREF2": {
"type_str": "table",
"text": "",
"num": null,
"content": "<table><tr><td>shows meta-</td></tr></table>",
"html": null
},
"TABREF3": {
"type_str": "table",
"text": "",
"num": null,
"content": "<table><tr><td>: Metric Meta-evaluation</td></tr></table>",
"html": null
},
"TABREF4": {
"type_str": "table",
"text": "Optimal metric sets",
"num": null,
"content": "<table/>",
"html": null
}
}
}
} |