{
"paper_id": "I11-1004",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T07:31:37.047717Z"
},
"title": "Extracting Pre-ordering Rules from Predicate-Argument Structures",
"authors": [
{
"first": "Xianchao",
"middle": [],
"last": "Wu",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "NTT Corporation",
"location": {
"addrLine": "2-4 Hikaridai Seika-cho, Soraku-gun Kyoto",
"postCode": "619-0237",
"country": "Japan"
}
},
"email": "wu.xianchao@lab.ntt.co.jp"
},
{
"first": "Katsuhito",
"middle": [],
"last": "Sudoh",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "NTT Corporation",
"location": {
"addrLine": "2-4 Hikaridai Seika-cho, Soraku-gun Kyoto",
"postCode": "619-0237",
"country": "Japan"
}
},
"email": "sudoh.katsuhito@lab.ntt.co.jp"
},
{
"first": "Kevin",
"middle": [],
"last": "Duh",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "NTT Corporation",
"location": {
"addrLine": "2-4 Hikaridai Seika-cho, Soraku-gun Kyoto",
"postCode": "619-0237",
"country": "Japan"
}
},
"email": "kevin.duh@lab.ntt.co.jp"
},
{
"first": "Hajime",
"middle": [],
"last": "Tsukada",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "NTT Corporation",
"location": {
"addrLine": "2-4 Hikaridai Seika-cho, Soraku-gun Kyoto",
"postCode": "619-0237",
"country": "Japan"
}
},
"email": "tsukada.hajime@lab.ntt.co.jp"
},
{
"first": "Masaaki",
"middle": [],
"last": "Nagata",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "NTT Corporation",
"location": {
"addrLine": "2-4 Hikaridai Seika-cho, Soraku-gun Kyoto",
"postCode": "619-0237",
"country": "Japan"
}
},
"email": "nagata.masaaki@lab.ntt.co.jp"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "Word ordering remains as an essential problem for translating between languages with substantial structural differences, such as SOV and SVO languages. In this paper, we propose to automatically extract pre-ordering rules from predicateargument structures. A pre-ordering rule records the relative position mapping of a predicate word and its argument phrases from the source language side to the target language side. We propose 1) a lineartime algorithm to extract the pre-ordering rules from word-aligned HPSG-tree-tostring pairs and 2) a bottom-up algorithm to apply the extracted rules to HPSG trees to yield target language style source sentences. Experimental results are reported for large-scale English-to-Japanese translation, showing significant improvements of BLEU score compared with the baseline SMT systems.",
"pdf_parse": {
"paper_id": "I11-1004",
"_pdf_hash": "",
"abstract": [
{
"text": "Word ordering remains as an essential problem for translating between languages with substantial structural differences, such as SOV and SVO languages. In this paper, we propose to automatically extract pre-ordering rules from predicateargument structures. A pre-ordering rule records the relative position mapping of a predicate word and its argument phrases from the source language side to the target language side. We propose 1) a lineartime algorithm to extract the pre-ordering rules from word-aligned HPSG-tree-tostring pairs and 2) a bottom-up algorithm to apply the extracted rules to HPSG trees to yield target language style source sentences. Experimental results are reported for large-scale English-to-Japanese translation, showing significant improvements of BLEU score compared with the baseline SMT systems.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Statistical machine translation (SMT) suffers from an essential problem for translating between languages with substantial structural differences, such as between English which is a subject-verbobject (SVO) language and Japanese which is a typical subject-object-verb (SOV) language.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Numerous approaches have been consequently proposed to tackle this word-order problem, such as lexicalized reordering methods, syntax-based models, and pre-ordering ways. First, in order to overcome the shortages of traditional distance based distortion models (Brown et al., 1993; , phrase dependent lexicalized reordering models were proposed by several researchers (Tillman, 2004; Kumar and Byrne, 2005) . Lexicalized reordering models learn local orientations (monotone or reordering) with probabilities for each bilingual phrase from the training data. For example, by taking lexical information as features, a maximum entropy phrase reordering model was proposed by Xiong et al. (2006) .",
"cite_spans": [
{
"start": 261,
"end": 281,
"text": "(Brown et al., 1993;",
"ref_id": "BIBREF0"
},
{
"start": 368,
"end": 383,
"text": "(Tillman, 2004;",
"ref_id": "BIBREF20"
},
{
"start": 384,
"end": 406,
"text": "Kumar and Byrne, 2005)",
"ref_id": "BIBREF11"
},
{
"start": 672,
"end": 691,
"text": "Xiong et al. (2006)",
"ref_id": "BIBREF26"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Second, syntax-based models attempt to solve the word ordering problem by employing syntactic structures. For example, linguistically syntaxbased approaches (Galley et al., 2004; first parse source and/or target sentences and then learn reordering templates from the subtree fragments of the parse trees. In contrast, hierarchical phrase based translation (Chiang, 2005 ) is a formally syntax-based approach which can automatically extract hierarchical ordering rules from aligned string-string pairs without using additional parsers. These approaches have been proved to be both algorithmically appealing and empirically successful.",
"cite_spans": [
{
"start": 157,
"end": 178,
"text": "(Galley et al., 2004;",
"ref_id": "BIBREF3"
},
{
"start": 356,
"end": 369,
"text": "(Chiang, 2005",
"ref_id": "BIBREF1"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "However, most of current syntax-based SMT systems use IBM models (Brown et al., 1993 ) and hidden Markov model (HMM) (Vogel et al., 1996) to generate word alignments. These models have a penalty parameter associated with long distance jumps, and tend to misalign words which move far from the window sizes of their expected positions (Xu et al., 2009; Genzel, 2010) .",
"cite_spans": [
{
"start": 65,
"end": 84,
"text": "(Brown et al., 1993",
"ref_id": "BIBREF0"
},
{
"start": 117,
"end": 137,
"text": "(Vogel et al., 1996)",
"ref_id": "BIBREF21"
},
{
"start": 334,
"end": 351,
"text": "(Xu et al., 2009;",
"ref_id": "BIBREF27"
},
{
"start": 352,
"end": 365,
"text": "Genzel, 2010)",
"ref_id": "BIBREF4"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The third type tackles the word-order problem in pre-ordering ways. Through the usage of a sequence of pre-ordering rules, the word order of an original source sentence is (approximately) changed into the word order of the target sentence. Here, the pre-ordering rules can be manually or automatically extracted. For manual extraction of pre-ordering rules, linguistic background and expertise are required for predetermined language pairs, such as for German-English (Collins et al., 2005) , Chinese-to-English (Wang et al., 2007) , Japanese-to-English (Katz-Brown and , and English-to-SOV languages (Xu et al., 2009) . Specially, for English-to-Japanese translation, Isozaki et al. (2010b) proposed to move syntactic or semantic heads to the end of corresponding phrases or clauses so that to yield head finalized English (HFE) sentences which follow the word order of Japanese. The head information of an English sentence is detected by a head-driven phrase structure grammar (HPSG) parser, Enju 1 (Miyao and Tsujii, 2008) . In addition, transformation rules were manually written for appending particle seed words, refining POS tags to be used before parsing, and deleting English determiners. Due to the usage of the same parser, we take this HFE approach as one of our baseline systems.",
"cite_spans": [
{
"start": 468,
"end": 490,
"text": "(Collins et al., 2005)",
"ref_id": "BIBREF2"
},
{
"start": 512,
"end": 531,
"text": "(Wang et al., 2007)",
"ref_id": "BIBREF22"
},
{
"start": 601,
"end": 618,
"text": "(Xu et al., 2009)",
"ref_id": "BIBREF27"
},
{
"start": 669,
"end": 691,
"text": "Isozaki et al. (2010b)",
"ref_id": "BIBREF6"
},
{
"start": 1001,
"end": 1025,
"text": "(Miyao and Tsujii, 2008)",
"ref_id": "BIBREF13"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The goal in this paper, however, is to learn preordering rules from parallel data in an automatic way. Under this motivation, pre-ordering rules can be extracted in a language-independent manner. A number of researches follow this automatic way. For example, in (Xia and McCord, 2004) , a variety of heuristic rules were applied to bilingual parse trees to extract pre-ordering rules for French-English translation. Rottmann and Vogen (2007) learned reordering rules based on sequences of part-of-speech (POS) tags, instead of parse trees. Dependency trees were used by Genzel (2010) to extract source-side reordering rules for translating languages from SVO to SOV, etc..",
"cite_spans": [
{
"start": 262,
"end": 284,
"text": "(Xia and McCord, 2004)",
"ref_id": "BIBREF25"
},
{
"start": 416,
"end": 441,
"text": "Rottmann and Vogen (2007)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The novel idea expressed in this paper is that, predicate-argument structures (PASs) are introduced to extract fine-grained pre-ordering rules. PASs have the following merits for describing reordering phenomena:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "\u2022 predicate words and argument phrases respectively record reordering phenomena in a lexicalized level and an abstract level;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "\u2022 PASs provide a fine-grained classification of the reordering phenomena since they include factored representations of syntactic features of the predicate words and their argument phrases.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The idea of using PASs for pre-ordering follows (Komachi et al., 2006) . Several reordering operations were manually designed by Komachi et al. (2006) to pre-ordering Japanese sentences into SVO-style English sentences. For comparison, our proposal 1) makes use of not only PASs but also the source syntactic tree structures for preordering rule matching, 2) extracts pre-ordering 1 http://www-tsujii.is.s.u-tokyo.ac.jp/enju/index.html rules in an automatic way, and 3) use factored representations of syntactic features to refine the preordering rules.",
"cite_spans": [
{
"start": 48,
"end": 70,
"text": "(Komachi et al., 2006)",
"ref_id": "BIBREF10"
},
{
"start": 129,
"end": 150,
"text": "Komachi et al. (2006)",
"ref_id": "BIBREF10"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Following (Wu et al., 2010a; Isozaki et al., 2010b) , we use the HPSG parser Enju to generate the PASs of English sentences. HPSG (Pollard and Sag, 1994 ) is a lexicalist grammar framework. In HPSG, linguistic entities such as words and phrases are represented by a data structure called a sign. A sign gives a factored representation of the syntactic features of a word/phrase, as well as a representation of their semantic content which corresponds to PASs.",
"cite_spans": [
{
"start": 10,
"end": 28,
"text": "(Wu et al., 2010a;",
"ref_id": "BIBREF23"
},
{
"start": 29,
"end": 51,
"text": "Isozaki et al., 2010b)",
"ref_id": "BIBREF6"
},
{
"start": 130,
"end": 152,
"text": "(Pollard and Sag, 1994",
"ref_id": "BIBREF17"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In order to record the relative positions among a predicate word and its argument phrases, we propose a linear-time algorithm to extract preordering rules from word-aligned HPSG-tree-tostring pairs 2 . The syntactic features included in signs and the types of PASs enable us to extract fine-grained pre-ordering rules and thus make it easier to select appropriate rules for given source HPSG trees. We further propose a bottom-up algorithm to apply the extracted rules to HPSG trees to pre-order source sentences. Using the preordered source sentences, we retrain word alignments again.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The remaining of this paper is organized as follows. In the next section, we describe the algorithms guided by using a real example for extracting and applying PAS-based pre-ordering rules. Then, we design experiments on large-scale English-to-Japanese translation to testify our proposal. Employing Moses , we show that our proposal can significantly improve BLEU scores of 2.47\u223c3.15 points compared with using the original English sentences. We finally conclude this paper by summarizing our proposal and the experiment results. denote non-terminal nodes (e.g., c0, c1), and the identifiers that start with 't' denote terminal nodes (e.g., t0, t2). In a complete HPSG tree (Wu et al., 2010b) , factored syntactic features listed in Table 1 are included in the terminal and nonterminal signs. These features are used by us to sub-categorize pre-ordering rules. As an example of the XML output of Enju, the signs of \"when\" (t0) and its arguments c16, c3 are shown in the top-left corner of Figure 1.",
"cite_spans": [
{
"start": 675,
"end": 693,
"text": "(Wu et al., 2010b)",
"ref_id": "BIBREF24"
}
],
"ref_spans": [
{
"start": 734,
"end": 741,
"text": "Table 1",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "\u6d41 \u4f53 0 \u5727 1 \u30b7 \u30ea \u30f3 \u30c0 \u308c \u308b 12 \u3053 \u3068 13 31 3 \u306e 4 \u5834 \u5408 5 \u306f 6 \u6d41 \u4f53 7 \u304c 8 \u5f90 \u3005 \u306b 9 \u6392 \u51fa 10 \u3055 11 \u306a",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "We define the following data structures for both extracting and applying pre-ordering rules. First, a PAS-based pre-ordering rule is defined to be a four-tuple <pw, args, srcOrder, trgOrder>. Here, pw is the predicate word, args are the argument nodes of pw, and srcOrder and trgOrder respectively record the relative positions among pw and args in the source and target language sides. Then, we suppose an HPSG tree/subtree object contains the following methods:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Data structures",
"sec_num": "2.2"
},
{
"text": "\u2022 localize(): localize syntactic/semantic heads;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Data structures",
"sec_num": "2.2"
},
{
"text": "\u2022 computeSrcSpans(): topologically compute the source span of each node;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Data structures",
"sec_num": "2.2"
},
{
"text": "\u2022 computeSpans(A): topologically compute the source and target spans of each node (Galley et al., 2004) . A is the word alignment;",
"cite_spans": [
{
"start": 82,
"end": 103,
"text": "(Galley et al., 2004)",
"ref_id": "BIBREF3"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Data structures",
"sec_num": "2.2"
},
{
"text": "\u2022 getArgs(pw): return the argument nodes of pw;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Data structures",
"sec_num": "2.2"
},
{
"text": "Name Description Examples WORD surface word form \"when\" BASE base word form \"when\" POS part-of-speech WRB (\"when\") LE lexical entry [when] (\"when\") PRED type of predicate conj arg12 argument structure (\"when\") CAT syntactic category SC (\"when\") TENSE tense of a verb (past, present (\"used\") present, untensed) ASPECT aspect of a verb none (\"used\") (none, prefect, progressive, prefect-progressive) VOICE voice of a verb passive (\"used\") (passive, active) AUX auxiliary verb or not minus (\"used\") (minus, modal, have, be, do, to, copular) CAT syntactic category S (c16), S (c3) XCAT extended category HEAD syntactic head R (c16), R(c3) SEM HEAD semantic head R (c16), R (c3) SCHEMA schema rule mod head (c16) \u2022 MCT(pw, args): return the minimum cover tree (Wu et al., 2010a) of pw and args.",
"cite_spans": [
{
"start": 755,
"end": 773,
"text": "(Wu et al., 2010a)",
"ref_id": "BIBREF23"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Data structures",
"sec_num": "2.2"
},
{
"text": "To implement the localize() method, we use the approach described in (Wu et al., 2010a) . That is, we replace the pointer values of HEAD and SEM HEAD features in non-terminal nodes with three labels: \"S\" for single daughter, \"L\" for the left-hand-side daughter, and \"R\" for the right-hand-side daughter. For example, for node c16 in Figure 1 , its HEAD and SEM HEAD will change from c18 to \"R\".",
"cite_spans": [
{
"start": 69,
"end": 87,
"text": "(Wu et al., 2010a)",
"ref_id": "BIBREF23"
}
],
"ref_spans": [
{
"start": 333,
"end": 341,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Data structures",
"sec_num": "2.2"
},
{
"text": "We use the concept of minimum covering trees (MCT) defined in (Wu et al., 2010b) to guide the pre-ordering process. A MCT is a subtree of the original HPSG tree that takes a predicate node and its argument nodes as (new) leaf nodes. For example, as shown in the top-right corner of Figure 1 , the MCT of \"when\" (t0) and its argument nodes c3, c16 is \"c0(c1(c2(t0)c3)c16)\".",
"cite_spans": [
{
"start": 62,
"end": 80,
"text": "(Wu et al., 2010b)",
"ref_id": "BIBREF24"
}
],
"ref_spans": [
{
"start": 282,
"end": 290,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Data structures",
"sec_num": "2.2"
},
{
"text": "Finally, the attributes in the nodes of an HPSG tree include: 1) pred: the PAS of a leaf node, 2) srcSpan: the index set of the source words that current node covers, 3) trgSpan: the index set of the target words that srcSpan aligned to, and 4) sr-cPhrase that stores the pre-ordered source phrase covered by current node.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Data structures",
"sec_num": "2.2"
},
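{
"text": "To make these data structures concrete, the following short Python sketch (ours, not part of the paper; the class and field names are illustrative assumptions) encodes the pre-ordering rule four-tuple and the node attributes pred, srcSpan, trgSpan, and srcPhrase:\n\nfrom dataclasses import dataclass, field\nfrom typing import List, Optional, Set\n\n@dataclass(eq=False)\nclass Node:\n    # One HPSG tree node; only the attributes named in this section are kept.\n    ident: str                                            # e.g. 't0' or 'c16'\n    pred: Optional[str] = None                            # PAS type of a leaf node, e.g. 'conj_arg12'\n    src_span: Set[int] = field(default_factory=set)       # indices of the source words covered\n    trg_span: Set[int] = field(default_factory=set)       # indices of the target words aligned to src_span\n    src_phrase: List[str] = field(default_factory=list)   # pre-ordered source phrase of this node\n    children: List['Node'] = field(default_factory=list)\n\n@dataclass(frozen=True)\nclass PreorderingRule:\n    # The four-tuple <pw, args, srcOrder, trgOrder>.\n    pw: str            # predicate word (plus any sub-categorizing features)\n    args: tuple        # keys describing the argument nodes\n    src_order: tuple   # relative positions on the source side, e.g. (0, 1, 2)\n    trg_order: tuple   # relative positions on the target side, e.g. (1, 0, 2)\n\nThe rule extracted for \"when\" in the next section, \"t0 c3 c16 \u2192 c3 t0 c16\", would be stored with src_order=(0, 1, 2) and trg_order=(1, 0, 2).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Data structures",
"sec_num": "2.2"
},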
{
"text": "We express the idea for extracting PAS-based preordering rules by using the first word \"when\" of the English sentence in Figure 1 . Given the PAS information of \"when\" (t0) in the English side, we need to determine the target-side-order among t0 and its two arguments c16, c3. To achieve this, we compute the target spans of these three nodes by using current word alignment and then sort their target spans. Through referring to the word alignment shown in Figure 1 , we can collect the target spans which are {5}, {4,0,1,2,3,6,15}, and {7,8,9,10,11,12,13} respectively for t0, c3, and c16. However, we cannot sort these three spans since there are overlapping between the first two spans 3 . In order to solve this problem, we sort the spans in a heuristic way. Note that in c3's target span, five indices are smaller than 5 yet only two indices are larger than 5. Thus, we take {4,0,1,2,3,6,15} to be dominantly smaller than {5}. Now, we can determine the pre-order rule guided by the PAS of t0 to be \"t0 c3 c16 \u2192 c3 t0 c16\" and formally to be \"t0 0 c3 1 c16 2 \u2192 1 0 2\". Generally, we use the following heuristic rules to sort two spans, named span A and span B:",
"cite_spans": [],
"ref_spans": [
{
"start": 121,
"end": 129,
"text": "Figure 1",
"ref_id": "FIGREF0"
},
{
"start": 458,
"end": 466,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Rule extraction algorithm",
"sec_num": "2.3"
},
{
"text": "\u2022 if more than half of numbers in A is bigger than the maximum number in B, or if more than half of numbers in B is smaller than the minimum number in A, then B < A;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Rule extraction algorithm",
"sec_num": "2.3"
},
{
"text": "\u2022 if more than half of the numbers in B are bigger than the maximum number in A, or if more than half of the numbers in A are smaller than the minimum number in B, then A < B.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Rule extraction algorithm",
"sec_num": "2.3"
},
{
"text": "Algorithm 1 Pre-ordering Rule Extraction. Input: HPSG tree TE of an English sentence E, word alignment A. Output: a pre-ordering rule set R. 1: TE.localize() 2: TE.computeSpans(A) 3: for each leaf node t of TE do 4: if t.pred is opened and t.trgSpan != NULL then 5: Node[] args \u2190 TE.getArgs(t) 6: if all nodes in args are aligned then 7: int[] srcOrder \u2190 SORTSPANS(t.srcSpan, srcSpans of args) 8: int[] trgOrder \u2190 SORTSPANS(t.trgSpan, trgSpans of args) 9: R.add(<t, args, srcOrder, trgOrder>) 10: end if 11: end if 12: end for",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Rule extraction algorithm",
"sec_num": "2.3"
},
{
"text": "In case of a tie (e.g., A={3,4,7,8}, B={5,6}), we keep the original order of A and B in the sourceside sentence without any reordering. Algorithm 1 sketches the pre-ordering rule extraction algorithm guided by PASs. The algorithm collect pre-ordering rules through a traversal of the leaf nodes in an HPSG tree. A non-terminal node will not be accessed unless it is an argument of some predicate node(s). Thus, this algorithm runs in a time that is approximately linear to the number of leaf nodes in the tree, i.e., the number of words in the source sentence.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Rule extraction algorithm",
"sec_num": "2.3"
},
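{
"text": "The following Python sketch (ours; the paper gives no code for SORTSPANS, so all function names are illustrative) implements the span-sorting heuristic and the tie rule described above, and reproduces the worked example from Figure 1. Spans are assumed to be non-empty sets of word indices, since Algorithm 1 skips unaligned nodes:\n\ndef precedes(b, a):\n    # True when span b should come before span a ('B < A') under the heuristic:\n    # more than half of the numbers in a are bigger than max(b), or\n    # more than half of the numbers in b are smaller than min(a).\n    return (sum(x > max(b) for x in a) * 2 > len(a)\n            or sum(x < min(a) for x in b) * 2 > len(b))\n\ndef sort_spans(spans):\n    # Stable insertion sort: an item moves left only when the heuristic says it\n    # precedes its neighbour, so ties keep the original source-side order.\n    order = list(range(len(spans)))\n    for i in range(1, len(order)):\n        j = i\n        while j > 0 and precedes(spans[order[j]], spans[order[j - 1]]):\n            order[j - 1], order[j] = order[j], order[j - 1]\n            j -= 1\n    return order\n\n# Worked example: target spans of t0, c3, and c16 in Figure 1.\nspans = [{5}, {4, 0, 1, 2, 3, 6, 15}, {7, 8, 9, 10, 11, 12, 13}]\nprint(sort_spans(spans))   # [1, 0, 2], i.e. the target order c3 t0 c16\n\nNote that the pairwise heuristic is not guaranteed to be transitive; the insertion sort above simply applies it locally, which matches the greedy spirit of the description.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Rule extraction algorithm",
"sec_num": "2.3"
},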
{
"text": "We define that a terminal node's PAS is opened if at least one of its arguments is neither empty nor unknown. We will not extract a pre-ordering rule if the terminal node is unaligned or any of its argument node is unaligned. These constraints are reflected by Line 4 and 6 in Algorithm 1. After heuristically sorting the source/target spans of a predicate node and its argument nodes, we finally extract a pre-ordering rule. Table 2 summarizes the PAS-based pre-ordering rules extracted from the example shown in Figure 1. Application of these pre-ordering rules to the original English sentence yields the following Japanese style sentence:",
"cite_spans": [],
"ref_spans": [
{
"start": 426,
"end": 433,
"text": "Table 2",
"ref_id": "TABREF4"
},
{
"start": 514,
"end": 520,
"text": "Figure",
"ref_id": null
}
],
"eq_spans": [],
"section": "Rule extraction algorithm",
"sec_num": "2.3"
},
{
"text": "\u2022 the fluid pressure cylinder 31 is used when, fluid is gradually applied. Algorithm 2 sketches the algorithm for applying pre-ordering rules to a given HPSG tree T E . The algorithm contains three parts: rule matching (Lines 4-12), bottom-up rule applying (Lines 13-19), and sentence collecting (Lines 20-26). We first retrieve available pre-ordering rules from rule set R by a left-to-right traversal of the leaf nodes of T E . For each leaf node, we select one preordering rule with the highest frequency. Our experiments testified that this greedy rule selection strategy worked quite well. We selected 93% of the top frequent rule without facing a tie.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Rule extraction algorithm",
"sec_num": "2.3"
},
{
"text": "The terminal node t, the argument nodes of t, and their source-side ordering are taken as the key for rule matching. Available rules will be assigned to the MCT of t. Then, we apply the available rules to the root nodes of each MCT through a bottom-up traversal of T E . A competitive problem is that, a non-terminal node can be shared by several MCTs. For example, node c3 and c18 (gray color) in Figure 1 are respectively shared by two MCTs (t6 and t7, t10 and t12). In order to avoid duplicated reordering of these nodes, we first pick the pre-ordering rule in which there are no \"gaps\" among the predicate words and argument phrases. For example, there is a gap (t6) between t7 and its argument node c4. We then pick a rule by frequency if there are still more than one rule available. Finally, after applying all available rules, we collect the pre-ordered source sentence from the root node of the HPSG tree.",
"cite_spans": [],
"ref_spans": [
{
"start": 398,
"end": 406,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Applying pre-ordering rules",
"sec_num": "2.4"
},
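{
"text": "Algorithm 2 itself is not reproduced in this parse, so the following Python sketch is only our reading of the description above: greedy selection of the most frequent matching rule, and bottom-up collection of the pre-ordered phrase, with all names illustrative. Rule matching and the gap-based filtering of shared nodes are assumed to have already produced the 'applied' map from MCT root nodes to rules:\n\nfrom collections import Counter\nfrom dataclasses import dataclass, field\nfrom typing import List, Optional\n\n@dataclass(eq=False)\nclass TreeNode:\n    word: Optional[str] = None                             # set for terminal nodes\n    children: List['TreeNode'] = field(default_factory=list)\n\n@dataclass\nclass MatchedRule:\n    parts: list        # the predicate node and its argument nodes, in source order\n    trg_order: list    # their relative positions on the target side\n\ndef pick_rule(candidates: Counter):\n    # Greedy selection: take the most frequent rule for a matching key;\n    # the paper reports that 93% of selections involved no tie.\n    return candidates.most_common(1)[0][0] if candidates else None\n\ndef collect(node, applied):\n    # Bottom-up collection of the pre-ordered source phrase. A node covered by\n    # a rule emits its parts in target order; other nodes are read left to right.\n    if node in applied:\n        rule = applied[node]\n        return [w for i in rule.trg_order for w in collect(rule.parts[i], applied)]\n    if node.word is not None:\n        return [node.word]\n    return [w for child in node.children for w in collect(child, applied)]",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Applying pre-ordering rules",
"sec_num": "2.4"
},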
{
"text": "We test our proposal by translating from English to Japanese. We use the NTCIR-9 English-Japanese patent corpus 4 as our experiment set. Since the reference set of the official test set has not been released yet, we instead split the original development set averagely into two parts, named dev.a and dev.b. In our experiments, we first take dev.a as our development set for minimum-error rate tuning (Och, 2003) and then report the final translation accuracies on dev.b. For direct comparison with other systems in the future, we use the configuration of the official baseline system 5 :",
"cite_spans": [
{
"start": 401,
"end": 412,
"text": "(Och, 2003)",
"ref_id": "BIBREF15"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Setup",
"sec_num": "3.1"
},
{
"text": "\u2022 Moses 6 : revision = \"3717\" as the baseline decoder. Note that we also train Moses using HFE sentences (Isozaki et al., 2010b) and the English sentences pre-ordered by PASs;",
"cite_spans": [
{
"start": 105,
"end": 128,
"text": "(Isozaki et al., 2010b)",
"ref_id": "BIBREF6"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Setup",
"sec_num": "3.1"
},
{
"text": "\u2022 GIZA++: giza-pp-v1.0.3 7 (Och and Ney, 2003) for first training word alignment using the original English sentences for preordering rule extraction, and then for retrain- ing word alignments using the pre-ordered English sentences;",
"cite_spans": [
{
"start": 27,
"end": 46,
"text": "(Och and Ney, 2003)",
"ref_id": "BIBREF14"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Setup",
"sec_num": "3.1"
},
{
"text": "\u2022 SRILM 8 (Stolcke, 2002) : version 1.5.12 for training a 5-gram language model using the target sentences in the total training set;",
"cite_spans": [
{
"start": 10,
"end": 25,
"text": "(Stolcke, 2002)",
"ref_id": "BIBREF19"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Setup",
"sec_num": "3.1"
},
{
"text": "\u2022 Additional scripts 9 : for preprocessing English sentences and cleaning up too long (# of words > 40) parallel sentences;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Setup",
"sec_num": "3.1"
},
{
"text": "\u2022 Japanese word segmentation: Mecab v0.98 10 with the dictionary of mecab-ipadic-2.7.0-20070801.tar.gz 11 .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Setup",
"sec_num": "3.1"
},
{
"text": "The statistics of the filtered training set, dev.a, and dev.b are shown in Table 3 . The success parsing rate ranges from 98.7% to 99.3% by using Enju2.3.1. The averaged parsing time for each English sentence ranges from 0.30 to 0.48 seconds.",
"cite_spans": [],
"ref_spans": [
{
"start": 75,
"end": 82,
"text": "Table 3",
"ref_id": "TABREF6"
}
],
"eq_spans": [],
"section": "Setup",
"sec_num": "3.1"
},
{
"text": "pre-ordering rules Figure 2 shows the number (natural log) of the 40 types of the PASs that appeared in the HPSG trees of the three experiment sets. Top five types of opened PASs include adj arg1, det arg1, prep arg12, noun arg1, and verb arg12. By comparing the distributions of the number of PASs in the three sets, we can see that the distributions approximately share the same tendency. Thus, the pre-ordering rules learned from the PASs in the training set can be expected to be properly applied in dev.a and dev.b. Besides, the statistics of the number of arguments for the predicate words is shown in Table 4 . From this table, we find that the ratio of the number of arguments in the three sets are approximately similar. In particular, nearly half of the predicate words have one argument. The number of predicate words that contain two arguments occurs around 30.0% of all the predicate words. Also, we can not extract pre-ordering rules from around 23.0% of the predicate words since they do not contain any arguments. Finally, less than 1% of predicate words contain three arguments and we only find one four-argument example of verb arg1234 in the training set. Now, in Table 5 , we show the statistics of predicate words in the training set for pre-ordering rule extraction. Of the 48.3 million English words in the training set, there are 45.6 million words (94.4%) that are included in the HPSG trees that were successfully generated. Then, in the PASs of these 45.6 million words, there are 35.0 million words whose PASs are opened. We also list the number (34.0 million) of aligned predicate words, since we only extract pre-ordering rules from predicate words that are aligned to some target word(s) in Algorithm 1. Finally, there are 89.1% of aligned predicate words that are aligned to contiguous target words.",
"cite_spans": [],
"ref_spans": [
{
"start": 19,
"end": 27,
"text": "Figure 2",
"ref_id": "FIGREF1"
},
{
"start": 608,
"end": 616,
"text": "Table 4",
"ref_id": "TABREF8"
},
{
"start": 1184,
"end": 1191,
"text": "Table 5",
"ref_id": "TABREF9"
}
],
"eq_spans": [],
"section": "Statistics of PASs and PAS-based",
"sec_num": "3.2"
},
{
"text": "In order to investigate the sub-categorization effectiveness of the syntactic features included in the pre-ordering rules, we pick four subsets of the total feature set (Table 1) . These feature subsets, named from PAS-a to PAS-d, are listed in Table 6 . Through the comparison of these four feature subsets, we also attempt to investigate the datasparseness problem of available pre-ordering rules cased by the factored features.",
"cite_spans": [],
"ref_spans": [
{
"start": 169,
"end": 178,
"text": "(Table 1)",
"ref_id": "TABREF2"
},
{
"start": 245,
"end": 253,
"text": "Table 6",
"ref_id": "TABREF11"
}
],
"eq_spans": [],
"section": "Statistics of PASs and PAS-based",
"sec_num": "3.2"
},
{
"text": "PAS-a includes all the syntactic features listed in Table 1 . In PAS-b, we only keep three features for the predicate word and one feature for the argu -0 2 4 6 8 10 12 14 16 18 adj_arg1 adj_arg12 adj_mod_arg1 adj_mod_arg12 app_arg12 aux_arg12 aux_mod_arg12 comp_arg1 comp_arg12 conj_arg1 conj_arg12 conj_arg123 coord_arg12 det_arg1 dtv_arg2 it_arg1 lgs_arg2 lparen_arg123 noun_arg0 noun_arg1 noun_arg12 noun_arg2 poss_arg12 poss_arg2 prep_arg12 prep_arg123 prep_mod_arg12 punct_arg1 quote_arg23 relative_arg1 relative_arg12 rparen_arg0 there_arg0 verb_arg1 verb_arg12 verb_arg123 verb_arg1234 verb_mod_arg1 verb_mod_arg12 verb_mod_arg123 train (ln) dev.a (ln) dev.b (ln) Table 6 : Feature subsets used in pre-ordering rules and statistics of the extraction and application of the pre-ordering rules under these feature subsets. ment nodes. We further remove one feature (CAT) of the predicate word in PAS-c. In the fourth subset PAS-d, we only use two features WORD and PRED in the predicate word for sub-categorizing pre-ordering rules. Thus, PAS-d is only related to PASs (which can be generated by any kinds of parser) since it does not include additional features generated by the typical HPSG parser.",
"cite_spans": [],
"ref_spans": [
{
"start": 52,
"end": 59,
"text": "Table 1",
"ref_id": "TABREF2"
},
{
"start": 731,
"end": 738,
"text": "Table 6",
"ref_id": "TABREF11"
}
],
"eq_spans": [],
"section": "Statistics of PASs and PAS-based",
"sec_num": "3.2"
},
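{
"text": "As an illustration of how the feature subsets sub-categorize rules, the small Python sketch below (ours; only the PAS-d subset is stated explicitly in the paper, so the richer subsets are not shown) builds a rule-matching key from the factored features of a predicate sign:\n\ndef rule_key(pred_sign, arg_signs, pred_feats, arg_feats=()):\n    # A rule key is the tuple of selected predicate features, optionally\n    # extended with selected features of each argument node.\n    key = tuple(pred_sign[f] for f in pred_feats)\n    if arg_feats:\n        key += tuple(tuple(sign[f] for f in arg_feats) for sign in arg_signs)\n    return key\n\n# PAS-d uses only WORD and PRED of the predicate word.\nwhen_sign = {'WORD': 'when', 'PRED': 'conj_arg12', 'CAT': 'SC'}\narg_signs = [{'CAT': 'S'}, {'CAT': 'S'}]\nprint(rule_key(when_sign, arg_signs, ('WORD', 'PRED')))   # ('when', 'conj_arg12')\n\nUnder PAS-a, the key would additionally include the remaining features of Table 1 for the predicate and its arguments, yielding many more, finer-grained rules.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Statistics of PASs and PAS-based pre-ordering rules",
"sec_num": "3.2"
},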
{
"text": "As the number of syntactic features decreases, more rules can be unified together. Thus, the number of pre-ordering rules and reordering rules, as shown in Table 7 shows the final translation accuracies under BLEU score (Papineni et al., 2002) and RIBES 12 , i.e., the software implementation of Normalized Kendall's \u03c4 as proposed by (Isozaki et al., 2010a) to automatically evaluate the translation between distant language pairs based on rank correlation coefficients and significantly penalizes word order mistakes. Making use of our preordered English sentences significantly (p < 0.01) improved BLEU scores from 2.47 (PAS-d) to 3.15 (PAS-a) points. The effectiveness of our proposal for tackling word-ordering problem can also be proved by comparing the scores of RIBES. In addition, the accuracies change slightly among using the four types of pre-ordering rules. Among PAS-a, PAS-b, and PAS-c, we did significant test and could not differ them under p < 0.01 or p < 0.05. The only significant difference Table 8 : Translation accuracies by combining HFE and PAS based pre-ordering approach.",
"cite_spans": [
{
"start": 220,
"end": 243,
"text": "(Papineni et al., 2002)",
"ref_id": "BIBREF16"
},
{
"start": 334,
"end": 357,
"text": "(Isozaki et al., 2010a)",
"ref_id": "BIBREF5"
}
],
"ref_spans": [
{
"start": 156,
"end": 163,
"text": "Table 7",
"ref_id": "TABREF12"
},
{
"start": 1011,
"end": 1018,
"text": "Table 8",
"ref_id": null
}
],
"eq_spans": [],
"section": "Statistics of PASs and PAS-based",
"sec_num": "3.2"
},
{
"text": "(p < 0.05) appeared between PAS-a and PAS-d. Thus, we argue that the factored syntactic features such as WORD, PRED, and CAT are more essential for sub-categorizing pre-ordering rules than the remaining syntactic features.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Results",
"sec_num": "3.3"
},
{
"text": "As former mentioned, we also take the language-dependent HFE approach (Isozaki et al., 2010b) as another baseline. Note that word alignment was retrained using head-finalized English sentences and Japanese sentences in this HFE approach. Through comparing the HFE results listed in Table 8 , we observe that the results are comparable between PAS-a and HFE: HFE is slightly better under BLEU score and PAS-a is slightly better under RIBES score.",
"cite_spans": [
{
"start": 70,
"end": 93,
"text": "(Isozaki et al., 2010b)",
"ref_id": "BIBREF6"
}
],
"ref_spans": [
{
"start": 282,
"end": 289,
"text": "Table 8",
"ref_id": null
}
],
"eq_spans": [],
"section": "Results",
"sec_num": "3.3"
},
{
"text": "Since similar HPSG parser (Enju) yet different linguistic information (syntactic head information vs. PASs) are used in HFE approach and our proposal. A straightforward question is whether we can combine these approaches together. Under this motivation, we select a better pre-ordered English sentence generated by the HFE method and our PAS-based method. Following (Genzel, 2010) , we use crossing score as the metric for sentence selection. Crossing score is the number of crossing alignment links for a given aligned sentence pair. For monotonic alignments without reordering, crossing score is zero. During selection, we found that nearly 10% of the pre-ordered English sentences yielded by head-finalization and PAS-based methods were similar. In addition, among the different sentences, around 30% of PAS-based pre-ordering sentences were selected. Since we can not compute crossing score in the development/test sets, we instead take both kinds of pre-ordered English sentences as inputs and pick one output with a higher translation score.",
"cite_spans": [
{
"start": 366,
"end": 380,
"text": "(Genzel, 2010)",
"ref_id": "BIBREF4"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Results",
"sec_num": "3.3"
},
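{
"text": "A minimal Python sketch (ours, not Genzel's implementation) of the crossing score, assuming a word alignment given as a list of (source index, target index) links:\n\ndef crossing_score(alignment):\n    # Count the pairs of alignment links that cross each other;\n    # a monotonic alignment has a crossing score of zero.\n    score = 0\n    for k, (i1, j1) in enumerate(alignment):\n        for i2, j2 in alignment[k + 1:]:\n            if (i1 - i2) * (j1 - j2) < 0:   # the two links cross\n                score += 1\n    return score\n\nprint(crossing_score([(0, 0), (1, 1), (2, 2)]))   # monotonic -> 0\nprint(crossing_score([(0, 2), (1, 1), (2, 0)]))   # fully reversed -> 3",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Results",
"sec_num": "3.3"
},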
{
"text": "The translation result based on this reselection approach is shown in Table 8 . Compared with HFE approach, the reselection approach significantly (p < 0.01) improved BLEU scores of from 1.22 (PAS-d) to 1.68 (PAS-b) points. These interesting results reflect that syntactic head infor- mation and PASs describe the linguistic information of an English sentence in different aspects. Furthermore, compared with the single headfinalization rule, the automatically extracted preordering rules kept the variety of word-ordering by dynamically inferring the word order of target sentences and thus enlarged the reordering space.",
"cite_spans": [],
"ref_spans": [
{
"start": 70,
"end": 77,
"text": "Table 8",
"ref_id": null
}
],
"eq_spans": [],
"section": "Results",
"sec_num": "3.3"
},
{
"text": "In order to investigate how closely the pre-ordered English sentences follow target language word order, we measured Kendall's \u03c4 (Kendall, 1948) , a rank correlation coefficient, as shown in Table 9 . We exactly follow Isozaki et al. (2010b) to compute Kendall's \u03c4 . From Table 9 , we can see that the quality of word alignments approximately reflects the final BLEU scores listed in Table 7 and 8.",
"cite_spans": [
{
"start": 129,
"end": 144,
"text": "(Kendall, 1948)",
"ref_id": "BIBREF8"
},
{
"start": 219,
"end": 241,
"text": "Isozaki et al. (2010b)",
"ref_id": "BIBREF6"
}
],
"ref_spans": [
{
"start": 191,
"end": 198,
"text": "Table 9",
"ref_id": "TABREF15"
},
{
"start": 272,
"end": 279,
"text": "Table 9",
"ref_id": "TABREF15"
},
{
"start": 384,
"end": 391,
"text": "Table 7",
"ref_id": "TABREF12"
}
],
"eq_spans": [],
"section": "Alignment comparison",
"sec_num": "3.4"
},
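{
"text": "For reference, a generic Python sketch of Kendall's \u03c4 over a target-order permutation of the source words (ours; the exact procedure of Isozaki et al. (2010b) for deriving the permutation from word alignments is not reproduced here):\n\nfrom itertools import combinations\n\ndef kendalls_tau(permutation):\n    # tau = (concordant pairs - discordant pairs) / (n * (n - 1) / 2),\n    # assuming a permutation without ties and with at least two elements.\n    pairs = list(combinations(range(len(permutation)), 2))\n    concordant = sum(permutation[i] < permutation[j] for i, j in pairs)\n    discordant = len(pairs) - concordant\n    return (concordant - discordant) / len(pairs)\n\nprint(kendalls_tau([0, 1, 2, 3]))   # monotonic order -> 1.0\nprint(kendalls_tau([3, 2, 1, 0]))   # fully reversed  -> -1.0",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Alignment comparison",
"sec_num": "3.4"
},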
{
"text": "We have proposed a pre-ordering approach by making use of predicate argument structures. The pre-ordering rules record the relative source-target position mapping among predicate words and their argument phrases. We first proposed an algorithm for automatically extracting these lexical pre-ordering rules from aligned HPSG-tree-tostring pairs. Then, we apply these pre-ordering rules to HPSG trees to yield pre-ordered source sentences that follow the word order of target sentences. Finally, we do word alignment again by using the pre-ordered source sentences together with the original target sentences. Employing Moses , our proposal significantly improved 2.47\u223c3.15 BLEU points compared with using the original English sentences. Combining with the HFE approach (Isozaki et al., 2010b) , our approach significantly and impressively improved 5.29 points of BLEU score from 0.2773 to 0.3302. We finally argue that our proposal is not difficult to be implemented and can be easily applied to translate English into other languages.",
"cite_spans": [
{
"start": 768,
"end": 791,
"text": "(Isozaki et al., 2010b)",
"ref_id": "BIBREF6"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "4"
},
{
"text": "These word alignments are gained by running GIZA++(Och and Ney, 2003) on the original parallel sentences.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "In this example, the overlapping is caused by the wrong/ambiguous alignments between \"used\" and \"naru15\", and between \"is\" and \"ha6\".",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "http://ntcir.nii.ac.jp/PatentMT/ 5 http://ntcir.nii.ac.jp/PatentMT/baselineSystems 6 http://www.statmt.org/moses/ 7 http://giza-pp.googlecode.com/files/giza-pp-v1.0.3.tar.gz",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "http://www.speech.sri.com/projects/srilm/ 9 http://homepages.inf.ed.ac.uk/jschroe1/howto/scripts.tgz 10 http://sourceforge.net/projects/mecab/files/ 11 http://sourceforge.net/projects/mecab/files/mecabipadic/",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "Code available at http://www.kecl.ntt.co.jp/icl/lirg/ribes",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "The mathematics of statistical machine translation: parameter estimation",
"authors": [
{
"first": "F",
"middle": [],
"last": "Peter",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Brown",
"suffix": ""
},
{
"first": "J",
"middle": [
"Della"
],
"last": "Vincent",
"suffix": ""
},
{
"first": "Stephen",
"middle": [
"A"
],
"last": "Pietra",
"suffix": ""
},
{
"first": "Robert",
"middle": [
"L"
],
"last": "Della Pietra",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Mercer",
"suffix": ""
}
],
"year": 1993,
"venue": "Computational Linguistics",
"volume": "19",
"issue": "2",
"pages": "263--311",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Peter F. Brown, Vincent J. Della Pietra, Stephen A. Della Pietra, and Robert L. Mercer. 1993. The mathematics of statistical machine translation: parameter estimation. Computational Linguistics, 19(2):263-311.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "A hierarchical phrase-based model for statistical machine translation",
"authors": [
{
"first": "David",
"middle": [],
"last": "Chiang",
"suffix": ""
}
],
"year": 2005,
"venue": "Proc.of ACL",
"volume": "",
"issue": "",
"pages": "263--270",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "David Chiang. 2005. A hierarchical phrase-based model for statistical machine translation. In Proc.of ACL, pages 263-270.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Clause restructuring for statistical machine translation",
"authors": [
{
"first": "Michael",
"middle": [],
"last": "Collins",
"suffix": ""
},
{
"first": "Philipp",
"middle": [],
"last": "Koehn",
"suffix": ""
},
{
"first": "Ivona",
"middle": [],
"last": "Kucerova",
"suffix": ""
}
],
"year": 2005,
"venue": "Proc.of ACL",
"volume": "",
"issue": "",
"pages": "531--540",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Michael Collins, Philipp Koehn, and Ivona Kucerova. 2005. Clause restructuring for statistical machine translation. In Proc.of ACL, pages 531-540.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "What's in a translation rule",
"authors": [
{
"first": "Michel",
"middle": [],
"last": "Galley",
"suffix": ""
},
{
"first": "Mark",
"middle": [],
"last": "Hopkins",
"suffix": ""
},
{
"first": "Kevin",
"middle": [],
"last": "Knight",
"suffix": ""
},
{
"first": "Daniel",
"middle": [],
"last": "Marcu",
"suffix": ""
}
],
"year": 2004,
"venue": "Proc.of HLT-NAACL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Michel Galley, Mark Hopkins, Kevin Knight, and Daniel Marcu. 2004. What's in a translation rule? In Proc.of HLT-NAACL.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Automatically learning sourceside reordering rules for large scale machine translation",
"authors": [
{
"first": "Dmitriy",
"middle": [],
"last": "Genzel",
"suffix": ""
}
],
"year": 2010,
"venue": "Proc.of COLING",
"volume": "",
"issue": "",
"pages": "376--384",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Dmitriy Genzel. 2010. Automatically learning source- side reordering rules for large scale machine transla- tion. In Proc.of COLING, pages 376-384.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Automatic evaluation of translation quality for distant language pairs",
"authors": [
{
"first": "Hideki",
"middle": [],
"last": "Isozaki",
"suffix": ""
},
{
"first": "Tsutomu",
"middle": [],
"last": "Hirao",
"suffix": ""
},
{
"first": "Kevin",
"middle": [],
"last": "Duh",
"suffix": ""
},
{
"first": "Katsuhito",
"middle": [],
"last": "Sudoh",
"suffix": ""
},
{
"first": "Hajime",
"middle": [],
"last": "Tsukada",
"suffix": ""
}
],
"year": 2010,
"venue": "Proc.of EMNLP",
"volume": "",
"issue": "",
"pages": "944--952",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hideki Isozaki, Tsutomu Hirao, Kevin Duh, Katsuhito Sudoh, and Hajime Tsukada. 2010a. Automatic evaluation of translation quality for distant language pairs. In Proc.of EMNLP, pages 944-952.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Head finalization: A simple reordering rule for sov languages",
"authors": [
{
"first": "Hideki",
"middle": [],
"last": "Isozaki",
"suffix": ""
},
{
"first": "Katsuhito",
"middle": [],
"last": "Sudoh",
"suffix": ""
},
{
"first": "Hajime",
"middle": [],
"last": "Tsukada",
"suffix": ""
},
{
"first": "Kevin",
"middle": [],
"last": "Duh",
"suffix": ""
}
],
"year": 2010,
"venue": "Proc.of WMT-MetricsMATR",
"volume": "",
"issue": "",
"pages": "244--251",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hideki Isozaki, Katsuhito Sudoh, Hajime Tsukada, and Kevin Duh. 2010b. Head finalization: A simple reordering rule for sov languages. In Proc.of WMT- MetricsMATR, pages 244-251.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Syntactic reordering in preprocessing for japanese-english translation: Mit system description for ntcir-7 patent translation task",
"authors": [
{
"first": "Jason",
"middle": [],
"last": "Katz-Brown",
"suffix": ""
},
{
"first": "Michael",
"middle": [],
"last": "Collins",
"suffix": ""
}
],
"year": 2007,
"venue": "Proc.of NTCIR-7 Workshop Meeting",
"volume": "",
"issue": "",
"pages": "409--414",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jason Katz-Brown and Michael Collins. 2007. Syntac- tic reordering in preprocessing for japanese-english translation: Mit system description for ntcir-7 patent translation task. In Proc.of NTCIR-7 Workshop Meeting, pages 409-414.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Rank Correlation Methods",
"authors": [
{
"first": "Maurice",
"middle": [],
"last": "Kendall",
"suffix": ""
}
],
"year": 1948,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Maurice Kendall. 1948. Rank Correlation Methods. Charles Griffin.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Moses: Open source toolkit for statistical machine translation",
"authors": [
{
"first": "Philipp",
"middle": [],
"last": "Koehn",
"suffix": ""
},
{
"first": "Hieu",
"middle": [],
"last": "Hoang",
"suffix": ""
},
{
"first": "Alexandra",
"middle": [],
"last": "Birch",
"suffix": ""
},
{
"first": "Chris",
"middle": [],
"last": "Callison-Burch",
"suffix": ""
},
{
"first": "Marcello",
"middle": [],
"last": "Federico",
"suffix": ""
},
{
"first": "Nicola",
"middle": [],
"last": "Bertoldi",
"suffix": ""
},
{
"first": "Brooke",
"middle": [],
"last": "Cowan",
"suffix": ""
},
{
"first": "Wade",
"middle": [],
"last": "Shen",
"suffix": ""
},
{
"first": "Christine",
"middle": [],
"last": "Moran",
"suffix": ""
},
{
"first": "Richard",
"middle": [],
"last": "Zens",
"suffix": ""
},
{
"first": "Chris",
"middle": [],
"last": "Dyer",
"suffix": ""
},
{
"first": "Ond\u0159ej",
"middle": [],
"last": "Bojar",
"suffix": ""
},
{
"first": "Alexandra",
"middle": [],
"last": "Constantin",
"suffix": ""
},
{
"first": "Evan",
"middle": [],
"last": "Herbst",
"suffix": ""
}
],
"year": 2007,
"venue": "the ACL 2007 Demo-Poster",
"volume": "",
"issue": "",
"pages": "177--180",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Philipp Koehn, Hieu Hoang, Alexandra Birch, Chris Callison-Burch, Marcello Federico, Nicola Bertoldi, Brooke Cowan, Wade Shen, Christine Moran, Richard Zens, Chris Dyer, Ond\u0159ej Bojar, Alexandra Constantin, and Evan Herbst. 2007. Moses: Open source toolkit for statistical machine translation. In the ACL 2007 Demo-Poster, pages 177-180.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Phrase reordering for statistical machine translation based on predicate-argument structure",
"authors": [
{
"first": "Mamoru",
"middle": [],
"last": "Komachi",
"suffix": ""
},
{
"first": "Masaaki",
"middle": [],
"last": "Nagata",
"suffix": ""
},
{
"first": "Yuji",
"middle": [],
"last": "Matsumoto",
"suffix": ""
}
],
"year": 2006,
"venue": "Proc.of IWSLT",
"volume": "",
"issue": "",
"pages": "77--82",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mamoru Komachi, Masaaki Nagata, and Yuji Mat- sumoto. 2006. Phrase reordering for statistical ma- chine translation based on predicate-argument struc- ture. In Proc.of IWSLT, pages 77-82.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Local phrase reordering models for statistical machine translation",
"authors": [
{
"first": "Shankar",
"middle": [],
"last": "Kumar",
"suffix": ""
},
{
"first": "William",
"middle": [],
"last": "Byrne",
"suffix": ""
}
],
"year": 2005,
"venue": "Proc.of HLT-EMNLP",
"volume": "",
"issue": "",
"pages": "161--168",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Shankar Kumar and William Byrne. 2005. Lo- cal phrase reordering models for statistical machine translation. In Proc.of HLT-EMNLP, pages 161- 168.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Treeto-string alignment templates for statistical machine transaltion",
"authors": [
{
"first": "Yang",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Qun",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Shouxun",
"middle": [],
"last": "Lin",
"suffix": ""
}
],
"year": 2006,
"venue": "Proc.of COLING-ACL",
"volume": "",
"issue": "",
"pages": "609--616",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yang Liu, Qun Liu, and Shouxun Lin. 2006. Tree- to-string alignment templates for statistical machine transaltion. In Proc.of COLING-ACL, pages 609- 616, Sydney, Australia.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Feature forest models for probabilistic hpsg parsing",
"authors": [
{
"first": "Yusuke",
"middle": [],
"last": "Miyao",
"suffix": ""
},
{
"first": "Jun'ichi",
"middle": [],
"last": "Tsujii",
"suffix": ""
}
],
"year": 2008,
"venue": "Computational Lingustics",
"volume": "34",
"issue": "1",
"pages": "35--80",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yusuke Miyao and Jun'ichi Tsujii. 2008. Feature for- est models for probabilistic hpsg parsing. Computa- tional Lingustics, 34(1):35-80.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "A systematic comparison of various statistical alignment models",
"authors": [
{
"first": "Franz Josef",
"middle": [],
"last": "Och",
"suffix": ""
},
{
"first": "Hermann",
"middle": [],
"last": "Ney",
"suffix": ""
}
],
"year": 2003,
"venue": "Computational Linguistics",
"volume": "29",
"issue": "1",
"pages": "19--51",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Franz Josef Och and Hermann Ney. 2003. A sys- tematic comparison of various statistical alignment models. Computational Linguistics, 29(1):19-51.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Minimum error rate training in statistical machine translation",
"authors": [
{
"first": "Franz Josef",
"middle": [],
"last": "Och",
"suffix": ""
}
],
"year": 2003,
"venue": "Proc.of ACL",
"volume": "",
"issue": "",
"pages": "160--167",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Franz Josef Och. 2003. Minimum error rate training in statistical machine translation. In Proc.of ACL, pages 160-167.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Bleu: a method for automatic evaluation of machine translation",
"authors": [
{
"first": "Kishore",
"middle": [],
"last": "Papineni",
"suffix": ""
},
{
"first": "Salim",
"middle": [],
"last": "Roukos",
"suffix": ""
},
{
"first": "Todd",
"middle": [],
"last": "Ward",
"suffix": ""
},
{
"first": "Wei-Jing",
"middle": [],
"last": "Zhu",
"suffix": ""
}
],
"year": 2002,
"venue": "Proc.of ACL",
"volume": "",
"issue": "",
"pages": "311--318",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proc.of ACL, pages 311-318.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "Head-Driven Phrase Structure Grammar",
"authors": [
{
"first": "Carl",
"middle": [],
"last": "Pollard",
"suffix": ""
},
{
"first": "Ivan",
"middle": [
"A"
],
"last": "Sag",
"suffix": ""
}
],
"year": 1994,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Carl Pollard and Ivan A. Sag. 1994. Head-Driven Phrase Structure Grammar. University of Chicago Press.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "Word reordering in statistical machine translation with a pos-based distortion model",
"authors": [
{
"first": "Kay",
"middle": [],
"last": "Rottmann",
"suffix": ""
},
{
"first": "Stephan",
"middle": [],
"last": "Vogel",
"suffix": ""
}
],
"year": 2007,
"venue": "Proc.of TMI",
"volume": "",
"issue": "",
"pages": "171--180",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Kay Rottmann and Stephan Vogel. 2007. Word re- ordering in statistical machine translation with a pos-based distortion model. In Proc.of TMI, pages 171-180, Skovde, Sweden.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "Srilm-an extensible language modeling toolkit",
"authors": [
{
"first": "Andreas",
"middle": [],
"last": "Stolcke",
"suffix": ""
}
],
"year": 2002,
"venue": "Proc.of ICSLP",
"volume": "",
"issue": "",
"pages": "901--904",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Andreas Stolcke. 2002. Srilm-an extensible language modeling toolkit. In Proc.of ICSLP, pages 901-904.",
"links": null
},
"BIBREF20": {
"ref_id": "b20",
"title": "A unigram orientation model for statistical machine translation",
"authors": [
{
"first": "Christoph",
"middle": [],
"last": "Tillman",
"suffix": ""
}
],
"year": 2004,
"venue": "",
"volume": "",
"issue": "",
"pages": "101--104",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Christoph Tillman. 2004. A unigram orienta- tion model for statistical machine translation. In Daniel Marcu Susan Dumais and Salim Roukos, ed- itors, HLT-NAACL 2004, pages 101-104.",
"links": null
},
"BIBREF21": {
"ref_id": "b21",
"title": "Hmm-based word alignment in statistical translation",
"authors": [
{
"first": "Stephan",
"middle": [],
"last": "Vogel",
"suffix": ""
},
{
"first": "Hermann",
"middle": [],
"last": "Ney",
"suffix": ""
},
{
"first": "Christoph",
"middle": [],
"last": "Tillmann",
"suffix": ""
}
],
"year": 1996,
"venue": "COLING",
"volume": "",
"issue": "",
"pages": "836--841",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Stephan Vogel, Hermann Ney, and Christoph Tillmann. 1996. Hmm-based word alignment in statistical translation. In COLING, pages 836-841.",
"links": null
},
"BIBREF22": {
"ref_id": "b22",
"title": "Chinese syntactic reordering for statistical machine translation",
"authors": [
{
"first": "Chao",
"middle": [],
"last": "Wang",
"suffix": ""
},
{
"first": "Michael",
"middle": [],
"last": "Collins",
"suffix": ""
},
{
"first": "Philipp",
"middle": [],
"last": "Koehn",
"suffix": ""
}
],
"year": 2007,
"venue": "Proc.of EMNLP-CoNLL",
"volume": "",
"issue": "",
"pages": "737--745",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Chao Wang, Michael Collins, and Philipp Koehn. 2007. Chinese syntactic reordering for statistical machine translation. In Proc.of EMNLP-CoNLL, pages 737-745.",
"links": null
},
"BIBREF23": {
"ref_id": "b23",
"title": "Fine-grained tree-to-string translation rule extraction",
"authors": [
{
"first": "Xianchao",
"middle": [],
"last": "Wu",
"suffix": ""
},
{
"first": "Takuya",
"middle": [],
"last": "Matsuzaki",
"suffix": ""
},
{
"first": "Jun'ichi",
"middle": [],
"last": "Tsujii",
"suffix": ""
}
],
"year": 2010,
"venue": "Proc.of the 48th ACL",
"volume": "",
"issue": "",
"pages": "325--334",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Xianchao Wu, Takuya Matsuzaki, and Jun'ichi Tsujii. 2010a. Fine-grained tree-to-string translation rule extraction. In Proc.of the 48th ACL, pages 325-334.",
"links": null
},
"BIBREF24": {
"ref_id": "b24",
"title": "Improve syntax-based translation using deep syntactic structures. Machine Translation (Special Issue : Pushing the frontiers of SMT)",
"authors": [
{
"first": "Xianchao",
"middle": [],
"last": "Wu",
"suffix": ""
},
{
"first": "Takuya",
"middle": [],
"last": "Matsuzaki",
"suffix": ""
},
{
"first": "Jun'ichi",
"middle": [],
"last": "Tsujii",
"suffix": ""
}
],
"year": 2010,
"venue": "",
"volume": "24",
"issue": "",
"pages": "141--157",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Xianchao Wu, Takuya Matsuzaki, and Jun'ichi Tsujii. 2010b. Improve syntax-based translation using deep syntactic structures. Machine Translation (Special Issue : Pushing the frontiers of SMT), 24(2):141- 157.",
"links": null
},
"BIBREF25": {
"ref_id": "b25",
"title": "Improving a statistical mt system with automatically learned rewrite patterns",
"authors": [
{
"first": "Fei",
"middle": [],
"last": "Xia",
"suffix": ""
},
{
"first": "Michael",
"middle": [],
"last": "Mccord",
"suffix": ""
}
],
"year": 2004,
"venue": "Proc.of COLING",
"volume": "",
"issue": "",
"pages": "508--514",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Fei Xia and Michael McCord. 2004. Improving a sta- tistical mt system with automatically learned rewrite patterns. In Proc.of COLING, pages 508-514.",
"links": null
},
"BIBREF26": {
"ref_id": "b26",
"title": "Maximum entropy based phrase reordering model for statistical machine translation",
"authors": [
{
"first": "Deyi",
"middle": [],
"last": "Xiong",
"suffix": ""
},
{
"first": "Qun",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Shouxun",
"middle": [],
"last": "Lin",
"suffix": ""
}
],
"year": 2006,
"venue": "Proc.of COLING-ACL",
"volume": "",
"issue": "",
"pages": "521--528",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Deyi Xiong, Qun Liu, and Shouxun Lin. 2006. Maxi- mum entropy based phrase reordering model for sta- tistical machine translation. In Proc.of COLING- ACL, pages 521-528.",
"links": null
},
"BIBREF27": {
"ref_id": "b27",
"title": "Using a dependency parser to improve smt for subject-object-verb languages",
"authors": [
{
"first": "Peng",
"middle": [],
"last": "Xu",
"suffix": ""
},
{
"first": "Jaeho",
"middle": [],
"last": "Kang",
"suffix": ""
},
{
"first": "Michael",
"middle": [],
"last": "Ringgaard",
"suffix": ""
},
{
"first": "Franz",
"middle": [],
"last": "Och",
"suffix": ""
}
],
"year": 2009,
"venue": "Proc.of HLT-NAACL",
"volume": "",
"issue": "",
"pages": "245--253",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Peng Xu, Jaeho Kang, Michael Ringgaard, and Franz Och. 2009. Using a dependency parser to improve smt for subject-object-verb languages. In Proc.of HLT-NAACL, pages 245-253.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"uris": null,
"type_str": "figure",
"num": null,
"text": "Illustration of a word-aligned HPSG-tree-to-string pair for English-to-Japanese translation."
},
"FIGREF1": {
"uris": null,
"type_str": "figure",
"num": null,
"text": "Number (natural log) of the types of the PASs that appeared in the experiment sets."
},
"TABREF2": {
"num": null,
"text": "",
"type_str": "table",
"html": null,
"content": "<table/>"
},
"TABREF4": {
"num": null,
"text": "",
"type_str": "table",
"html": null,
"content": "<table><tr><td>: PAS-based pre-ordering rules extracted</td></tr><tr><td>from the example shown in Figure 1. We use real</td></tr><tr><td>words instead of predicate nodes here for intuitive</td></tr><tr><td>understanding.</td></tr><tr><td>Algorithm 2 Pre-ordering Rule Application</td></tr><tr><td>1: TE.localize() 2: TE.computeSrcSpans() 3: mct rule \u2190 {} 4: for each leaf node t of TE do 5: Node[] args \u2190 TE.getArgs(t) 6: int[] srcOrder \u2190 SORTSPANS(t.srcSpan, srcSpans of args) 7: Rule r \u2190RULEMATCH(R, < t, args, srcOrder>) 8: if r != NULL then 9: mct \u2190 TE.MCT(t, args) 10: mct rule.add(<mct, r >) 11: end if 12: end for 13: for each mct in mct rule in a bottom-up order do 14: Rule r \u2190 mct rule.get(mct) 15: mct.root().srcPhrase \u2190 '' \u25c3 root() returns root node 16: for i from 0 to r.trgOrder.length-1 do 17: mct.root().srcPhrase += ' ' + mct.leaves() [r.trgOrder[i]].srcPhrase 18: end for 19: end for 20: for each node n in TE in a topological order do 21: if n is a terminal node then 22: n.srcPhrase \u2190 E[n.srcSpan[0]] 23: else if n.srcPhrase = NULL then 24: n.srcPhrase \u2190 CONNECT(n.children().srcPhrase) 25: end if 26: end for</td></tr></table>"
},
"TABREF6": {
"num": null,
"text": "Statistics of the experiment sets.",
"type_str": "table",
"html": null,
"content": "<table/>"
},
"TABREF8": {
"num": null,
"text": "Statistics of the number of arguments of the predicate words in the experiment sets.",
"type_str": "table",
"html": null,
"content": "<table><tr><td>Number</td><td>Ratio</td></tr><tr><td colspan=\"2\">Parse success 45,617,387 94.4% Opened 35,004,893 76.7% Aligned 33,966,923 97.0% Contiguous 30,256,858 89.1%</td></tr></table>"
},
"TABREF9": {
"num": null,
"text": "Statistics of predicate words in the training set for rule extraction.",
"type_str": "table",
"html": null,
"content": "<table/>"
},
"TABREF11": {
"num": null,
"text": "",
"type_str": "table",
"html": null,
"content": "<table><tr><td>, also decreases. The number</td></tr><tr><td>of reordering rules occurs from 25.1% (PAS-d) to</td></tr><tr><td>38.2% (PAS-a) in the pre-ordering rules. For each</td></tr><tr><td>English sentence in the training set, there are aver-</td></tr><tr><td>agely 12 reordering rules (instead of monotonic</td></tr></table>"
},
"TABREF12": {
"num": null,
"text": "",
"type_str": "table",
"html": null,
"content": "<table><tr><td>: Translation accuracies by using the orig-</td></tr><tr><td>inal English sentences or the pre-ordered English</td></tr><tr><td>sentences under four types of pre-ordering rules.</td></tr><tr><td>pre-ordering rules) available under either of the</td></tr><tr><td>four feature subsets. For each English sentence in</td></tr><tr><td>dev.a and dev.b, the number of available reorder-</td></tr><tr><td>ing rules is averagely 16. Around 99.1%, 99.0%,</td></tr><tr><td>and 98.6% English sentences were respectively re-</td></tr><tr><td>ordered in the training set, dev.a set, and dev.b set.</td></tr></table>"
},
"TABREF13": {
"num": null,
"text": ".7379 11.0% 34.7% HFE+PAS-b 0.3302 0.7397 12.3% 32.8% HFE+PAS-c 0.3300 0.7380 10.8% 35.0% HFE+PAS-d 0.3256 0.7337 11.5% 32.8%",
"type_str": "table",
"html": null,
"content": "<table><tr><td colspan=\"3\">Source sent. BLEU RIBES Same</td><td>PAS</td></tr><tr><td>HFE HFE+PAS-a</td><td>0.3134 0.7370 0.3278 0</td><td>-</td><td>-</td></tr></table>"
},
"TABREF15": {
"num": null,
"text": "Comparison of Kendall's \u03c4 .",
"type_str": "table",
"html": null,
"content": "<table/>"
}
}
}
} |