{
"paper_id": "I13-1049",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T07:14:43.989718Z"
},
"title": "Hypothesis Refinement Using Agreement Constraints in Machine Translation",
"authors": [
{
"first": "Ankur",
"middle": [],
"last": "Gandhe",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Carnegie Mellon University",
"location": {
"country": "USA"
}
},
"email": "ankurgan@andrew.cmu.edu"
},
{
"first": "Rashmi",
"middle": [],
"last": "Gangadharaiah",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "IBM Research",
"location": {
"country": "India"
}
},
"email": "rashgang@in.ibm.com"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "Phrase-based machine translation like other data driven approaches, are often plagued by irregularities in the translations of words in morphologically rich languages. The phrase-pairs and the language models are unable to capture the long range dependencies which decide the inflection. This paper makes the first attempt at learning constraints between the language-pair where, the target language lacks rich linguistic resources, by automatically learning classifiers that prevent implausible phrases from being part of decoding and at the same time adds consistent phrases. The paper also shows that this approach improves translation quality on the English-Hindi language pair.",
"pdf_parse": {
"paper_id": "I13-1049",
"_pdf_hash": "",
"abstract": [
{
"text": "Phrase-based machine translation like other data driven approaches, are often plagued by irregularities in the translations of words in morphologically rich languages. The phrase-pairs and the language models are unable to capture the long range dependencies which decide the inflection. This paper makes the first attempt at learning constraints between the language-pair where, the target language lacks rich linguistic resources, by automatically learning classifiers that prevent implausible phrases from being part of decoding and at the same time adds consistent phrases. The paper also shows that this approach improves translation quality on the English-Hindi language pair.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Data driven Machine Translation approaches have gained significant attention as they do not require rich linguistic resources such as, parsers or manually built dictionaries. However, their performance largely depends on the amount of training data available (Koehn, 2005) .",
"cite_spans": [
{
"start": 259,
"end": 272,
"text": "(Koehn, 2005)",
"ref_id": "BIBREF7"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "When the source language is morphologically rich and when the amount of data available is limited, the number out-of-vocabulary (OOV) increases thereby reducing the translation quality. Popovic and Ney (2004) applied transformations to OOV verbs. Yang and Kirchoff (2006) used a back-off model to transform unknown words, where, the phrase-table entries were modified such that words sharing the same root were replaced by their stems. Others (Freeman et al., 2006; Habash, 2008) found in-vocabulary words that could be treated as morphological variants.",
"cite_spans": [
{
"start": 186,
"end": 208,
"text": "Popovic and Ney (2004)",
"ref_id": "BIBREF13"
},
{
"start": 247,
"end": 271,
"text": "Yang and Kirchoff (2006)",
"ref_id": null
},
{
"start": 443,
"end": 465,
"text": "(Freeman et al., 2006;",
"ref_id": "BIBREF2"
},
{
"start": 466,
"end": 479,
"text": "Habash, 2008)",
"ref_id": "BIBREF6"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Translating into a language that is rich in morphology from a source language that is not morphologically rich also has limitations. The main reason for this is that the source language does not usually contain all the information for inflecting the words in the target half. For language-pairs that have limited amounts of training data, it is unlikely that the Translation model comes across all forms of inflections on the target phrases. Hence, some mechanism is required in order to generate these target phrases with all possible inflections and at the same time be able to filter out the implausible hypotheses.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Certain approaches (Toutanova et al., 2008; Minkov et al., 2007; Green et al., 2012) predict inflections using syntactic and rich morphological sources for the target language. This approach cannot be applied on resource poor languages such as, Hindi or other Indian languages, which lack such rich knowledge sources. Ramanathan et al. (2009) use factored models to incorporate semantic relations and suffixes to generate inflections and case markers while translating from English to Hindi but do not consider the problem of agreement between phrases in the target sentence. William and Koehn (2011) suggested an approach to eliminate inconsistent hypotheses in a string-totree model by adding unification-based constraints to only the target-side of the synchronous grammar. Although tranfer-based MT (Lavie, 2008) uses rich feature structures, grammar rules and constraints are manually developed. In addition, rules formed for one language-pair cannot be applied to another language pair. However, it is possible to model these rules as a classification problem: Given the set of source language features that influence the inflection of the target word, we try to predict the best possible target class. The target class could be the either spontaneous words or inflections of words.",
"cite_spans": [
{
"start": 19,
"end": 43,
"text": "(Toutanova et al., 2008;",
"ref_id": "BIBREF17"
},
{
"start": 44,
"end": 64,
"text": "Minkov et al., 2007;",
"ref_id": "BIBREF10"
},
{
"start": 65,
"end": 84,
"text": "Green et al., 2012)",
"ref_id": "BIBREF5"
},
{
"start": 318,
"end": 342,
"text": "Ramanathan et al. (2009)",
"ref_id": "BIBREF15"
},
{
"start": 588,
"end": 600,
"text": "Koehn (2011)",
"ref_id": "BIBREF19"
},
{
"start": 803,
"end": 816,
"text": "(Lavie, 2008)",
"ref_id": "BIBREF9"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "This paper, specifically looks at translating from English to Hindi to predict a) Subject case markers, b) Object case markers and c) Verb phrase inflections. In many PBSMT systems, once the phrase-pairs have been extracted, it is no longer required to store the training corpus from which the phrase-pairs were extracted. However, while dealing with many morphologically rich languages, the morphological variants of the target phrase not only depend on their source phrase but also on the context in which the source phrase appeared. Hence, it is beneficial to incorporate source-side features while decoding and most PBSMT systems do not use any other information from the input sentence other than the source phrase itself.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "This paper presents an approach to improve the translation quality while translating from a morphologically poor language (such as, English) to a target language that is morphologically rich without using any rich resources such as, parsers or morphological analyzers. The contributions of the paper are summarized as follows:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "\u2022 The approach detects inconsistent hypotheses generated by the translation model by treating the task as a classification problem.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "\u2022 The approach also predicts plausible target phrases that agree with the features extracted from the input sentence.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "\u2022 The paper also shows how the incorporation of source-specific features during decoding results in better translations.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Section 2 provides motivating examples to understand the importance of the task at hand.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "We demonstrate the usefulness of our approach on Indian languages as they are rich in morphology. They are also considered as resource-poor and low-density languages due to the lack of data availability and the absence of rich knowledge sources like morphological analyzers or syntactic parsers. Hindi has a free word-order where the constituents are identified through case markers.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Motivation",
"sec_num": "2"
},
{
"text": "A few approaches generate the right inflection by a) capturing all possible variations within the target phrase (Gandhe et al., 2011) and b) use the language model to select the most fluent phrases. However, the following problems still remain: 1) Many language models typically use 4-gram or 5-gram models (even lower when the data available is scarce). Example 1a has a subject (Ram) that is masculine (masc)-3rd person (3)singular(sg)-present progressive(pp) and example 1b, has a subject (Sita) that is feminine (fem)-3rd person(3)-singular(sg)-present progressive(pp). This difference in gender, changes the inflection on the auxiliary Hindi verb raha, from 'a' (in 1a) to 'i' (in 1b). It should be noted that lower order n-gram language models fail to obtain the right translation due to the long distance dependency between the subject (Ram / Sita) and the verb phrase (khel raha hai / khel rahi hai corresponding to is playing in English) in the target language. 2) Language models are insufficient to produce the right inflections. Consider the case shown in example 2, where the translation of the English pronouns (he/she) is same in Hindi (both translate to Woh). The inflection on the axillary verb phrase (raha hai / rahi hai) is still being decided by the gender of the subject (he/she). Even if a higher order language model is employed, the language model gives equal preference to both the translations as the information about the gender of the subject is completely absent in the Hindi translation. Hence, the information that Woh corresponds to masculine in example 2a and feminine in example 2b has to come from the source sentence (He/She). 3) Most often in PBSMT systems, the subject and verb phrases are far apart and hence are extracted independently, as in the case of example 1. Since there are no constraints during decoding on which phrases to choose, mis-matched phrases may get picked. Apart from verb inflections, the presence of the case-marker 'ne' (shown in example 3) on the subject blocks the transfer of the subject's gender onto the verb phrase and the verb phrase instead gets inflected with the gender of the object(apple). This blocking/presence of case markers is also not captured by traditional PBSMT systems. ",
"cite_spans": [
{
"start": 112,
"end": 133,
"text": "(Gandhe et al., 2011)",
"ref_id": "BIBREF3"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Motivation",
"sec_num": "2"
},
{
"text": "The agreement constraints can be applied to either the translation model or the language model, such that implausible combination of phrases are not picked for the best hypothesis. In our approach, we apply the agreement constraints on the translation model by filtering phrase-pairs which have an incorrect inflection on the target phrase. Since the problem of inconsistent output is mainly due to the subject, object and verb phrases, we determine agreement constraints only for these target words. For instance, suppose a 'female' gender inflection is expected on the target verb. Then, any phrase that contains 'male' gender inflection on the verb will produce an inconsistent translation and hence should be penalized. We can also add phrase-pairs when the correct inflection is not present in the phrase table.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Model",
"sec_num": "3"
},
{
"text": "The easiest way to filter the inconsistent phrasepairs is to create manual rules to look at the English source side that specify the possible set of target translations and discard the rest. For instance, using example 3 in Section 2, we could create a manual rule, \"When the English verb tense is 'past', Hindi subject takes the case marker 'ne' and the verb phrase takes the gender and number of the 'subject' \". However, this is time consuming and it is difficult to create an exhaustive list of such rules. Hence, it is imperative that we learn these rules from data. In this paper, we use multi-class support vector machine (Crammer and Singer, 2001) classifiers that use features only from the input source sentence to predict possible target case marker/inflections for the subject, object and verb phrases in the target sentence. We treat these as the allowed inflections on the target phrases and penalize phrase-pairs that do not contain the predicted target inflections. This methodology is expected to prevent implausible sentences being translated and improve the overall fluency of the translated sentence.",
"cite_spans": [
{
"start": 629,
"end": 655,
"text": "(Crammer and Singer, 2001)",
"ref_id": "BIBREF1"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Model",
"sec_num": "3"
},
{
"text": "We model the prediction of the possible target inflections for a given input sentence as a classification problem. We build different classifiers 1 to predict the target inflections of parts of the input sentence for which the translations are dependent on long range morphological rules. The features that we use for the different classifiers are listed in Section 5. The classifiers built are as follows:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Classification",
"sec_num": "4"
},
{
"text": "Subject Classifier (SubCM) and Object Classifier (ObjCM): predicts the case marker on the subject and the object.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Classification",
"sec_num": "4"
},
{
"text": "Verb Phrase Classifier (Vp): is used to predict the inflections on the verbs.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Classification",
"sec_num": "4"
},
{
"text": "Subject and Object phrases, when translated from English into a morphological rich language, often contain inflections of gender and number. Some languages also generate a case marker to denote the subject or the object. If such a case marker is not present, the target sentence often may not make sense. For our experiments from English to Hindi, we looked at predicting the correct case marker. To obtain the possible case markers that can come after a subject or the object in target language (in our case Hindi), we look at all the case markers following a subject and those that follow the object. If a language has linguistic resources such as parsers, this can be done easily. Since Hindi, and many other languages do not have a good parser, we make use of automatic word alignments obtained from bilingual data to project the subject information from English to Hindi, and determine the case markers following the subject and the object on the target side. Using this technique, we found 4 classes for the subject classifier and 3 1 we use the libsvm library: http://www.csie.ntu.edu.tw/ cjlin/libsvm/ classes for the object classifier. For the prediction of the classes, we use all the noun phrase features (in Section 5.1), tense feature of the verb phrase (in Section 5.2) and tense conjugate features (in Section 5.3).",
"cite_spans": [
{
"start": 1039,
"end": 1040,
"text": "1",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Subject and Object Classifier",
"sec_num": "4.1"
},
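To make the classification setup concrete, here is a minimal sketch of a subject case-marker classifier trained on source-side features. It is hypothetical: the paper uses the libsvm tools directly, whereas this sketch uses scikit-learn's SVC (a libsvm wrapper), and the feature names, toy examples and labels are illustrative rather than the paper's actual feature set.

```python
# Minimal sketch of the subject case-marker classifier (SubCM), assuming the
# source-side features of Section 5 have already been extracted from the
# English parse. Feature names, values and labels are illustrative only.
from sklearn.feature_extraction import DictVectorizer
from sklearn.svm import SVC  # scikit-learn's SVC wraps libsvm

# One training example per (subject, object, verb) triple; the label is the
# case marker observed after the aligned Hindi subject (NULL, ne, ke, ki).
train_feats = [
    {"subj_gender_fem": 1, "subj_plural": 0, "tense_past": 1, "aspect_perfect": 1},
    {"subj_gender_fem": 0, "subj_plural": 0, "tense_past": 0, "aspect_perfect": 0},
    {"subj_gender_fem": 1, "subj_plural": 1, "tense_past": 1, "aspect_perfect": 0},
    {"subj_gender_fem": 0, "subj_plural": 1, "tense_past": 0, "aspect_perfect": 1},
]
train_labels = ["ne", "NULL", "ne", "NULL"]

vec = DictVectorizer()
clf = SVC(kernel="linear").fit(vec.fit_transform(train_feats), train_labels)

# At decoding time the same features are computed from the input sentence and
# the prediction constrains which phrase-pairs survive (Section 6).
test = vec.transform([{"subj_gender_fem": 1, "subj_plural": 0,
                       "tense_past": 1, "aspect_perfect": 0}])
print(clf.predict(test))            # e.g. ['ne']
print(clf.decision_function(test))  # raw score, usable as a soft constraint
```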
{
"text": "Table 1 (excerpt of classes): SubCM: NULL, ne, ke (of), ki (of); ObjCM: NULL, ko (of), mein (in); Vp: X raha tha (was X+ing), X+nA chahiye (should X), X+nI chahiye (should X), X+A gayA (was X+ed), ...",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "SubCM ObjCM Vp",
"sec_num": null
},
{
"text": "Verb phrases contain morphological information about the gender, number, person, tense and aspect of the sentence. It is hence important to produce the right inflections and auxiliary verbs. Since it is impractical to have a class for each verb, we convert the verb phrases to an abstract form and also predict the target verb phrase in its abstract form. For instance, the verb phrase 'was playing' will be generalized to 'was X+ing' form and the corresponding predicted class would be 'X raha tha'.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Verb Phrase Classifier 1",
"sec_num": "4.2"
},
{
"text": "A simple approach to find the possible output forms of the classifier is to mine the target language data for all the verb phrases, rank them by frequency and filter them based on a threshold to yield the different forms that the verbs can take in the language. The aggregated verb phrases can be normalized by replacing the root verb in these phrases by an 'X' tag to obtain the possible abstract forms for the target verb phrases. For Hindi, verb phrases were identified by using a simple part-of-speech (POS) tagger to tag the monolingual data and to capture continuous sequences of 'V' tags. We found 120 Hindi verb classes in all. Some of these classes are listed in Table 1 . We use all the features listed in Section 5.",
"cite_spans": [],
"ref_spans": [
{
"start": 672,
"end": 679,
"text": "Table 1",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Verb Phrase Classifier 1",
"sec_num": "4.2"
},
{
"text": "Having too many classes for verb phrases causes the following problems: a) During our initial experiments we found that out of the 120 verb classes specified by us, only 60 were present in the bilingual training data. This reduces the chances of predicting a correct class since the classifier does not see all classes during training. b) The classifier sees only a few instances of each class. To simplify the verb phrase prediction, we split the prediction such that instead of predicting each verb form, we predict each 'kind' of inflection that modifies the verb phrase. Since each verb phrase in our training data contains information about the gender, number and person, each class now has ample amount of training examples.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Verb Phrase Classifier 2",
"sec_num": "4.3"
},
{
"text": "Gender Classifier (VpG): This classifier predicts the gender inflections on the target verb phrases using features from the source sentence.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Verb Phrase Classifier 2",
"sec_num": "4.3"
},
{
"text": "Number Classifier (VpN): This classifier predicts the number inflections on the target verb phrases using features from the source sentence.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Verb Phrase Classifier 2",
"sec_num": "4.3"
},
{
"text": "Person Classifier (VpP): This classifier predicts the Person information of the target verb phrases given the source sentence features.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Verb Phrase Classifier 2",
"sec_num": "4.3"
},
{
"text": "The three classifiers have two, two and three classes, respectively.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Verb Phrase Classifier 2",
"sec_num": "4.3"
},
{
"text": "The predicted gender, number and person is then used to select the target verb form: Base Verb form Function: Given the input English verb phrase, this function outputs all possible translations (that is, with all possible inflections and auxiliary verbs) of the given verb form. For example, for the verb phrase 'is playing' in the example in Section 1, this function will produce 12 target verb forms, one each for possible combinations of elements from the sets (masculine and feminine), (singular and plural) and (first, second and third person). The function for producing the list of verb forms given the English verb form is implemented using machine alignments and monolingual data as done in Gandhe et al. (2011) . It uses parallel data to extract all the source-target verb phrase-pairs from the word-aligned data. These source-target verb phrase-pairs are converted into an abstract form by replacing the root verb with an 'X' (as done in Section 4.2). Aggregating this over a large amount of parallel data and filtering out the low frequency phrase-pairs gives us translations of a source verb form into its corresponding target forms. The gender, number and person for each of the target verb forms can be found out by looking at the inflections, suffixes and auxiliary verbs.",
"cite_spans": [
{
"start": 701,
"end": 721,
"text": "Gandhe et al. (2011)",
"ref_id": "BIBREF3"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Verb Phrase Classifier 2",
"sec_num": "4.3"
},
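The combination of the three split predictions with the base verb form function can be pictured with a short sketch. It is a hypothetical illustration: the dictionary name, the tuple layout and the toy Hindi forms are assumptions for exposition, not the paper's actual data structures.

```python
# Minimal sketch of selecting a target verb form from the predictions of the
# split classifiers VpG, VpN and VpP (Section 4.3). `base_verb_forms` stands
# in for the base verb form function built from aligned data; the entries
# shown are illustrative.
def select_verb_form(base_verb_forms, src_abstract_vp, gender, number, person):
    """Return the abstract target verb forms that agree with the predictions."""
    candidates = base_verb_forms.get(src_abstract_vp, [])
    return [form for (form, g, n, p) in candidates
            if (g, n, p) == (gender, number, person)]

# Toy entry: 'is X+ing' with two of its possible inflected Hindi forms.
base_verb_forms = {
    "is X+ing": [("X rahA hai", "masc", "sg", "third"),
                 ("X rahI hai", "fem", "sg", "third")],
}
print(select_verb_form(base_verb_forms, "is X+ing", "fem", "sg", "third"))
# ['X rahI hai']
```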
{
"text": "We use an English parser to parse the source sentence and obtain the different features. Using the alignments of the subject, object and verb phrase, we project them onto the target language and extract the expected output case-marker/inflections for each of the three cases (SubCM, ObjCM, Vp) and assign it the corresponding class. Our approach is not limited to hand-alignments. Alignments obtained from automatic aligners can also be used. Since hand-alignments were available beforehand, we made use of these alignments in this work. We will explore the usability of automatic aligners as future work. We now briefly describe the features that we used for the above classifiers.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Training",
"sec_num": "4.4"
},
{
"text": "Given the parse tree of an English sentence, we determine the subject noun phrases and the object noun phrases for each of the verb phrases present in the input sentence giving (subject,object,verb) triples. We also determine the morphological information about the subject, object and verb phrases in sentence (in Sections 5.1 and 5.2). Most of the features described are boolean, unless specified otherwise. Figure 1 shows an example of an English-Hindi word-aligned sentence-pair. The dependency parse of the English sentence is used to determine the source subject (sita), object(chess) and the verb phrase (is playing). Features are calculated over these phrases and the target words aligned to them in the word alignments are used to create the training examples for the three classifiers.",
"cite_spans": [],
"ref_spans": [
{
"start": 410,
"end": 418,
"text": "Figure 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Features",
"sec_num": "5"
},
{
"text": "!\"#$%%%\"&%%%%'($)\"*+%%%%,-.&&%%%/\"#-%%%#-.%%%%%+0$*1%2$&#.0% nsubj(playing-3, Sita-1) aux(playing-3, is-2) root(ROOT-0, playing-3) dobj(playing-3, chess-4) prep(playing-3, with-5) det(master-8, the-6) amod(master-8, grand-7) pobj(with-5, master-8) Subject: sita (fem,sing,third) No case marker Object: chess (mas,sing,third) case marker 'Ko' Verb Phrase: is playing (present continuous,third person) Sita is playing chess with the grand master (sItaa)(graand)(master) (ke) (saath) (ches) (ko) (khel) (rahI) (haE) Alignment 3&4#$$5%3+0$*15%32$&#.05%36.5%3&$$#-5%3,-.&5%3675%36-.(5%30$-45%3-$85%% *&9:;3'($)\"*+<=>%!\"#$<?5% $9@3'($)\"*+<=>%\"&<A5% 077#3BCCD<E>%'($)\"*+<=5% 17:;3'($)\"*+<=>%,-.&&<F5% '0.'3'($)\"*+<=>/\"#-<G5% 1.#32$&#.0<H>%#-.<I5% $27132$&#.0<H>%+0$*1<J5% '7:;3/\"#-<G>%2$&#.0<H5% !9:;.,#K%!\"#$%3L.2>%&\"*+>%#-\"015% %%%%%%%%%%%%%%%*7%,$&.%2$06.0% % C:;.,#K%,-.&&%32$&,>%&\"*+>%#-\"015% %%%%%%%%%%%%%%,$&.%2$06.0%M67N% % O.0:%'-0$&.K%\"&%'($)\"*+% 3'0.&.*#%,7*P*979&>%#-\"01%'.0&7*5% Figure 1 : An English parse with features.",
"cite_spans": [
{
"start": 262,
"end": 278,
"text": "(fem,sing,third)",
"ref_id": null
}
],
"ref_spans": [
{
"start": 985,
"end": 993,
"text": "Figure 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Features",
"sec_num": "5"
},
{
"text": "The inflection on the verb phrase is influenced by attributes of a noun phrase:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Noun Phrase Features",
"sec_num": "5.1"
},
{
"text": "Gender: Unlike English, most Indian languages have a gender (male/female) for every subject and object. To determine the gender of an English word, we take its most common Hindi translation and assign the gender of this translation to the English word. Gender of Hindi words can be determined by mining the Hindi monolingual data for (noun phrase,verb phrase) pairs using a simple POS tagger on Hindi data. POS taggers are now easily available for most Indian languages. However, no other rich sources such as, parsers or morphological analyzers are used on the target language. We then assign the gender of the verb phrase suffix ('a' for masculine and 'I' for feminine) to the words in the noun phrase. Doing this over a large amount of data gives us the list of nouns with their gender. For example, the Hindi word 'kItAb' is seen with verb phrases such as, 'padI','dI', etc. in the monolingual data. Since 'kItAb' occurs most with verb phrases ending in suffix 'I', its gender is 'female'. The English word 'book' translates most often to 'kItAb' and is hence assigned the gender 'female' and the corresponding feature value of 1. For words like, 'house', which are determined to be 'male', the value is 0.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Noun Phrase Features",
"sec_num": "5.1"
},
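The gender-mining heuristic lends itself to a short sketch. The version below is a simplification under stated assumptions: it only looks at adjacent (noun, verb) token pairs rather than full (noun phrase, verb phrase) pairs, and the POS tag names and suffix tests are placeholders for whatever the Hindi tagger actually outputs.

```python
# Minimal sketch of mining noun gender from POS-tagged Hindi monolingual data.
# Tag names ("NN", "V...") and the suffix test are simplified placeholders.
from collections import Counter, defaultdict

def mine_noun_gender(tagged_sentences):
    """tagged_sentences: list of [(word, pos_tag), ...] Hindi sentences."""
    suffix_counts = defaultdict(Counter)
    for sent in tagged_sentences:
        # Simplification: look only at adjacent (noun, verb) token pairs.
        for (noun, noun_tag), (verb, verb_tag) in zip(sent, sent[1:]):
            if noun_tag.startswith("NN") and verb_tag.startswith("V"):
                if verb.endswith("A"):      # masculine verb inflection
                    suffix_counts[noun]["masc"] += 1
                elif verb.endswith("I"):    # feminine verb inflection
                    suffix_counts[noun]["fem"] += 1
    # Majority vote over the whole corpus gives each noun's gender.
    return {noun: counts.most_common(1)[0][0]
            for noun, counts in suffix_counts.items()}

genders = mine_noun_gender([[("kItAb", "NN"), ("padI", "VM")],
                            [("kItAb", "NN"), ("dI", "VM")]])
print(genders)   # {'kItAb': 'fem'}
```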
{
"text": "Number: Similar to the gender, the singularity or plurality of the noun phrase influences the inflection on the verb phrase. The plurality of the English noun can be determined by using a POS tagger and looking for a 'NNS' tag or in case of pronouns, a finite list of pronouns. Hence, nouns in plural form and the pronouns, 'they','us','them', were given the feature value as 1. For all other singular words, the value is 0.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Noun Phrase Features",
"sec_num": "5.1"
},
{
"text": "Presence of case marker: Perhaps the most important feature, the presence or absence of a case marker on the target subject and object phrase decides the transfer of inflections from the noun phrases to the verb phrase (examples of Section 2). This is not a source side feature, since case markers are present on the noun phrases in the target language. We cannot use the case marker information directly as we do not have the target side information. Hence they are used in two steps: a) Subject and Object classifiers (Section 4) are used to predict the noun phrase (subject,object) case markers and b) The predicted case markers are used as an input to the verb phrase classifier. This feature is not used as an input to the subject and object classifiers. If a subject/object case marker is present, the features are valued 1, else 0.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Noun Phrase Features",
"sec_num": "5.1"
},
{
"text": "The verb phrase features influence the tense, aspect and person of the target verb phrase as well as the case marker presence on the noun phrases. The verb phrase extracted from the dependency parse of the input sentence are morphologically segmented (Minnen et al., 2001 ) and the different aspects of the verb phrase are obtained from it.",
"cite_spans": [
{
"start": 251,
"end": 271,
"text": "(Minnen et al., 2001",
"ref_id": "BIBREF11"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Verb Phrase Features",
"sec_num": "5.2"
},
{
"text": "Tense Features: The tense features tell the presence or absence of Present, Past and Future tense. For instance, for the verb phrase 'was explained', the present and future features take the value 0 and the past feature takes the value 1.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Verb Phrase Features",
"sec_num": "5.2"
},
{
"text": "Aspect Features: The aspect features are important in deciding the final form and and the auxiliaries in the target sentence. We label the features as simple, progressive and perfect. In this case, a verb phrase with a 'ing' suffix is said to be progressive, whereas a verb phrase with 'have' and its inflections is said to be perfect. For example, the phrase 'has been explaining' will have both progressive and perfect features with value 1.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Verb Phrase Features",
"sec_num": "5.2"
},
{
"text": "Mood Features: The mood features capture the obligation, conditional and probability mood in the input English sentence by looking at the modal verbs which are required to produce the corresponding auxiliary verbs in Hindi.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Verb Phrase Features",
"sec_num": "5.2"
},
{
"text": "Number: English verb forms with plurality inflection translates into plurality of the Hindi verbs.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Verb Phrase Features",
"sec_num": "5.2"
},
{
"text": "Person: English auxiliary verb 'am' denotes the presence of first person. By looking at the subject of the verb in the dependency parser, (first, second or third) the person information can be assigned to the verb phrase.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Verb Phrase Features",
"sec_num": "5.2"
},
{
"text": "These features capture the more language-specific nuances that together decide the transfer of inflections from nouns to verbs. These features try to emulate the behavior of grammar rules. Case marker-Gender: When a case marker is not present on the noun phrase, the inflection from them is likely to be transfered to the verb phrase. For this case, we assign this feature the same value as the gender of the noun phrase. When a case marker is present, information is blocked and hence we assign a null value to this feature.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conjugate Features",
"sec_num": "5.3"
},
{
"text": "This feature captures blocking of the number information and takes a value 0 or 1 depending on the presence or absence of case marker.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Case marker-Number:",
"sec_num": null
},
{
"text": "Tense-Gender: When the tense of the sentence is past, it is likely that the gender information is blocked. Hence, when the tense is past, this feature is assigned a null value. Otherwise, the value is same as the value of the gender feature.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Case marker-Number:",
"sec_num": null
},
{
"text": "Tense-Number: Similar to the previous one, except that this captures the blocking of number information.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Case marker-Number:",
"sec_num": null
},
{
"text": "We used a PBSMT system, similar to Tillman et al. (2006) , to decode and this required slight modifications to incorporate our approach. The extracted phrase-pairs have phrase translation probabilities and lexical probabilities estimated (similar to Papineni et al. (2002) ). The input sentence is passed through a parser to determine the subject, object and the verb phrases in the sentence. Various features mentioned in the previous section are computed during run time and the classfiers are used to predict the subject case marker, object case marker and the verb phrase inflection. The agreement constraints can be applied as:",
"cite_spans": [
{
"start": 35,
"end": 56,
"text": "Tillman et al. (2006)",
"ref_id": "BIBREF16"
},
{
"start": 250,
"end": 272,
"text": "Papineni et al. (2002)",
"ref_id": "BIBREF12"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Decoding",
"sec_num": "6"
},
{
"text": "Hard Removal: All phrase-pairs that do not agree with the predicted case marker or inflections are removed from the phrase table before the hypothesis search.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Decoding",
"sec_num": "6"
},
{
"text": "Soft Removal: The agreement model outputs the prediction probabilities for different target case markers or inflections. This probability score can be used as a feature in the phrase table and trained on a development data set.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Decoding",
"sec_num": "6"
},
{
"text": "Addition: If the predicted case marker or inflection is not present in the original phrase table, the correct phrase-pair can be added by automatically generating the target phrase.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Decoding",
"sec_num": "6"
},
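The hard removal and addition steps can be pictured with a small sketch. The function below is hypothetical: the phrase-table representation, the case-marker inventory and the fallback translation argument are assumptions made for illustration, not the decoder's actual interfaces.

```python
# Minimal sketch of hard removal plus addition for one source subject word
# (Section 6). Data layout and names are assumed for illustration.
SUBJ_CASE_MARKERS = {"ne", "ko", "ke", "ki"}   # assumed inventory

def apply_subject_constraint(phrase_table, src_word, predicted_cm, fallback_translation):
    """Keep only target phrases agreeing with the predicted case marker."""
    candidates = phrase_table.get(src_word, [])

    def agrees(target):
        # A target agrees if its trailing token matches the predicted case
        # marker; 'NULL' means no case marker should follow the subject.
        last = target.split()[-1]
        return last not in SUBJ_CASE_MARKERS if predicted_cm == "NULL" else last == predicted_cm

    kept = [t for t in candidates if agrees(t)]
    # Addition: if the predicted marker is missing from the table, build a new
    # target phrase from the most common translation of the source word.
    if predicted_cm != "NULL" and not kept:
        kept = [fallback_translation + " " + predicted_cm]
    phrase_table[src_word] = kept
    return phrase_table

table = {"ram": ["rAm", "rAm ne"]}
print(apply_subject_constraint(table, "ram", "ne", "rAm"))   # {'ram': ['rAm ne']}
```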
{
"text": "The input sentence is fed into the agreement model to produce the constraints for the subject, object and verb phrases. We use the hard constraint and addition techniques during decoding. Applying soft constraints will be done in future work. For subject and object phrases, we aggregate the phrase-pairs in the phrase table which contain the English source word. From these, all phrase-pairs that do not agree with the predicted case markers on the target side are filtered. In addition, if the predicted case marker is not present in the phrase table, we add the phrase-pair with the right case marker into the phrase table. This is done by looking for the most common target translations of the source word and appending the predicted case marker to them. For verb phrases, we aggregate the phrase-pairs containing the English verb phrase.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Decoding",
"sec_num": "6"
},
{
"text": "All phrase-pairs which do not have the predicted target verb phrase inflections are filtered. Since we do not know the complete translation of the source verb phrase at this step, we look only for the predicted target verb phrase's inflection and auxiliary verbs. If no correct verb phrase form is found in the phrase table, the target phrase is generated using the most common translation of the English verb and the phrase-pair is added. Inorder to score these new phrase-pairs, we can make use of the automatically generated bilingual dictionaries created during the automatic word-alignment phase. The phrase-pairs and entries in the dictionaries can be stemmed to their base forms (removing inflections) using Ramanathan et al. (2003) . In cases where there are multiple instances of the same verb (caused due to stemming) present in the modified dictionary, the average of the probabilities is taken. The lexical probabilities for the phrase-pairs can then be estimated as given in Papineni et al. (2002) from the modified dictionaries. To obtain the phrase translation probabilities, the scores from the classifiers are converted to a score between 0 and 1 using a logistic function (1/(1 + e \u2212score ), where, score:classifier's score) and then re-normalized such that the sum of probabilities of all the target phrases for a particular source phrase is one (and vice versa). In the case of 'Verb Phrase Classifier 2' (Section 4.3), the scores from each of the classifiers is first converted to a score between 0 and 1 using a logistic function, summed and then re-normalized.",
"cite_spans": [
{
"start": 715,
"end": 739,
"text": "Ramanathan et al. (2003)",
"ref_id": "BIBREF14"
},
{
"start": 988,
"end": 1010,
"text": "Papineni et al. (2002)",
"ref_id": "BIBREF12"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Decoding",
"sec_num": "6"
},
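The score-to-probability conversion just described is simple enough to show directly. The sketch below follows the stated recipe (logistic squashing followed by renormalization over the target phrases of one source phrase); the phrase strings and scores are illustrative.

```python
# Minimal sketch of turning classifier scores into phrase translation
# probabilities for added phrase-pairs (Section 6): logistic squashing
# followed by renormalization over all target phrases of one source phrase.
import math

def scores_to_probs(scores):
    """scores: dict mapping target phrase -> raw classifier score."""
    squashed = {t: 1.0 / (1.0 + math.exp(-s)) for t, s in scores.items()}
    total = sum(squashed.values())
    return {t: v / total for t, v in squashed.items()}

probs = scores_to_probs({"khel rahA hai": 1.3, "khel rahI hai": -0.4})
print(probs)  # probabilities over the target phrases sum to 1
```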
{
"text": "We first report the results of prediction of noun phrases and verb phrases and proceed on to report the results of using them in PBSMT.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experiments",
"sec_num": "7"
},
{
"text": "To aggregate the classes required for subject, object and verb phrase classifiers, we used 1.4 million Hindi monolingual sentences crawled from the web. We pos-tagged this data using iit kgp Hindi pos tagger 2 . The monolingual data, along with 280,000 automatic alignments of sentencepairs, was used to apply the technique suggested in Gandhe et al. (2011) to build the base verb form function described in Section 4.2. The svm classifiers were trained and tested using libsvm 3 . To extract the features from manually aligned sentences, we used the Stanford Parser 4 to obtain the English dependency parse trees. The source English side was morphologically segmented using morpha (Minnen et al., 2001 ) and the target Hindi side was segmented using an approach described in Ramanathan et al. (2003) . Table 2 gives the accuracies of the classifiers when trained with a particular set of features. The conjugate features make a significant improvement to all the three classifiers. Hindi object case markers are easier to predict than subject case markers since the objects usually do not occur with a case marker. Also, the subject case markers show a high dependency on the verb phrase features, which is explained by grammatical rules, according to which tense and structure of the verb phrase decide the case marker on the subject. It is important to remember here that the verb phrase classifier uses the output of the case-markers predicted by noun classifiers as a feature. The prediction accuracy is low for the Vp classifier even with conjugate features due to the large number of classes. Most classes do not have sufficient training examples and a few classes were even absent in the training data. When we split this classification into separate tasks as explained in Section 4.3 and later combine the output of individual classifiers to obtain the predicted verb phrase, we obtain a much better accuracy. The results of this configuration are shown in Table 3 . Since the verb phrase classifier uses case-markers as a feature, we also analyze the importance of these for verb phrase prediction and study 3 different settings: a) Removing the case marker (CM) feature, b) Using Gold case markers from the reference and c) Using the predicted case markers. Although the prediction accuracies are best for GoldCM, using the predicted case markers results in only a slight drop in accuracy. ",
"cite_spans": [
{
"start": 337,
"end": 357,
"text": "Gandhe et al. (2011)",
"ref_id": "BIBREF3"
},
{
"start": 682,
"end": 702,
"text": "(Minnen et al., 2001",
"ref_id": "BIBREF11"
},
{
"start": 776,
"end": 800,
"text": "Ramanathan et al. (2003)",
"ref_id": "BIBREF14"
}
],
"ref_spans": [
{
"start": 803,
"end": 810,
"text": "Table 2",
"ref_id": "TABREF4"
},
{
"start": 1966,
"end": 1973,
"text": "Table 3",
"ref_id": "TABREF6"
}
],
"eq_spans": [],
"section": "Prediction Evaluation",
"sec_num": "7.1"
},
{
"text": "The system was trained on 285,000 automatically aligned sentences. The baseline system uses the standard decoding algorithm while our approach prunes the phrase table before decoding. We measure the translation quality using a single reference BLEU (Papineni et al., 2002) . The test set contains 715 sentences from the News domain. Table 4 gives the comparison of the baseline with the two systems (Note: In both systems, the case marker features are obtained from the predictions of the subject and object classifiers): Pred1: Verb phrase prediction as a single task (Table 2) Pred2: Verb phrase prediction split into individual components (Table 3) . The BLEU score increase is small on Pred1 but was significantly better with Pred2 with p < 0.0001 with the Wilcoxon Signed-Rank test (Wilcoxon, 1945) performed by dividing the test file into 10 equal subfiles (as done in Gangadharaiah et al. (2010) ). On analysis of the refer-ence, we found the tense of the verb phrases in the Hindi reference to be different from that of English. Also, often the presence of auxiliary verbs 'hona' in the Hindi reference changed the structure of the verb phrase. The output produced by our system is more literal and in congruence with the grammar of the input sentence. Callison et al. (2006) list the disadvantages of using BLEU. The differences in translations between the proposed approaches and the baseline are most often a correction of inflection, and sometimes this resulted in better selection of neighboring words by the language model. BLEU failed to accommodate these improvements, hence we also performed human evaluation to judge the quality of the translations on adequacy and fluency using a scale of 1-5 5 .",
"cite_spans": [
{
"start": 249,
"end": 272,
"text": "(Papineni et al., 2002)",
"ref_id": "BIBREF12"
},
{
"start": 787,
"end": 803,
"text": "(Wilcoxon, 1945)",
"ref_id": "BIBREF18"
},
{
"start": 875,
"end": 902,
"text": "Gangadharaiah et al. (2010)",
"ref_id": "BIBREF4"
},
{
"start": 1261,
"end": 1283,
"text": "Callison et al. (2006)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [
{
"start": 333,
"end": 340,
"text": "Table 4",
"ref_id": "TABREF8"
},
{
"start": 642,
"end": 651,
"text": "(Table 3)",
"ref_id": "TABREF6"
}
],
"eq_spans": [],
"section": "Machine Translation Evaluation",
"sec_num": "7.2"
},
{
"text": "We gave 100 randomly picked sentences from the test set to a single human judge. We see that our approach (Table 4 ) has a greater impact on fluency, suggesting that grammatical agreement is important for fluency. Adequacy improvement can be attributed to the correct translations of the case markers and the tense information.",
"cite_spans": [],
"ref_spans": [
{
"start": 106,
"end": 114,
"text": "(Table 4",
"ref_id": "TABREF8"
}
],
"eq_spans": [],
"section": "Machine Translation Evaluation",
"sec_num": "7.2"
},
{
"text": "We modeled the task of case marker and inflection prediction as a classification task.The prediction accuracies show that the inflections on the verbs are highly influenced by the case markers on the subjects and objects. Similarly, the case markers on subjects are affected by the tense of the verb phrases. Since all the features are extracted from the source side, this approach can be easily applied for improving translation quality from English to any morphologically rich foreign language. More work can be done on creating features that encode the grammatical rules we might have missed.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion and future work",
"sec_num": "8"
},
{
"text": "Even though the gain in translation quality with the BLEU score was small, human evaluation showed that this approach helps in improving the fluency and adequacy of the sentence and hence makes it more readable. Future work can be on using more than one possible case marker-verb phrase constraints (i.e., as a soft constraint) for a given input and applying this approach for other language-pairs where the target language is morphologically rich.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion and future work",
"sec_num": "8"
},
{
"text": "http://nltr.org/snltr-software/ 3 http://www.csie.ntu.edu.tw/ cjlin/libsvm/ 4 http://nlp.stanford.edu/software/lex-parser.shtml",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "We used the scale defined in http://projects.ldc.upenn.edu/TIDES/Translation/TransAssess04.pdf",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Re-evaluating the role of bleu in machine translation research",
"authors": [
{
"first": "C",
"middle": [],
"last": "Callison-Burch",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Osborne",
"suffix": ""
},
{
"first": "P",
"middle": [],
"last": "Koehn",
"suffix": ""
}
],
"year": 2006,
"venue": "EACL",
"volume": "",
"issue": "",
"pages": "249--256",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "C. Callison-Burch, M. Osborne, and P. Koehn. 2006. Re-evaluating the role of bleu in machine translation research. In EACL, pages 249-256.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "On the algorithmic implementation of multiclass kernel-based vector machines",
"authors": [
{
"first": "K",
"middle": [],
"last": "Crammer",
"suffix": ""
},
{
"first": "Y",
"middle": [],
"last": "Singer",
"suffix": ""
}
],
"year": 2001,
"venue": "Journal of Machine Learning Research",
"volume": "",
"issue": "",
"pages": "265--292",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "K. Crammer and Y. Singer. 2001. On the algorith- mic implementation of multiclass kernel-based vec- tor machines. In Journal of Machine Learning Re- search, pages 265-292.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Cross linguistic name matching in english and arabic: a \"one to many mapping\" extension of the levenshtein edit distance algorithm",
"authors": [
{
"first": "A",
"middle": [
"T"
],
"last": "Freeman",
"suffix": ""
},
{
"first": "S",
"middle": [
"L"
],
"last": "Condon",
"suffix": ""
},
{
"first": "C",
"middle": [
"M"
],
"last": "Ackerman",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of the main conference on Human Language Technology Conference of the North American Chapter of the Association of Computational Linguistics, HLT-NAACL '06",
"volume": "",
"issue": "",
"pages": "471--478",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "A. T. Freeman, S. L. Condon, and C. M. Ackerman. 2006. Cross linguistic name matching in english and arabic: a \"one to many mapping\" extension of the levenshtein edit distance algorithm. In Proceedings of the main conference on Human Language Tech- nology Conference of the North American Chap- ter of the Association of Computational Linguistics, HLT-NAACL '06, pages 471-478.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Handling verb phrase morphology for indian languages in machine translation",
"authors": [
{
"first": "A",
"middle": [],
"last": "Gandhe",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Gangadharaiah",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Visweswariah",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Ramakrishnan",
"suffix": ""
}
],
"year": 2011,
"venue": "Proceedings of the International Joint Conference on Natural Langauge Processing. Asian federation for NLP",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "A. Gandhe, R. Gangadharaiah, K. Visweswariah, and A. Ramakrishnan. 2011. Handling verb phrase mor- phology for indian languages in machine translation. In Proceedings of the International Joint Conference on Natural Langauge Processing. Asian federation for NLP.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Monolingual distributional profiles for word substitution in machine translation",
"authors": [
{
"first": "R",
"middle": [],
"last": "Gangadharaiah",
"suffix": ""
},
{
"first": "R",
"middle": [
"D"
],
"last": "Brown",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Carbonell",
"suffix": ""
}
],
"year": 2010,
"venue": "Proceedings of the 23rd International Conference on Computational Linguistics",
"volume": "",
"issue": "",
"pages": "320--328",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "R. Gangadharaiah, R. D. Brown, J. Carbonell. 2010. Monolingual distributional profiles for word sub- stitution in machine translation. In Proceedings of the 23rd International Conference on Computa- tional Linguistics, pages 320-328.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "A class-based agreement model for generating accurately inflected translations",
"authors": [
{
"first": "S",
"middle": [],
"last": "Green",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Denero",
"suffix": ""
}
],
"year": 2012,
"venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics, ACL",
"volume": "",
"issue": "",
"pages": "146--155",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "S. Green and J. DeNero. 2012. A class-based agreement model for generating accurately inflected translations. In Proceedings of the 50th Annual Meeting of the Association for Computational Lin- guistics, ACL, pages 146-155.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Four techniques for online handling of out-of-vocabulary words in arabic-english statistical machine translation",
"authors": [
{
"first": "N",
"middle": [],
"last": "Habash",
"suffix": ""
}
],
"year": 2008,
"venue": "Proceedings of the 46th Annual Meeting of the Association for Computational Linguistics on Human Language Technologies: Short Papers, HLT-Short '08",
"volume": "",
"issue": "",
"pages": "57--60",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "N. Habash. 2008. Four techniques for online han- dling of out-of-vocabulary words in arabic-english statistical machine translation. In Proceedings of the 46th Annual Meeting of the Association for Compu- tational Linguistics on Human Language Technolo- gies: Short Papers, HLT-Short '08, pages 57-60.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Europarl: A parallel corpus for statistical machine translation",
"authors": [
{
"first": "P",
"middle": [],
"last": "Koehn",
"suffix": ""
}
],
"year": 2005,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "P. Koehn. 2005. Europarl: A parallel corpus for statis- tical machine translation. In MT Summit.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Moses: Open Source Toolkit for Statistical Machine Translation. Annual Meeting of the Association for Computational Linguistics (ACL)",
"authors": [
{
"first": "P",
"middle": [],
"last": "Koehn",
"suffix": ""
},
{
"first": "H",
"middle": [],
"last": "Hoang",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Birch",
"suffix": ""
},
{
"first": "C",
"middle": [],
"last": "Callison-Burch",
"suffix": ""
},
{
"first": "F",
"middle": [],
"last": "Marcello",
"suffix": ""
},
{
"first": "N",
"middle": [],
"last": "Bertoldi",
"suffix": ""
},
{
"first": "B",
"middle": [],
"last": "Cowan",
"suffix": ""
},
{
"first": "W",
"middle": [],
"last": "Shen",
"suffix": ""
},
{
"first": "C",
"middle": [],
"last": "Moran",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Zens",
"suffix": ""
},
{
"first": "C",
"middle": [],
"last": "Dyer",
"suffix": ""
},
{
"first": "O",
"middle": [],
"last": "Bojar",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Constantin",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Herbst",
"suffix": ""
}
],
"year": 2007,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "P. Koehn, H. Hoang, A. Birch, C. Callison-Burch, F. Marcello, N. Bertoldi, B. Cowan, W. Shen, C. Moran, R. Zens, C. Dyer, O. Bojar, A. Constantin, E. Herbst. 2007. Moses: Open Source Toolkit for Statistical Machine Translation. Annual Meet- ing of the Association for Computational Linguistics (ACL), demonstration session.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Stat-xfer: a general search-based syntax-driven framework for machine translation",
"authors": [
{
"first": "A",
"middle": [],
"last": "Lavie",
"suffix": ""
}
],
"year": 2008,
"venue": "Proceedings of the 9th international conference on Computational linguistics and intelligent text processing, CICLing",
"volume": "",
"issue": "",
"pages": "362--375",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "A. Lavie. 2008. Stat-xfer: a general search-based syntax-driven framework for machine translation. In Proceedings of the 9th international conference on Computational linguistics and intelligent text pro- cessing, CICLing, pages 362-375.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Generating complex morphology for machine translation",
"authors": [
{
"first": "E",
"middle": [],
"last": "Minkov",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Toutanova",
"suffix": ""
},
{
"first": "H",
"middle": [],
"last": "Suzuki",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the 45th Annual Meeting of the Association of Computational Linguistics, ACL",
"volume": "",
"issue": "",
"pages": "128--135",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "E. Minkov, K. Toutanova, and H. Suzuki. 2007. Gen- erating complex morphology for machine transla- tion. In Proceedings of the 45th Annual Meeting of the Association of Computational Linguistics, ACL, pages 128-135.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Applied morphological processing of english",
"authors": [
{
"first": "G",
"middle": [],
"last": "Minnen",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Carroll",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Pearce",
"suffix": ""
}
],
"year": 2001,
"venue": "Natural Language Engineering",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "G. Minnen, J. Carroll, and D. Pearce. 2001. Applied morphological processing of english. In Natural Language Engineering.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "BLEU: a method for automatic evaluation of machine translation",
"authors": [
{
"first": "K",
"middle": [],
"last": "Papineni",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Roukos",
"suffix": ""
},
{
"first": "T",
"middle": [],
"last": "Ward",
"suffix": ""
},
{
"first": "W",
"middle": [
"J"
],
"last": "Zhu",
"suffix": ""
}
],
"year": 2002,
"venue": "Proceedings of ACL-2002: 40th Annual meeting of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "311--318",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "K. Papineni, S. Roukos, T. Ward and W.J. Zhu. 2002. BLEU: a method for automatic evaluation of ma- chine translation. In Proceedings of ACL-2002: 40th Annual meeting of the Association for Compu- tational Linguistics, pages 311-318.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Towards the use of word stems and suffixes for statistical machine translation",
"authors": [
{
"first": "M",
"middle": [],
"last": "Popovic",
"suffix": ""
},
{
"first": "H",
"middle": [],
"last": "Ney",
"suffix": ""
}
],
"year": 2004,
"venue": "Proceedings of The International Conference on Language Resources and Evaluation",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "M. Popovic and H. Ney. 2004. Towards the use of word stems and suffixes for statistical machine trans- lation. In Proceedings of The International Confer- ence on Language Resources and Evaluation.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "A Lightweight Stemmer for Hindi. Workshop on Computational Linguistics for South-Asian Languages, EACL",
"authors": [
{
"first": "A",
"middle": [],
"last": "Ramanathan",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Rao",
"suffix": ""
}
],
"year": 2003,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "A. Ramanathan and D. Rao. 2003. A Lightweight Stemmer for Hindi. Workshop on Computational Linguistics for South-Asian Languages, EACL.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Case markers and Morphology: Addressing the crux of the fluency problem in English-Hindi SMT",
"authors": [
{
"first": "A",
"middle": [],
"last": "Ramanathan",
"suffix": ""
},
{
"first": "H",
"middle": [],
"last": "Choudhary",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Ghosh",
"suffix": ""
},
{
"first": "P",
"middle": [],
"last": "Bhattacharyya",
"suffix": ""
}
],
"year": 2009,
"venue": "Proceedings of the 47th Annual Meeting of the ACL and the 4th IJCNLP of the AFNLP",
"volume": "",
"issue": "",
"pages": "800--808",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "A. Ramanathan, H. Choudhary, A. Ghosh and P. Bhat- tacharyya. 2009. Case markers and Morphol- ogy: Addressing the crux of the fluency problem in English-Hindi SMT. In Proceedings of the 47th An- nual Meeting of the ACL and the 4th IJCNLP of the AFNLP, pages 800-808.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Efficient Dynamic Programming Search Algorithms for Phrase-based SMT",
"authors": [
{
"first": "C",
"middle": [],
"last": "Tillman",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of the Workshop CHPSLP at HLT'06",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "C. Tillman. 2006. Efficient Dynamic Programming Search Algorithms for Phrase-based SMT. In Pro- ceedings of the Workshop CHPSLP at HLT'06.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "Applying morphology generation models to machine translation",
"authors": [
{
"first": "K",
"middle": [],
"last": "Toutanova",
"suffix": ""
},
{
"first": "H",
"middle": [],
"last": "Suzuki",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Ruopp",
"suffix": ""
}
],
"year": 2008,
"venue": "Proceedings of the 46th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies",
"volume": "",
"issue": "",
"pages": "514--522",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "K. Toutanova, H. Suzuki, and A. Ruopp. 2008. Ap- plying morphology generation models to machine translation. In Proceedings of the 46th Annual Meet- ing of the Association for Computational Linguis- tics: Human Language Technologies, pages 514- 522.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "Individual comparisons by ranking methods",
"authors": [
{
"first": "F",
"middle": [],
"last": "Wilcoxon",
"suffix": ""
}
],
"year": 1945,
"venue": "Biometrics",
"volume": "1",
"issue": "",
"pages": "80--83",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "F. Wilcoxon. 1945. Individual comparisons by ranking methods. Biometrics, 1, 80-83, http://faculty.vassar.edu/lowry/wilcoxon.html.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "Agreement constraints for statistical machine translation into german",
"authors": [
{
"first": "P",
"middle": [],
"last": "Williams",
"suffix": ""
},
{
"first": "P",
"middle": [],
"last": "Koehn",
"suffix": ""
}
],
"year": 2011,
"venue": "Proceedings of the Sixth Workshop on Statistical Machine Translation, WMT",
"volume": "",
"issue": "",
"pages": "217--226",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "P. Williams and P. Koehn. 2011. Agreement con- straints for statistical machine translation into ger- man. In Proceedings of the Sixth Workshop on Sta- tistical Machine Translation, WMT, pages 217-226.",
"links": null
},
"BIBREF20": {
"ref_id": "b20",
"title": "Phrase-based backoff models for machine translation of highly inflected languages",
"authors": [
{
"first": "M",
"middle": [],
"last": "Yang",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Kirchhoff",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of the 21st International Conference on Computational Linguistics",
"volume": "",
"issue": "",
"pages": "1017--1020",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "M. Yang and K. Kirchhoff. 2006. Phrase-based back- off models for machine translation of highly in- flected languages. In Proceedings of the 21st Inter- national Conference on Computational Linguistics, pages 1017-1020.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"num": null,
"text": "is playing with the grand master . T: Ram grand master ke saath khel raha hai . (Ram grand master with play+3+sg+masc+pp)Example 1b: S: Sita is playing with the grand master . T: Sita grand master ke saath khel rahi hai . (Sita grand master with play+3+sg+fem+pp)",
"type_str": "figure",
"uris": null
},
"TABREF2": {
"text": "Classes defined for different classifiers.",
"content": "<table/>",
"num": null,
"type_str": "table",
"html": null
},
"TABREF4": {
"text": "Prediction accuracy for the classifiers.",
"content": "<table/>",
"num": null,
"type_str": "table",
"html": null
},
"TABREF6": {
"text": "Prediction accuracy for verb phrase inflections.",
"content": "<table/>",
"num": null,
"type_str": "table",
"html": null
},
"TABREF8": {
"text": "BLEU score and Human Judgment.",
"content": "<table/>",
"num": null,
"type_str": "table",
"html": null
}
}
}
}