{
"paper_id": "2021",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T02:11:11.353722Z"
},
"title": "Addressing Zero-Resource Domains Using Document-Level Context in Neural Machine Translation",
"authors": [
{
"first": "Dario",
"middle": [],
"last": "Stojanovski",
"suffix": "",
"affiliation": {},
"email": "stojanovski@cis.lmu.de"
},
{
"first": "Alexander",
"middle": [],
"last": "Fraser",
"suffix": "",
"affiliation": {},
"email": "fraser@cis.lmu.de"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "Achieving satisfying performance in machine translation on domains for which there is no training data is challenging. Traditional supervised domain adaptation is not suitable for addressing such zero-resource domains because it relies on in-domain parallel data. We show that when in-domain parallel data is not available, access to document-level context enables better capturing of domain generalities compared to only having access to a single sentence. Having access to more information provides a more reliable domain estimation. We present two document-level Transformer models which are capable of using large context sizes and we compare these models against strong Transformer baselines. We obtain improvements for the two zero-resource domains we study. We additionally provide an analysis where we vary the amount of context and look at the case where in-domain data is available.",
"pdf_parse": {
"paper_id": "2021",
"_pdf_hash": "",
"abstract": [
{
"text": "Achieving satisfying performance in machine translation on domains for which there is no training data is challenging. Traditional supervised domain adaptation is not suitable for addressing such zero-resource domains because it relies on in-domain parallel data. We show that when in-domain parallel data is not available, access to document-level context enables better capturing of domain generalities compared to only having access to a single sentence. Having access to more information provides a more reliable domain estimation. We present two document-level Transformer models which are capable of using large context sizes and we compare these models against strong Transformer baselines. We obtain improvements for the two zero-resource domains we study. We additionally provide an analysis where we vary the amount of context and look at the case where in-domain data is available.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Training robust neural machine translation models for a wide variety of domains is an active field of work. NMT requires large bilingual resources which are not available for many domains and languages. When there is no data available for a given domain, e.g., in the case of web-based MT tools, this is a significant challenge. Despite the fact that these tools are usually trained on large scale datasets, they are often used to translate documents from a domain which was not seen during training. We call this scenario zero-resource domain adaptation and present an approach using document-level context to address it.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "When an NMT model receives a test sentence from a zero-resource domain, it can be matched to similar domains in the training data. This is to some extent done implicitly by standard NMT. Alternatively, this matching can be facilitated by a domain adaptation technique such as using special domain tokens and features (Kobus et al., 2017; Tars and Fishel, 2018) . However, it is not always easy to determine the domain of a sentence without larger context. Access to document-level context makes it more probable that domain signals can be observed, i.e., words representative of a domain are more likely to be encountered. We hypothesize that this facilitates better matching of unseen domains to domains seen during training and provide experimental evidence supporting this hypothesis.",
"cite_spans": [
{
"start": 317,
"end": 337,
"text": "(Kobus et al., 2017;",
"ref_id": "BIBREF12"
},
{
"start": 338,
"end": 360,
"text": "Tars and Fishel, 2018)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Recent work has shown that contextual information improves MT (Miculicich et al., 2018; Voita et al., 2019b; Maruf et al., 2019) , often by improving anaphoric pronoun translation quality, which can be addressed well with limited context. However, in order to address discourse phenomena such as coherence and cohesion, access to larger context is preferable. Voita et al. (2019b,a) were the first to show large improvements on lexical cohesion in a controlled setting using challenge sets. However, previous work did not make clear whether such models can help with disambiguation of polysemous words whose sense is domain-dependent.",
"cite_spans": [
{
"start": 62,
"end": 87,
"text": "(Miculicich et al., 2018;",
"ref_id": "BIBREF21"
},
{
"start": 88,
"end": 108,
"text": "Voita et al., 2019b;",
"ref_id": "BIBREF34"
},
{
"start": 109,
"end": 128,
"text": "Maruf et al., 2019)",
"ref_id": "BIBREF20"
},
{
"start": 360,
"end": 382,
"text": "Voita et al. (2019b,a)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In this work, we study the usefulness of document-level context for zero-resource domain adaptation (which we think has not been studied in this way before). We propose two novel Transformer models which can efficiently handle large context and test their ability to model multiple domains at once. We show that document-level models trained on multi-domain datasets provide improvements on zero-resource domains. We evaluate on English\u2192German translation using TED and PatTR (patent descriptions) as zero-resource domains. In addition to measuring translation quality, we conduct a manual evaluation targeted at word disambiguation. We also present additional experiments on classical domain adaptation where access to in-domain TED and PatTR data is allowed. Our first proposed model, which we call the domain embedding model (DomEmb), applies average or max pooling over all context embeddings and adds this representation to each source token-level embedding in the Transformer. The second model is conceptually similar to previous work on context-aware NMT (Stojanovski and Fraser, 2018; Miculicich et al., 2018) and introduces additional multi-head attention components in the encoder and decoder in order to handle the context. However, in order to facilitate larger context sizes, it creates a compressed context representation by applying average or max pooling with a fixed window and stride size. We compare our proposed models against previous context-aware NMT architectures and techniques for handling multi-domain setups, and show they improve upon strong baselines. The proposed models encode context in a coarse-grained way. They only have a limited ability to model discourse phenomena such as coreference resolution, so the gains we see in a multi-domain setup show that they encode domain information. Evaluating on multiple and zero-resource domains allows us to show that context can be used to capture domain information.",
"cite_spans": [
{
"start": 1060,
"end": 1089,
"text": "Stojanovski and Fraser, 2018;",
"ref_id": "BIBREF25"
},
{
"start": 1090,
"end": 1114,
"text": "Miculicich et al., 2018;",
"ref_id": "BIBREF21"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The contributions of our work can be summarized as follows: we (i) propose two NMT models which are able to handle large context sizes, (ii) show that document-level context in a multidomain experimental setup is beneficial for handling zero-resource domains, (iii) show the effect of different context sizes and (iv) study traditional domain adaptation with access to in-domain data.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Domain adaptation Several previous works address the problem that standard NMT may fail to adequately model all domains in a multi-domain setup even when all of the domains are known in advance. Kobus et al. (2017) introduce using domain tags for this problem, a similar method to the domain embedding model in our paper. These domain tags are mapped to corresponding embeddings and are either inserted at the beginning of the sentence or concatenated to the token-level embeddings. The domain embeddings are reserved for specific domains and are fixed for all sentences in a given domain. The number of distinct domain embeddings is limited to the number of known domains. Tars and Fishel (2018) define a similar approach which uses oracle domain tags and tags obtained using supervised methods and unsupervised clustering. However, clustering limits how many domains can be taken into consideration. Furthermore, this approach assumes that sufficient domain information can be obtained from a single sentence alone. Document-level classifiers (Xu et al., 2007) address this problem, but they are not jointly trained with the MT model. Further work in multi-domain MT is Foster and Kuhn (2007) who propose mixture models to dynamically adapt to the target domain, Foster et al. (2010) who build on this work and include instance weighting, Zeng et al. (2018) where domain-specific and domain-shared annotations from adversarial domain classifiers are used and Britz et al. (2017) where a discriminator is used to backpropagate domain signals.",
"cite_spans": [
{
"start": 195,
"end": 214,
"text": "Kobus et al. (2017)",
"ref_id": "BIBREF12"
},
{
"start": 674,
"end": 696,
"text": "Tars and Fishel (2018)",
"ref_id": null
},
{
"start": 1045,
"end": 1062,
"text": "(Xu et al., 2007)",
"ref_id": "BIBREF36"
},
{
"start": 1172,
"end": 1194,
"text": "Foster and Kuhn (2007)",
"ref_id": "BIBREF7"
},
{
"start": 1265,
"end": 1285,
"text": "Foster et al. (2010)",
"ref_id": "BIBREF6"
},
{
"start": 1341,
"end": 1359,
"text": "Zeng et al. (2018)",
"ref_id": "BIBREF38"
},
{
"start": 1461,
"end": 1480,
"text": "Britz et al. (2017)",
"ref_id": "BIBREF2"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "Continued training is an established technique for domain adaptation if access to in-domain resources is possible. The method entails initially training on out-of-domain data, and then continuing training on in-domain data (Luong and Manning, 2015) . Chen et al. (2017) and Zhang and Xiong (2018) improve upon this paradigm by integrating a domain classifier or a domain similarity metric into NMT and modifying the training cost based on weights indicating in-domain or out-of-domain data. Sajjad et al. (2017) and Farajian et al. (2017) use continued training in a multi-domain setup and propose various ways of fine-tuning to in-domain data. Standard continued training (Luong and Manning, 2015) leads to catastrophic forgetting, as evidenced by the degraded performance on the out-of-domain dataset. Freitag and Al-Onaizan (2016) address this issue by ensembling the original and the fine-tuned model. We show that our model obtains significant improvements compared to a baseline with the ensembling paradigm. In contrast to these previous works, we do not know the domains during training. Our proposed approaches model the domain implicitly by looking at document-level context. Moreover, we evaluate performance on domains not seen during training. Naradowsky et al. (2020) adapt to unseen domains using bandit learning techniques. The method relies on explicit user feedback which is not always easily available. Bapna and Firat (2019) propose a retrieval-based method that, at inference time, adapts to domains not seen during training. However, they assume access to in-domain parallel data at inference time, and they retrieve parallel phrases from this in-domain data. In our zero-resource experiments, we have no access to in-domain parallel data.",
"cite_spans": [
{
"start": 223,
"end": 248,
"text": "(Luong and Manning, 2015)",
"ref_id": "BIBREF17"
},
{
"start": 251,
"end": 269,
"text": "Chen et al. (2017)",
"ref_id": "BIBREF3"
},
{
"start": 274,
"end": 296,
"text": "Zhang and Xiong (2018)",
"ref_id": "BIBREF40"
},
{
"start": 490,
"end": 510,
"text": "Sajjad et al. (2017)",
"ref_id": "BIBREF24"
},
{
"start": 515,
"end": 537,
"text": "Farajian et al. (2017)",
"ref_id": "BIBREF5"
},
{
"start": 799,
"end": 828,
"text": "Freitag and Al-Onaizan (2016)",
"ref_id": "BIBREF8"
},
{
"start": 1252,
"end": 1276,
"text": "Naradowsky et al. (2020)",
"ref_id": "BIBREF23"
},
{
"start": 1417,
"end": 1439,
"text": "Bapna and Firat (2019)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "Context-aware NMT A separate field of inquiry is context-aware NMT which proposes integrating cross-sentence context (Tiedemann and Scherrer, 2017; Bawden et al., 2018; Stojanovski and Fraser, 2018; Miculicich et al., 2018; Tu et al., 2018; Maruf and Haffari, 2018; Voita et al., 2019b; Maruf et al., 2019; Yang et al., 2019; Voita et al., 2019a; Tan et al., 2019) . These works show that context helps with discourse phenomena such as anaphoric pronouns, deixis and lexical cohesion. Kim et al. (2019) show that using context can improve topic-aware lexical choice, but in a single-domain setup.",
"cite_spans": [
{
"start": 117,
"end": 147,
"text": "(Tiedemann and Scherrer, 2017;",
"ref_id": "BIBREF30"
},
{
"start": 148,
"end": 168,
"text": "Bawden et al., 2018;",
"ref_id": "BIBREF1"
},
{
"start": 169,
"end": 198,
"text": "Stojanovski and Fraser, 2018;",
"ref_id": "BIBREF25"
},
{
"start": 199,
"end": 223,
"text": "Miculicich et al., 2018;",
"ref_id": "BIBREF21"
},
{
"start": 224,
"end": 240,
"text": "Tu et al., 2018;",
"ref_id": "BIBREF31"
},
{
"start": 241,
"end": 265,
"text": "Maruf and Haffari, 2018;",
"ref_id": "BIBREF19"
},
{
"start": 266,
"end": 286,
"text": "Voita et al., 2019b;",
"ref_id": "BIBREF34"
},
{
"start": 287,
"end": 306,
"text": "Maruf et al., 2019;",
"ref_id": "BIBREF20"
},
{
"start": 307,
"end": 325,
"text": "Yang et al., 2019;",
"ref_id": "BIBREF37"
},
{
"start": 326,
"end": 346,
"text": "Voita et al., 2019a;",
"ref_id": "BIBREF33"
},
{
"start": 347,
"end": 364,
"text": "Tan et al., 2019)",
"ref_id": "BIBREF28"
},
{
"start": 485,
"end": 502,
"text": "Kim et al. (2019)",
"ref_id": "BIBREF11"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "Previous work on context-aware NMT has mostly worked with limited context. Miculicich et al. (2018) address the problem by reusing previously computed encoder representations, but report no BLEU improvements by using context larger than 3 sentences. Other work finds 2 sentences of context to work best. Maruf and Haffari (2018) use a fixed pretrained RNN encoder for context sentences and only train the document-level RNN. Junczys-Dowmunt (2019) concatenates sentences into very large inputs and outputs as in Tiedemann and Scherrer (2017) . Maruf et al. (2019) propose a scalable context-aware model by using sparsemax which can ignore certain words and hierarchical attention which first computes sentence-level attention scores and subsequently word-level scores. However, for domain adaptation, the full encoder representation is too granular and not the most efficient way to obtain domain signals, for which we present evidence in our experiments. Stojanovski and Fraser (2019a) ; Mac\u00e9 and Servan (2019) propose a similar approach to our domain embedding model, but they do not investigate it from a domain adaptation perspective.",
"cite_spans": [
{
"start": 75,
"end": 99,
"text": "Miculicich et al. (2018)",
"ref_id": "BIBREF21"
},
{
"start": 503,
"end": 532,
"text": "Tiedemann and Scherrer (2017)",
"ref_id": "BIBREF30"
},
{
"start": 535,
"end": 554,
"text": "Maruf et al. (2019)",
"ref_id": "BIBREF20"
},
{
"start": 947,
"end": 977,
"text": "Stojanovski and Fraser (2019a)",
"ref_id": "BIBREF26"
},
{
"start": 980,
"end": 1002,
"text": "Mac\u00e9 and Servan (2019)",
"ref_id": "BIBREF18"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "To our knowledge, our work is the first at the intersection of domain adaptation and context-aware NMT and shows that document-level context can be used to address zero-resource domains.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "The models we propose in this work are extensions of the Transformer (Vaswani et al., 2017) . The first approach introduces separate domain embeddings applied to each token-level embedding. The second is conceptually based on previous context-aware models (Stojanovski and Fraser, 2018; Miculicich et al., 2018) . Both models are capable of handling document-level context. We modify the training data so that all sentences have access to the previous sentences within the corresponding source document. Access to the document-level context is available at test time as well. Each sentence is separated from the next with a special <SEP> token. We train and evaluate our models with a 10-sentence context.",
"cite_spans": [
{
"start": 69,
"end": 91,
"text": "(Vaswani et al., 2017)",
"ref_id": "BIBREF32"
},
{
"start": 256,
"end": 285,
"text": "Stojanovski and Fraser, 2018;",
"ref_id": "BIBREF25"
},
{
"start": 286,
"end": 310,
"text": "Miculicich et al., 2018;",
"ref_id": "BIBREF21"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Model",
"sec_num": "3"
},
{
"text": "The first model is shown in Figure 1 . It is inspired by Kobus et al. (2017) which concatenates a special domain tag to each token-level embedding. Kobus et al. (2017) assume access to oracle domain tags during training. However, at inference, perfect domain knowledge is not possible. Consequently, the domain has to be predicted in advance, which creates a mismatch between training and inference. An additional problem is inaccurately predicted domain tags at test time. We modify this approach by replacing the predefined special domain tag with one inferred from the document context. A disadvantage of this approach as opposed to Kobus et al. (2017) is that there is no clear domain indicator. However, the model is trained jointly with the component inferring the domain, which increases the capacity of the model to match a sentence from an unseen domain to a domain seen during training.",
"cite_spans": [
{
"start": 57,
"end": 76,
"text": "Kobus et al. (2017)",
"ref_id": "BIBREF12"
},
{
"start": 148,
"end": 167,
"text": "Kobus et al. (2017)",
"ref_id": "BIBREF12"
},
{
"start": 636,
"end": 655,
"text": "Kobus et al. (2017)",
"ref_id": "BIBREF12"
}
],
"ref_spans": [
{
"start": 28,
"end": 36,
"text": "Figure 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Domain Embedding Transformer",
"sec_num": "3.1"
},
{
"text": "The main challenge is producing the domain embedding from the context. We use maximum (DomEmb(max)) or average pooling (DomEmb(avg)) over all token-level context embeddings, both resulting in a single embedding representation. We do not apply self-attention over the context in this model. The intuition is that the embeddings will contain domain information in certain regions of the representation and that this can be extracted by max or average pooling. More domain-specific words will presumably increase the related domain signal. In contrast to a sentence-level model, large context can help to more robustly estimate the domain. Based on preliminary experimental results, we add a feed-forward neural network after the pooled embedding representation in DomEmb(avg), but not in DomEmb(max). We represent each token as the sum of its positional embedding, its token-level embedding and the inferred domain embedding. As the model only averages embeddings, the computational overhead is small. A computational efficiency analysis is provided in the appendix.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Domain Embedding Transformer",
"sec_num": "3.1"
},
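As a concrete illustration of the pooling just described, the following is a minimal PyTorch sketch of the DomEmb computation; it is not the authors' Sockeye code, the names (DomainEmbedding, src_emb, ctx_emb) are ours, and padding masks are left out for brevity.

```python
# Illustrative sketch of DomEmb: pool all context token embeddings into one
# "domain embedding" and add it to every source token embedding.
import torch
import torch.nn as nn


class DomainEmbedding(nn.Module):
    def __init__(self, d_model: int, pooling: str = "avg"):
        super().__init__()
        self.pooling = pooling
        # The paper adds a feed-forward network after average pooling but not
        # after max pooling; a single linear layer stands in for it here.
        self.ffn = nn.Linear(d_model, d_model) if pooling == "avg" else None

    def forward(self, src_emb: torch.Tensor, ctx_emb: torch.Tensor) -> torch.Tensor:
        # src_emb: (batch, src_len, d_model) token + positional embeddings
        # ctx_emb: (batch, ctx_len, d_model) embeddings of the concatenated context
        if self.pooling == "avg":
            dom = self.ffn(ctx_emb.mean(dim=1))      # (batch, d_model)
        else:
            dom, _ = ctx_emb.max(dim=1)              # max over the time axis
        # Each token becomes token + positional + inferred domain embedding.
        return src_emb + dom.unsqueeze(1)
```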
{
"text": "The second approach (CtxPool) is similar to previous work on context-aware NMT (e.g., Stojanovski and Fraser, 2018) . The model is outlined in Figure 2 . It first creates a compact representation of the context by applying max or average pooling over the context with certain window and stride sizes. The intuition is similar to DomEmb, but pooling over a window provides a more granular representation. We use the concatenation of all context sentences (separated by <SEP>) as input to CtxPool.",
"cite_spans": [
{
"start": 86,
"end": 116,
"text": "(Stojanovski and Fraser, 2018;",
"ref_id": "BIBREF25"
}
],
"ref_spans": [
{
"start": 145,
"end": 153,
"text": "Figure 2",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Context-Aware Transformer with Pooling",
"sec_num": "3.2"
},
{
"text": "The output of applying max or average pooling over time is used as a context representation which is input to a Transformer encoder. We share the first L \u2212 1 encoder layers between the main sentence and the context. L is the number of encoder layers. In the decoder, we add an additional multi-head attention (MHA) over the context. This attention is conditioned on the MHA representation from the main sentence encoder. Subsequently, these two representations are merged using a gated sum. The gate controls information flow from the context.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Context-Aware Transformer with Pooling",
"sec_num": "3.2"
},
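Below is a rough PyTorch sketch of the two CtxPool ingredients described above: the window/stride pooling that compresses the context, and the additional decoder attention over the pooled context merged through a gated sum. The names (pool_context, GatedContextAttention) are illustrative, and masking, layer normalization and the sharing of the first L − 1 encoder layers are omitted; the authors' implementation is in Sockeye.

```python
# Illustrative sketch of CtxPool's context compression and gated context attention.
import torch
import torch.nn as nn
import torch.nn.functional as F


def pool_context(ctx_emb: torch.Tensor, window: int = 10, stride: int = 10,
                 mode: str = "avg") -> torch.Tensor:
    """Compress (batch, ctx_len, d_model) to roughly (batch, ctx_len // stride, d_model)."""
    x = ctx_emb.transpose(1, 2)                      # pooling expects (batch, channels, time)
    pool = F.avg_pool1d if mode == "avg" else F.max_pool1d
    return pool(x, kernel_size=window, stride=stride).transpose(1, 2)


class GatedContextAttention(nn.Module):
    """Extra decoder attention over the pooled context, merged via a gated sum."""

    def __init__(self, d_model: int, n_heads: int = 8):
        super().__init__()
        self.ctx_attn = nn.MultiheadAttention(d_model, n_heads, batch_first=True)
        self.gate = nn.Linear(2 * d_model, d_model)

    def forward(self, dec_state: torch.Tensor, pooled_ctx: torch.Tensor) -> torch.Tensor:
        # dec_state: the decoder's attention output over the main sentence encoder.
        ctx_out, _ = self.ctx_attn(dec_state, pooled_ctx, pooled_ctx)
        g = torch.sigmoid(self.gate(torch.cat([dec_state, ctx_out], dim=-1)))
        return g * dec_state + (1.0 - g) * ctx_out   # the gate controls context flow
```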
{
"text": "In contrast to DomEmb, CtxPool can be used to handle other discourse phenomena such as anaphora resolution. In this work, we use a window size of 10, suitable for domain adaptation. For anaphora, summarizing ten neighboring words makes it difficult to extract antecedent relationships. Careful tuning of these parameters in future work may allow modeling both local and global context.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Context-Aware Transformer with Pooling",
"sec_num": "3.2"
},
{
"text": "We train En\u2192De models on Europarl, NewsCommentary, OpenSubtitles, Rapid and Ubuntu. TED and PatTR are considered to be zero-resource domains for which we have no parallel data. In additional experiments, we also consider classical domain adaptation where we do use TED and PatTR parallel data in a continued training setup. The models are implemented in Sockeye (Hieber et al., 2017) . The code and the datasets are publicly available. 1 The preprocessing details and model hyperparameters are provided in the appendix.",
"cite_spans": [
{
"start": 362,
"end": 383,
"text": "(Hieber et al., 2017)",
"ref_id": "BIBREF9"
},
{
"start": 436,
"end": 437,
"text": "1",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Experiments 4.1 Experimental Setup",
"sec_num": "4"
},
{
"text": "The datasets for some domains are very large. For example, OpenSubtitles contains 22M sentences and PatTR 12M. Due to limited computational resources, we randomly sample documents from these domains, ending up with approximately 10% of the initial dataset size. We keep the original size for the remaining datasets. Dataset sizes for all domains are presented in Table 1 . The development and test sets are also randomly sampled from the original datasets. We sample entire documents rather than specific sentences. For TED we use tst2012 as dev and tst2013 as test set. The TED and PatTR dev sets are only used in the fine-tuning experiments where we assume access to in-domain data and are not used in any other experiment.",
"cite_spans": [],
"ref_spans": [
{
"start": 363,
"end": 370,
"text": "Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Datasets",
"sec_num": "4.2"
},
{
"text": "Europarl from lack of document boundaries (M\u00fcller et al., 2018; Stojanovski and Fraser, 2019b) or random context . To a large extent, both issues can be ignored, given the nature of our models. DomEmb is oblivious to the sentence order. CtxPool preserves some notion of sequentiality, but it should also be robust to these issues. Furthermore, we focus on obtaining domain signals. Even in an extreme case where the context comes from a different document (but from the same domain) we hypothesize similar performance. We later conduct an ablation study into whether arbitrary context from the same domain has a negative effect on performance. The results partially support our hypothesis by either matching or exceeding sentence-level performance, but also show that the correct context is important to obtain the best results.",
"cite_spans": [
{
"start": 42,
"end": 63,
"text": "(M\u00fcller et al., 2018;",
"ref_id": "BIBREF22"
},
{
"start": 64,
"end": 94,
"text": "Stojanovski and Fraser, 2019b)",
"ref_id": "BIBREF27"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Datasets",
"sec_num": "4.2"
},
{
"text": "We compare our proposed methods against a sentence-level baseline (SentBase) and the domain tag (TagBase) approach (Kobus et al., 2017) . We train TagBase with oracle domain tags, while at test time, we use tags obtained from a document-level domain classifier. All sentences within a document are marked with the same predicted domain tag. The domain classifier is a two-layer feed-forward network and the documents are represented as a bag-of-words. The classifier obtains an accuracy of 98.6%. By design, documents from TED and PatTR were marked with tags from the remaining domains. Additionally, we compare with a context-aware model (CtxBase) which is similar to CtxPool, but we feed the full context to the context Transformer encoder, without applying max or average pooling beforehand. This model has token-level granular access to the context. We also train a concatenation model (ConcBase) (Tiedemann and Scherrer, 2017) using source-side context.",
"cite_spans": [
{
"start": 115,
"end": 135,
"text": "(Kobus et al., 2017)",
"ref_id": "BIBREF12"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Baselines",
"sec_num": "4.3"
},
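For concreteness, here is a minimal sketch of the kind of document-level domain classifier used to produce test-time tags for TagBase: documents as bag-of-words with a two-layer feed-forward network on top. scikit-learn is used for brevity; the hidden size, max_features and the toy documents are our own illustrative choices, not the authors' exact setup.

```python
# Illustrative document-level domain classifier: bag-of-words features plus a
# small feed-forward network (one hidden layer + softmax output).
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.neural_network import MLPClassifier

# One string per training document, labelled with its (known) domain.
train_docs = ["resumption of the session ...", "how are you doing tonight ?"]
train_domains = ["Europarl", "OpenSubtitles"]

vectorizer = CountVectorizer(max_features=50000)
X = vectorizer.fit_transform(train_docs)

clf = MLPClassifier(hidden_layer_sizes=(512,), max_iter=50)
clf.fit(X, train_domains)

# At test time, every sentence of a document receives the document's predicted tag;
# zero-resource TED/PatTR documents are thus mapped to one of the training domains.
test_doc = ["mr president , i would like to raise a point of order ."]
print(clf.predict(vectorizer.transform(test_doc)))
```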
{
"text": "In zero-resource domain adaptation experiments, we do not use any data from TED or PatTR, neither as training nor development data. The models are trained on our multi-domain dataset consisting of five domains. The results are shown in Table 2 . We compute statistical significance with paired bootstrap resampling (Koehn, 2004) . SentBase achieves 16.7 and 32.9 BLEU on PatTR and TED respectively. The domains seen during training are more similar to TED in comparison to PatTR which is the reason for the large BLEU score differences. Our proposed models improve on PatTR by up to 0.4 BLEU and on TED by up to 1.0 BLEU. Improvements vary, but all models increase the BLEU score. The TagBase model does not improve significantly over SentBase. Our document-level models are robust across the two domains. These results confirm our assumption that access to document-level context provides for a domain signal. These models are oblivious to the actual characteristics of the domain since it was not seen in training, but presumably, they managed to match the zero-resource domain to a similar one. We assume that the reason for the larger improvements on TED in comparison to PatTR is that TED is a more similar domain to the domains seen in training. As a result, matching TED to seen domains was easier for all models. Table 2 shows that our proposed models improve on PatTR and TED and provides evidence that document-level context is useful for addressing zero-resource domains.",
"cite_spans": [
{
"start": 316,
"end": 329,
"text": "(Koehn, 2004)",
"ref_id": "BIBREF13"
}
],
"ref_spans": [
{
"start": 236,
"end": 244,
"text": "Table 2",
"ref_id": "TABREF2"
},
{
"start": 1322,
"end": 1329,
"text": "Table 2",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Zero-Resource Domain Adaptation",
"sec_num": "5.1"
},
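As a reference for the significance testing mentioned above, here is a small sketch of paired bootstrap resampling (Koehn, 2004), scoring each resampled test set with sacrebleu; the function name, the number of samples and the use of sacrebleu (rather than the authors' own scripts) are our assumptions.

```python
# Illustrative paired bootstrap resampling: resample the test set with replacement
# and count how often system A outscores system B in corpus BLEU.
import random
import sacrebleu


def paired_bootstrap(sys_a, sys_b, refs, n_samples=1000):
    """Fraction of resampled test sets on which system A beats system B."""
    idx = range(len(refs))
    wins = 0
    for _ in range(n_samples):
        sample = [random.choice(idx) for _ in idx]          # resample with replacement
        a = [sys_a[i] for i in sample]
        b = [sys_b[i] for i in sample]
        r = [[refs[i] for i in sample]]                     # one reference stream
        if sacrebleu.corpus_bleu(a, r).score > sacrebleu.corpus_bleu(b, r).score:
            wins += 1
    return wins / n_samples                                 # e.g. > 0.95 -> p < 0.05
```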
{
"text": "We assume that the improvements on zero-resource domains are because of document-level models having an increased capability to model domain. As a result, we also evaluate on the other domains which were seen during training. We show average BLEU and the BLEU score on the concatenation of all test sets. This is a useful way to evaluate in a multi-domain setting because it is less sensitive to larger improvements on a smaller test set. Table 3 shows the results. We first compare the baseline against DomEmb(avg). The smallest improvement is on NewsCommentary, only 0.2 BLEU. Improvements vary between 0.8 and 1.2 BLEU on Europarl, OpenSubtitles and Rapid. On Ubuntu, this model improves only by 0.4 BLEU. Joint and average BLEU improve by 0.7 and 0.6, respectively. Replacing average pooling with maximum pooling leads to slightly worse results on all domains except Ubuntu, but still improves upon the baseline. Our assumption is that averaging handles situations when there is a mix of domain signals because it can emphasize the more frequent domain signals. Max pooling is not able to differentiate between less and more frequent domain signals.",
"cite_spans": [],
"ref_spans": [
{
"start": 439,
"end": 446,
"text": "Table 3",
"ref_id": "TABREF4"
}
],
"eq_spans": [],
"section": "Evaluating Domains Seen During Training",
"sec_num": "5.2"
},
{
"text": "CtxPool(avg) and DomEmb(avg) perform similarly and have the same average and joint BLEU scores. Max pooling is slightly worse as shown by the performance of CtxPool(max). TagBase is not very effective in our experiments, improving slightly on some domains and only performing well on Ubuntu. We show that document-level context is useful for modeling multiple known domains at the same time. In the appendix we show translation examples from SentBase and DomEmb(avg).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluating Domains Seen During Training",
"sec_num": "5.2"
},
{
"text": "We also investigate the effect of context size on DomEmb(avg). Previous work on context-aware NMT (Miculicich et al., 2018) typically showed that large context fails to provide consistent gains. But this applies to more granular models which resemble the context-aware baseline CtxBase. In contrast, we observe that larger context does provide improvements. We assume that for DomEmb, access to more context improves the likelihood of encountering domain-specific tokens. We compare different context sizes and show the results in Table 4 . A context size of 1 (ctx=1) obtains the lowest scores on all domains. Using ctx=5 is comparable or slightly worse than ctx=10. Both ctx=1 and ctx=5 get higher scores on Ubuntu and obtain significant improvements over SentBase on the full test set. Significance indicators for ctx=10 with respect to SentBase were already presented in Table 3 . Due to resource limitations, we do not conduct a similar study for CtxPool.",
"cite_spans": [
{
"start": 98,
"end": 122,
"text": "Miculicich et al., 2018)",
"ref_id": "BIBREF21"
}
],
"ref_spans": [
{
"start": 537,
"end": 544,
"text": "Table 4",
"ref_id": "TABREF6"
},
{
"start": 890,
"end": 897,
"text": "Table 3",
"ref_id": "TABREF4"
}
],
"eq_spans": [],
"section": "Context Length",
"sec_num": "5.3"
},
{
"text": "Previous work on context-aware NMT has shown improvements in single-domain scenarios. In our work, we put two context-aware models to the test in a multi-domain setup. All models are trained with a 5-sentence context. The results in Table 5 show that all models improve to varying degrees. They perform similarly on NewsCommentary and OpenSubtitles. CtxBase and ConcBase obtain better results on Europarl than DomEmb(avg) and worse on Ubuntu. CtxBase is best on Rapid. Both baselines obtained better scores on TED, showing they have some capacity to transfer to unseen domains. However, both failed to improve on PatTR. We use 5 sentences of context for this experiment. Scaling the baseline models to large context is challenging with regard to computational efficiency and memory usage. In contrast, DomEmb scales easily to larger context. Furthermore, our analysis shows that DomEmb(avg) has the best average and joint score (CtxBase obtains the same joint score), improves on both unseen domains and consistently obtains significant improvements on all domains except NewsCommentary. As previous work shows (M\u00fcller et al., 2018) , these context-aware baselines improve fine-grained discourse phenomena such as anaphora resolution. We show in our manual analysis that DomEmb(avg) does not improve anaphoric pronoun translation, which indicates that the improvements of our proposed model and the context-aware baselines are orthogonal.",
"cite_spans": [
{
"start": 1112,
"end": 1133,
"text": "(M\u00fcller et al., 2018)",
"ref_id": "BIBREF22"
}
],
"ref_spans": [
{
"start": 233,
"end": 240,
"text": "Table 5",
"ref_id": "TABREF8"
}
],
"eq_spans": [],
"section": "Comparison to Context-Aware Baselines",
"sec_num": "5.4"
},
{
"text": "We also evaluated the translation of domain-specific words. We extracted the most important words from a domain based on TF-IDF scores and selected the top 100 with the highest scores which have more than 3 characters. Next, following previous work, we compute alignments using fast_align (Dyer et al., 2013) based on the training set and force align the test set source sentences to the references and generated translations. We then compute the F1 score of the translation of the domain-specific words. Results are shown in Table 6 . We compare SentBase with DomEmb(avg). In the informal subtitles domain, lack of context seems to have biased SentBase to generate more formal translations. We later conduct a manual analysis on the TED test set where we confirm that word sense disambiguation is indeed improved in DomEmb(avg).",
"cite_spans": [
{
"start": 273,
"end": 292,
"text": "(Dyer et al., 2013)",
"ref_id": "BIBREF4"
}
],
"ref_spans": [
{
"start": 510,
"end": 517,
"text": "Table 6",
"ref_id": "TABREF10"
}
],
"eq_spans": [],
"section": "Translation of Domain-Specific Words",
"sec_num": "5.5"
},
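A small sketch of the evaluation just described: rank words by TF-IDF per domain, keep the top 100 with more than 3 characters, and score their translations with F1 (the forced alignment with fast_align is only indicated through its resulting counts). The function names and the one-document-per-domain TF-IDF setup are our own illustrative choices, not the authors' exact scripts.

```python
# Illustrative extraction of domain-specific words and the F1 used to score them.
from sklearn.feature_extraction.text import TfidfVectorizer


def top_domain_terms(domain_corpora: dict, domain: str, k: int = 100) -> list:
    """domain_corpora maps a domain name to a list of its sentences."""
    names = list(domain_corpora)
    # One "document" per domain, so TF-IDF contrasts this domain with the others;
    # the token pattern keeps only words with more than 3 characters.
    vec = TfidfVectorizer(token_pattern=r"(?u)\b\w{4,}\b")
    tfidf = vec.fit_transform([" ".join(domain_corpora[n]) for n in names])
    row = tfidf[names.index(domain)].toarray().ravel()
    vocab = vec.get_feature_names_out()
    return [vocab[i] for i in row.argsort()[::-1][:k]]


def term_f1(n_correct: int, n_hyp: int, n_ref: int) -> float:
    """F1 over occurrences of domain-specific words, counted via forced alignments."""
    p = n_correct / n_hyp if n_hyp else 0.0
    r = n_correct / n_ref if n_ref else 0.0
    return 2 * p * r / (p + r) if p + r else 0.0
```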
{
"text": "We also conduct a classical domain adaptation evaluation where access to in-domain data is allowed. We either use PatTR or TED as in-domain data and evaluate with SentBase and DomEmb(avg). In both cases we consider the concatenation of the remaining domains as out-of-domain. This setup differs from zero-resource domain adaptation because we assume access to in-domain training and dev data. First, we train the baseline and DomEmb(avg) on out-of-domain data. Since these initial models are identical to the ones in the zero-resource setup, we reuse them. We then continue training on the corresponding in-domain data. Table 7 shows the results for PatTR. Fine-tuning the baseline and DomEmb(avg) on PatTR improves BLEU by a large margin, both obtaining 34.4 BLEU. The results are unsurprising because our model is tailored to multi-domain setups and is unlikely to contribute to large improvements when fine-tuning on a single domain. Identifying the domain in such a case is trivial and using large context should not be helpful.",
"cite_spans": [],
"ref_spans": [
{
"start": 620,
"end": 627,
"text": "Table 7",
"ref_id": "TABREF11"
}
],
"eq_spans": [],
"section": "Domain Adaptation with Available In-Domain Data",
"sec_num": "5.6"
},
{
"text": "The strengths of our approach come to light by comparing it against SentBase in an ensembling scenario as in Freitag and Al-Onaizan (2016) . We ensemble DomEmb(avg) trained on out-of-domain data with DomEmb(avg) fine-tuned on in-domain data and do the same for SentBase. The DomEmb(avg) ensemble is better than the SentBase ensemble on all domains and on joint BLEU. Similar results are obtained when fine-tuning on TED; these are shown in Table 8 .",
"cite_spans": [
{
"start": 109,
"end": 138,
"text": "Freitag and Al-Onaizan (2016)",
"ref_id": "BIBREF8"
}
],
"ref_spans": [
{
"start": 438,
"end": 445,
"text": "Table 8",
"ref_id": "TABREF12"
}
],
"eq_spans": [],
"section": "Domain Adaptation with Available In-Domain Data",
"sec_num": "5.6"
},
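The ensembling scenario follows Freitag and Al-Onaizan (2016): at every decoding step the output distributions of the out-of-domain model and the fine-tuned in-domain model are averaged. The sketch below only shows that averaging step; decode_step is a hypothetical per-model interface, and beam handling, EOS and state caching are omitted.

```python
# Illustrative probability-level ensembling of two (or more) models at one decoding step.
import torch


def ensemble_step(models, states, prev_token):
    """Average the next-token distributions of several models."""
    probs = []
    for model, state in zip(models, states):
        logits, state = model.decode_step(prev_token, state)   # hypothetical API
        probs.append(torch.softmax(logits, dim=-1))
    return torch.stack(probs).mean(dim=0)
```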
{
"text": "We previously hypothesized that our models would benefit from context from different documents within the same domain. We conduct an ablation study to test this assumption using the DomEmb(avg) model, similar to the study in (Kobus et al., 2017) , where they investigated the effect of giving the wrong domain tag to every sentence.",
"cite_spans": [
{
"start": 220,
"end": 240,
"text": "(Kobus et al., 2017)",
"ref_id": "BIBREF12"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Ablation",
"sec_num": "5.7"
},
{
"text": "For DomEmb(avg), we simulate this approach by replacing the real contextual representation of each test sentence with $C_d$, a context representative of domain $d$. We first compute $\\bar{C}_d = \\frac{1}{N_d} \\sum_{i=1}^{N_d} c_i^d$, where $c_i^d$ is the average of the context token-level embeddings for sentence $i$. Finally, $C_d = \\arg\\max_{c_i^d} \\cos(c_i^d, \\bar{C}_d)$.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Ablation",
"sec_num": "5.7"
},
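The selection above can be written compactly; the NumPy sketch below picks, for each domain, the real context whose pooled embedding is closest (by cosine similarity) to the domain mean. The function name and the epsilon guard are illustrative.

```python
# Illustrative selection of the domain-representative context for the ablation:
# c has shape (N_d, d_model), one averaged context embedding c_i^d per sentence.
import numpy as np


def representative_context(c: np.ndarray) -> int:
    mean = c.mean(axis=0)                                    # \bar{C}_d
    cos = (c @ mean) / (np.linalg.norm(c, axis=1) * np.linalg.norm(mean) + 1e-9)
    return int(cos.argmax())                                 # index of the chosen C_d
```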
{
"text": "This procedure is conducted for each domain d separately. Table 9 shows the results. On OpenSubtitles, Rapid, PatTR and TED, DomEmb(avg) improves on the sentence-level baseline if presented with context from the same domain (which is usually not from the same document). On Europarl, NewsCommentary and Ubuntu, it performs similarly to the baseline. In almost all cases, providing a mismatched context degrades the performance of the original DomEmb(avg). The results show that the model is relatively robust to incorrect but closely related context, which provides evidence for our hypothesis that DomEmb captures domain-relevant features. However, the correct context is important to obtain the best results across all domains. Our finding is in contrast with recent results (Li et al., 2020) showing that multi-encoder context-aware NMT models do not encode contextual information.",
"cite_spans": [
{
"start": 777,
"end": 794,
"text": "(Li et al., 2020)",
"ref_id": "BIBREF14"
}
],
"ref_spans": [
{
"start": 58,
"end": 65,
"text": "Table 9",
"ref_id": "TABREF13"
}
],
"eq_spans": [],
"section": "Ablation",
"sec_num": "5.7"
},
{
"text": "We conduct a manual analysis of SentBase and DomEmb(avg) by inspecting them on the TED test set. We only consider translation differences related to word senses and ignore other types of mistakes. We find 156 cases where the two models translate a word in a different sense and at least one of them outputs the correct sense. We define 3 categories: (i) one model is correct while the other wrong; (ii) both are correct, but one is closer to the actual meaning and (iii) both are correct, but one matches the reference translation. DomEmb(avg) is better on (i) in 43 cases as opposed to the 19 cases where SentBase is better. The ratio of DomEmb(avg) being correct in contrast to SentBase is 23/12 in (ii) and 38/21 in (iii). This shows that DomEmb(avg) is better at coherence which is closely related to better domain modeling in multi-domain setups where the number of probable senses is larger than in a single domain. Furthermore, we find that DomEmb(avg) does not improve on pronoun translation. In fact, in several cases it introduced errors, thus ruling out better coreference resolution as a source of improvements.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Manual Analysis",
"sec_num": "5.8"
},
{
"text": "We presented document-level context-aware NMT models and showed their effectiveness in addressing zero-resource domains. We compared against strong baselines and showed that document-level context can be leveraged to obtain domain signals. The proposed models benefit from large context and also obtain strong performance in multidomain scenarios. Our experimental results show the proposed models obtain improvements of up to 1.0 BLEU in this difficult zero-resource domain setup. Furthermore, they show that document-level context should be further explored in future work on domain adaptation and suggest that larger context would be beneficial for other discourse phenomena such as coherence.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "6"
},
{
"text": "We tokenize all sentences using the script from Moses 2 . We apply BPE splitting 3 with 32K merge operations. We exclude TED and PatTR when computing the BPEs. The BPEs are computed jointly on the source and target data. Samples where the source or target are larger than 100 tokens are removed. We also apply a per-sentence limit of 100 tokens on the context, meaning that models trained on 10 sentences of context have a limit of 1000 tokens. A batch size of 4096 is used for all models.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "A Preprocessing and Hyperparameters",
"sec_num": null
},
{
"text": "We first train a sentence-level baseline until convergence based on early-stopping. All contextaware models are initialized with the parameters from this pretrained sentence-level baseline. Parameters that are specific to the models' architectures are randomly initialized. All proposed models in this work share the source, target, output and context embeddings. The models' architecture is a 6 layer encoder/decoder Transformer with 8 attention heads. The embedding and model size is 512 and the size of the feed-forward layers is 2048. The number of parameters for all models is shown in Table 10 . We use label smoothing with 0.1 and dropout in the Transformer of 0.1. Models are trained on 2 GTX 1080 Ti GPUs with 11GB RAM.",
"cite_spans": [],
"ref_spans": [
{
"start": 591,
"end": 599,
"text": "Table 10",
"ref_id": "TABREF14"
}
],
"eq_spans": [],
"section": "A Preprocessing and Hyperparameters",
"sec_num": null
},
{
"text": "Table 10 (number of parameters per model): SentBase 61M, CtxBase 74M, CtxPool 74M, DomEmb(avg) 63M. The initial learning rate for the document-level models is $10^{-4}$. For the classical domain adaptation scenario with fine-tuning, we use a learning rate of $10^{-5}$ in order not to deviate too much from the well-initialized out-of-domain model. We lower the learning rate by a factor of 0.7 if no improvements are observed on the validation perplexity in 8 checkpoints. A checkpoint is saved every 4000 updates. We did not do any systematic hyperparameter search.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "A Preprocessing and Hyperparameters",
"sec_num": null
},
{
"text": "We use the document-aligned versions of Europarl, NewsCommentary and Rapid from WMT 2019 6 . We also use OpenSubtitles 78 (Lison and Tiedemann, 2016) , Ubuntu 9 , PatTR 10 and TED 11 .",
"cite_spans": [
{
"start": 122,
"end": 149,
"text": "(Lison and Tiedemann, 2016)",
"ref_id": "BIBREF15"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "B Datasets",
"sec_num": null
},
{
"text": "In Table 11, Table 12 and Table 13 we present BLEU scores on the development sets for all the experiments we ran. We only show results for the sets we actually used during training and therefore ignore TED and PatTR for which we had no access to data at training time. The results for TagBase are with oracle domain tags. For the experiments with continued training on TED and PatTR, we show results only on the development sets for TED and PatTR.",
"cite_spans": [],
"ref_spans": [
{
"start": 3,
"end": 34,
"text": "Table 11, Table 12 and Table 13",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "C Validation performance",
"sec_num": null
},
{
"text": "In this section, we compare the computational efficiency of our proposed methods, measured as the average number of seconds needed to translate a sentence from the test set. The average times are 0.2588, 0.2763 \u00b1 0.0124, 0.3662 for SentBase, DomEmb and CtxPool, respectively. DomEmb is only marginally slower than the sentence-level baseline, in contrast to CtxPool, which is to be expected given the additional self-attention applied over the compressed context. In terms of training time, SentBase converged after 90 hours of training, DomEmb(avg) after 168h and CtxPool(avg) after 116h.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "D Computational Efficiency",
"sec_num": null
},
{
"text": "In Table 14 we show some example translations from the sentence-level baseline and our DomEmb(avg) model. We show examples where our model corrected erroneous translations from the baseline. Some of the proper translations should be evident from the main sentence itself, but some can only be inferred from context. The first four examples are from TED and the last from PatTR. In the first example, we can see that the sentencelevel baseline translates \"students\" as \"Studenten\" (university students), but the correct translation in this case is \"Sch\u00fcler\" (elementary or high school student). The main sentence itself is not informative enough for the sentence-level model to make this distinction. In contrast, the DomEmb model has access to more information which provides for the appropriate bias towards the correct translation.",
"cite_spans": [],
"ref_spans": [
{
"start": 3,
"end": 11,
"text": "Table 14",
"ref_id": "TABREF6"
}
],
"eq_spans": [],
"section": "E Examples",
"sec_num": null
},
{
"text": "The second sentence depicts an example where it's nearly impossible for the baseline to make a correct prediction for the translation of \"ambassador\" because it depends on whether the person is male (Botschafter) or female (Botschafterin). In the third example, the sentence-level model translated \"model\" as in \"a role model\" (Vorbild), but the context indicates that the speaker talks about \"fashion models\".",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "E Examples",
"sec_num": null
},
{
"text": "Examples 4 and 5 are relatively unintuitive because the main sentences themselves should be enough to infer the correct translation. In example 4, \"reflect\" refers to the physical process of reflection and should not be translated as in \"to reflect on oneself\" (\"denken\"), while in example 5, \"raise\" refers to the action of \"lifting\" or \"elevating\"(\"aufw\u00e4rtsbewegt\" or \"hochzuziehen\") some object instead of \"raising\" as in \"raising a plant (from a seed)\" (\"z\u00fcchten\").",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "E Examples",
"sec_num": null
},
{
"text": "The last example shows that the sentence-level model translates \"springs\" (\"Federn\" which is a part of the compound word \"Druckfedern\" in the reference) as in \"water springs\" (\"Quellen\" which is a part of the compound word \"Kompressionsquellen\") while it should be translated instead as in the physical elastic device. However, in other test sentences, both SentBase and DomEmb(avg) translated \"spring\" as a season, even though this should be less likely in PatTR, showing that our model does not always succeed in capturing domain perfectly.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "E Examples",
"sec_num": null
},
{
"text": "We all knew we were risking our lives -the teacher, the students and our parents. Reference Wir alle wussten, dass wir unser Leben riskierten: Lehrer, Sch\u00fcler und unsere Eltern. SentBase Wir alle wussten, dass wir unser Leben riskieren... den Lehrer, die Studenten und unsere Eltern. DomEmb(avg) Wir wussten alle, dass wir unser Leben riskierten. Der Lehrer, die Sch\u00fcler und unsere Eltern. Source That's why I am a global ambassador for 10x10, a global campaign to educate women. Reference Deshalb bin ich globale Botschafterin f\u00fcr 10x10, einer weltweiten Kampagne f\u00fcr die Bildung von Frauen. SentBase Aus diesem Grund bin ich ein globaler Botschafter f\u00fcr 10x10, eine weltweite Kampagne zur Ausbildung von Frauen. DomEmb(avg) Deshalb bin ich eine globale Botschafterin f\u00fcr 10x10, eine weltweite Kampagne zur Ausbildung von Frauen. Source And I am on this stage because I am a model. Reference Und ich stehe auf dieser B\u00fchne, weil ich ein Model bin. SentBase Und ich bin auf dieser B\u00fchne, weil ich ein Vorbild bin. DomEmb(avg) Und ich bin auf dieser B\u00fchne, weil ich ein Model bin. Source It's going to bounce, go inside the room, some of that is going to reflect back on the door ... Reference Es wird abprallen, in den Raum gehen, ein Teil davon wird wieder zur\u00fcck auf die T\u00fcr reflektiert ... SentBase Es wird abprallen, ins Zimmer gehen, etwas davon wird wieder an die T\u00fcr denken ...",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Source",
"sec_num": null
},
{
"text": "Es wird abprallen, ins Zimmer gehen, etwas davon wird wieder\u00fcber die T\u00fcr reflektieren ... Source Tie member 60 is driven to raise movable cone 58 ... Reference Mit dem Zugelement 60 wird durch den An der bewegliche Kegel 58 aufw\u00e4rtsbewegt ... SentBase Tie-Mitglied 60 wird angetrieben, bewegliche Konfit\u00fcre 58 zu z\u00fcchten ...",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "DomEmb(avg)",
"sec_num": null
},
{
"text": "Teemitglied 60 wird angetrieben, bewegliche Kegel 58 hochzuziehen ... Source It is only when a certain pressure level is reached that the pistons are pushed back against the action of the compression springs ... Reference Erst bei Erreichen eines bestimmten Druckniveaus werden die Kolben gegen die Wirkung der Druckfedern zur\u00fcckgeschoben ... SentBase Erst wenn ein gewisses Druckniveau erreicht ist, werden die Pistonen gegen die Wirkung der Kompressionsquellen zur\u00fcckgedr\u00e4ngt ...",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "DomEmb(avg)",
"sec_num": null
},
{
"text": "Erst wenn ein bestimmtes Druckniveau erreicht ist, werden die Pistonen gegen die Wirkung der Kompressionsfedern zur\u00fcckgedr\u00e4ngt ... ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "DomEmb(avg)",
"sec_num": null
},
{
"text": "https://www.cis.uni-muenchen.de/ dario/projects/zero_domain",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "https://github.com/moses-smt/ mosesdecoder/blob/master/scripts/ tokenizer/tokenizer.perl 3 https://github.com/rsennrich/ subword-nmt Before inference, we average the parameters of the 8 best checkpoints based on the validation perplexity. We use a beam size of 12. BLEU scores are computed on detokenized text using multi-bleudetok.perl from the Moses scripts 4 . For the evaluation of translation of domain-specific words, we used the script from 5 .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [
{
"text": "This work was supported by the European Research Council (ERC) under the European Union's Horizon 2020 research and innovation programme (grant agreement No. 640550) and by the German Research Foundation (DFG; grant FR 2829/4-1).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acknowledgments",
"sec_num": null
}
],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Non-Parametric Adaptation for Neural Machine Translation",
"authors": [
{
"first": "Ankur",
"middle": [],
"last": "Bapna",
"suffix": ""
},
{
"first": "Orhan",
"middle": [],
"last": "Firat",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
"volume": "",
"issue": "",
"pages": "1921--1931",
"other_ids": {
"DOI": [
"10.18653/v1/N19-1191"
]
},
"num": null,
"urls": [],
"raw_text": "Ankur Bapna and Orhan Firat. 2019. Non-Parametric Adaptation for Neural Machine Translation. In Pro- ceedings of the 2019 Conference of the North Amer- ican Chapter of the Association for Computational Linguistics: Human Language Technologies, Vol- ume 1 (Long and Short Papers), pages 1921-1931, Minneapolis, Minnesota. Association for Computa- tional Linguistics.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Evaluating Discourse Phenomena in Neural Machine Translation",
"authors": [
{
"first": "Rachel",
"middle": [],
"last": "Bawden",
"suffix": ""
},
{
"first": "Rico",
"middle": [],
"last": "Sennrich",
"suffix": ""
},
{
"first": "Alexandra",
"middle": [],
"last": "Birch",
"suffix": ""
},
{
"first": "Barry",
"middle": [],
"last": "Haddow",
"suffix": ""
}
],
"year": 2018,
"venue": "NAACL 2018",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Rachel Bawden, Rico Sennrich, Alexandra Birch, and Barry Haddow. 2018. Evaluating Discourse Phe- nomena in Neural Machine Translation. In NAACL 2018, New Orleans, USA.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Effective Domain Mixing for Neural Machine Translation",
"authors": [
{
"first": "Denny",
"middle": [],
"last": "Britz",
"suffix": ""
},
{
"first": "Quoc",
"middle": [],
"last": "Le",
"suffix": ""
},
{
"first": "Reid",
"middle": [],
"last": "Pryzant",
"suffix": ""
}
],
"year": 2017,
"venue": "Proceedings of the Second Conference on Machine Translation",
"volume": "",
"issue": "",
"pages": "118--126",
"other_ids": {
"DOI": [
"10.18653/v1/W17-4712"
]
},
"num": null,
"urls": [],
"raw_text": "Denny Britz, Quoc Le, and Reid Pryzant. 2017. Effec- tive Domain Mixing for Neural Machine Translation. In Proceedings of the Second Conference on Ma- chine Translation, pages 118-126. Association for Computational Linguistics.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Cost Weighting for Neural Machine Translation Domain Adaptation",
"authors": [
{
"first": "Boxing",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "Colin",
"middle": [],
"last": "Cherry",
"suffix": ""
},
{
"first": "George",
"middle": [],
"last": "Foster",
"suffix": ""
},
{
"first": "Samuel",
"middle": [],
"last": "Larkin",
"suffix": ""
}
],
"year": 2017,
"venue": "Proceedings of the First Workshop on Neural Machine Translation",
"volume": "",
"issue": "",
"pages": "40--46",
"other_ids": {
"DOI": [
"10.18653/v1/W17-3205"
]
},
"num": null,
"urls": [],
"raw_text": "Boxing Chen, Colin Cherry, George Foster, and Samuel Larkin. 2017. Cost Weighting for Neural Machine Translation Domain Adaptation. In Pro- ceedings of the First Workshop on Neural Machine Translation, pages 40-46. Association for Computa- tional Linguistics.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "A Simple, Fast, and Effective Reparameterization of IBM Model 2",
"authors": [
{
"first": "Chris",
"middle": [],
"last": "Dyer",
"suffix": ""
},
{
"first": "Victor",
"middle": [],
"last": "Chahuneau",
"suffix": ""
},
{
"first": "Noah",
"middle": [
"A"
],
"last": "Smith",
"suffix": ""
}
],
"year": 2013,
"venue": "Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
"volume": "",
"issue": "",
"pages": "644--648",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Chris Dyer, Victor Chahuneau, and Noah A. Smith. 2013. A Simple, Fast, and Effective Reparameter- ization of IBM Model 2. In Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies, pages 644-648. Asso- ciation for Computational Linguistics.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Multi-Domain Neural Machine Translation through Unsupervised Adaptation",
"authors": [
{
"first": "M",
"middle": [],
"last": "Amin Farajian",
"suffix": ""
},
{
"first": "Marco",
"middle": [],
"last": "Turchi",
"suffix": ""
},
{
"first": "Matteo",
"middle": [],
"last": "Negri",
"suffix": ""
},
{
"first": "Marcello",
"middle": [],
"last": "Federico",
"suffix": ""
}
],
"year": 2017,
"venue": "Proceedings of the Second Conference on Machine Translation",
"volume": "",
"issue": "",
"pages": "127--137",
"other_ids": {
"DOI": [
"10.18653/v1/W17-4713"
]
},
"num": null,
"urls": [],
"raw_text": "M. Amin Farajian, Marco Turchi, Matteo Negri, and Marcello Federico. 2017. Multi-Domain Neural Ma- chine Translation through Unsupervised Adaptation. In Proceedings of the Second Conference on Ma- chine Translation, pages 127-137. Association for Computational Linguistics.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Discriminative instance weighting for domain adaptation in statistical machine translation",
"authors": [
{
"first": "George",
"middle": [],
"last": "Foster",
"suffix": ""
},
{
"first": "Cyril",
"middle": [],
"last": "Goutte",
"suffix": ""
},
{
"first": "Roland",
"middle": [],
"last": "Kuhn",
"suffix": ""
}
],
"year": 2010,
"venue": "Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "451--459",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "George Foster, Cyril Goutte, and Roland Kuhn. 2010. Discriminative instance weighting for domain adap- tation in statistical machine translation. In Proceed- ings of the 2010 Conference on Empirical Meth- ods in Natural Language Processing, pages 451- 459, Cambridge, MA. Association for Computa- tional Linguistics.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Mixture-model adaptation for SMT",
"authors": [
{
"first": "George",
"middle": [],
"last": "Foster",
"suffix": ""
},
{
"first": "Roland",
"middle": [],
"last": "Kuhn",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the Second Workshop on Statistical Machine Translation",
"volume": "",
"issue": "",
"pages": "128--135",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "George Foster and Roland Kuhn. 2007. Mixture-model adaptation for SMT. In Proceedings of the Second Workshop on Statistical Machine Translation, pages 128-135, Prague, Czech Republic. Association for Computational Linguistics.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Fast Domain Adaptation for Neural Machine Translation",
"authors": [
{
"first": "Markus",
"middle": [],
"last": "Freitag",
"suffix": ""
},
{
"first": "Yaser",
"middle": [],
"last": "Al-Onaizan",
"suffix": ""
}
],
"year": 2016,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Markus Freitag and Yaser Al-Onaizan. 2016. Fast Do- main Adaptation for Neural Machine Translation. CoRR, abs/1612.06897.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Sockeye: A Toolkit for Neural Machine Translation",
"authors": [
{
"first": "Felix",
"middle": [],
"last": "Hieber",
"suffix": ""
},
{
"first": "Tobias",
"middle": [],
"last": "Domhan",
"suffix": ""
},
{
"first": "Michael",
"middle": [],
"last": "Denkowski",
"suffix": ""
},
{
"first": "David",
"middle": [],
"last": "Vilar",
"suffix": ""
}
],
"year": 2017,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Felix Hieber, Tobias Domhan, Michael Denkowski, David Vilar, Artem Sokolov, Ann Clifton, and Matt Post. 2017. Sockeye: A Toolkit for Neural Machine Translation. ArXiv e-prints.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Microsoft Translator at WMT 2019: Towards Large-Scale Document-Level Neural Machine Translation",
"authors": [
{
"first": "Marcin",
"middle": [],
"last": "Junczys-Dowmunt",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the Fourth Conference on Machine Translation",
"volume": "2",
"issue": "",
"pages": "225--233",
"other_ids": {
"DOI": [
"10.18653/v1/W19-5321"
]
},
"num": null,
"urls": [],
"raw_text": "Marcin Junczys-Dowmunt. 2019. Microsoft Transla- tor at WMT 2019: Towards Large-Scale Document- Level Neural Machine Translation. In Proceedings of the Fourth Conference on Machine Translation (Volume 2: Shared Task Papers, Day 1), pages 225- 233, Florence, Italy. Association for Computational Linguistics.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "When and why is document-level context useful in neural machine translation?",
"authors": [
{
"first": "Yunsu",
"middle": [],
"last": "Kim",
"suffix": ""
},
{
"first": "Thanh",
"middle": [],
"last": "Tran",
"suffix": ""
},
{
"first": "Hermann",
"middle": [],
"last": "Ney",
"suffix": ""
}
],
"year": 2019,
"venue": "DiscoMT@EMNLP",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yunsu Kim, Thanh Tran, and Hermann Ney. 2019. When and why is document-level context useful in neural machine translation? In DiscoMT@EMNLP.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Domain Control for Neural Machine Translation",
"authors": [
{
"first": "Catherine",
"middle": [],
"last": "Kobus",
"suffix": ""
},
{
"first": "Josep",
"middle": [],
"last": "Crego",
"suffix": ""
},
{
"first": "Jean",
"middle": [],
"last": "Senellart",
"suffix": ""
}
],
"year": 2017,
"venue": "Proceedings of the International Conference Recent Advances in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "372--378",
"other_ids": {
"DOI": [
"10.26615/978-954-452-049-6_049"
]
},
"num": null,
"urls": [],
"raw_text": "Catherine Kobus, Josep Crego, and Jean Senellart. 2017. Domain Control for Neural Machine Transla- tion. In Proceedings of the International Conference Recent Advances in Natural Language Processing, RANLP 2017, pages 372-378. INCOMA Ltd.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Statistical Significance Tests for Machine Translation Evaluation",
"authors": [
{
"first": "Philipp",
"middle": [],
"last": "Koehn",
"suffix": ""
}
],
"year": 2004,
"venue": "Proceedings of the 2004 Conference on Empirical Methods in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "388--395",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Philipp Koehn. 2004. Statistical Significance Tests for Machine Translation Evaluation. In Proceed- ings of the 2004 Conference on Empirical Meth- ods in Natural Language Processing, pages 388- 395, Barcelona, Spain. Association for Computa- tional Linguistics.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Does multi-encoder help? a case study on contextaware neural machine translation",
"authors": [
{
"first": "Bei",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Hui",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Ziyang",
"middle": [],
"last": "Wang",
"suffix": ""
},
{
"first": "Yufan",
"middle": [],
"last": "Jiang",
"suffix": ""
},
{
"first": "Tong",
"middle": [],
"last": "Xiao",
"suffix": ""
},
{
"first": "Jingbo",
"middle": [],
"last": "Zhu",
"suffix": ""
},
{
"first": "Tongran",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Changliang",
"middle": [],
"last": "Li",
"suffix": ""
}
],
"year": 2020,
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "3512--3518",
"other_ids": {
"DOI": [
"10.18653/v1/2020.acl-main.322"
]
},
"num": null,
"urls": [],
"raw_text": "Bei Li, Hui Liu, Ziyang Wang, Yufan Jiang, Tong Xiao, Jingbo Zhu, Tongran Liu, and Changliang Li. 2020. Does multi-encoder help? a case study on context- aware neural machine translation. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 3512-3518, On- line. Association for Computational Linguistics.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "OpenSub-titles2016: Extracting large parallel corpora from movie and TV subtitles",
"authors": [
{
"first": "Pierre",
"middle": [],
"last": "Lison",
"suffix": ""
},
{
"first": "J\u00f6rg",
"middle": [],
"last": "Tiedemann",
"suffix": ""
}
],
"year": 2016,
"venue": "Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16)",
"volume": "",
"issue": "",
"pages": "923--929",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Pierre Lison and J\u00f6rg Tiedemann. 2016. OpenSub- titles2016: Extracting large parallel corpora from movie and TV subtitles. In Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16), pages 923-929, Por- toro\u017e, Slovenia. European Language Resources As- sociation (ELRA).",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Handling Homographs in Neural Machine Translation",
"authors": [
{
"first": "Frederick",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Han",
"middle": [],
"last": "Lu",
"suffix": ""
},
{
"first": "Graham",
"middle": [],
"last": "Neubig",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
"volume": "1",
"issue": "",
"pages": "1336--1345",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Frederick Liu, Han Lu, and Graham Neubig. 2018. Handling Homographs in Neural Machine Transla- tion. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Com- putational Linguistics: Human Language Technolo- gies, Volume 1 (Long Papers), pages 1336-1345.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "Stanford Neural Machine Translation Systems for Spoken Language Domains",
"authors": [
{
"first": "Minh-Thang",
"middle": [],
"last": "Luong",
"suffix": ""
},
{
"first": "Christopher D",
"middle": [],
"last": "Manning",
"suffix": ""
}
],
"year": 2015,
"venue": "Proceedings of the International Workshop on Spoken Language Translation",
"volume": "",
"issue": "",
"pages": "76--79",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Minh-Thang Luong and Christopher D Manning. 2015. Stanford Neural Machine Translation Systems for Spoken Language Domains. In Proceedings of the International Workshop on Spoken Language Trans- lation, pages 76-79.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "Using whole document context in neural machine translation",
"authors": [
{
"first": "Valentin",
"middle": [],
"last": "Mac\u00e9",
"suffix": ""
},
{
"first": "Christophe",
"middle": [],
"last": "Servan",
"suffix": ""
}
],
"year": 2019,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"arXiv": [
"arXiv:1910.07481"
]
},
"num": null,
"urls": [],
"raw_text": "Valentin Mac\u00e9 and Christophe Servan. 2019. Using whole document context in neural machine transla- tion. arXiv preprint arXiv:1910.07481.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "Document Context Neural Machine Translation with Memory Networks",
"authors": [
{
"first": "Sameen",
"middle": [],
"last": "Maruf",
"suffix": ""
},
{
"first": "Gholamreza",
"middle": [],
"last": "Haffari",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics",
"volume": "1",
"issue": "",
"pages": "1275--1284",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sameen Maruf and Gholamreza Haffari. 2018. Doc- ument Context Neural Machine Translation with Memory Networks. In Proceedings of the 56th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1275- 1284, Melbourne, Australia. Association for Compu- tational Linguistics.",
"links": null
},
"BIBREF20": {
"ref_id": "b20",
"title": "Selective Attention for Contextaware Neural Machine Translation",
"authors": [
{
"first": "Sameen",
"middle": [],
"last": "Maruf",
"suffix": ""
},
{
"first": "F",
"middle": [
"T"
],
"last": "Andr\u00e9",
"suffix": ""
},
{
"first": "Gholamreza",
"middle": [],
"last": "Martins",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Haffari",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
"volume": "1",
"issue": "",
"pages": "3092--3102",
"other_ids": {
"DOI": [
"10.18653/v1/N19-1313"
]
},
"num": null,
"urls": [],
"raw_text": "Sameen Maruf, Andr\u00e9 F. T. Martins, and Gholam- reza Haffari. 2019. Selective Attention for Context- aware Neural Machine Translation. In Proceedings of the 2019 Conference of the North American Chap- ter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 3092-3102, Minneapolis, Minnesota. Association for Computational Linguis- tics.",
"links": null
},
"BIBREF21": {
"ref_id": "b21",
"title": "Document-Level Neural Machine Translation with Hierarchical Attention Networks",
"authors": [
{
"first": "Lesly",
"middle": [],
"last": "Miculicich",
"suffix": ""
},
{
"first": "Dhananjay",
"middle": [],
"last": "Ram",
"suffix": ""
},
{
"first": "Nikolaos",
"middle": [],
"last": "Pappas",
"suffix": ""
},
{
"first": "James",
"middle": [],
"last": "Henderson",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "2947--2954",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Lesly Miculicich, Dhananjay Ram, Nikolaos Pappas, and James Henderson. 2018. Document-Level Neu- ral Machine Translation with Hierarchical Attention Networks. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Pro- cessing, pages 2947-2954. Association for Compu- tational Linguistics.",
"links": null
},
"BIBREF22": {
"ref_id": "b22",
"title": "A Large-Scale Test Set for the Evaluation of Context-Aware Pronoun Translation in Neural Machine Translation",
"authors": [
{
"first": "Mathias",
"middle": [],
"last": "M\u00fcller",
"suffix": ""
},
{
"first": "Annette",
"middle": [],
"last": "Rios",
"suffix": ""
},
{
"first": "Elena",
"middle": [],
"last": "Voita",
"suffix": ""
},
{
"first": "Rico",
"middle": [],
"last": "Sennrich",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the Third Conference on Machine Translation",
"volume": "1",
"issue": "",
"pages": "61--72",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mathias M\u00fcller, Annette Rios, Elena Voita, and Rico Sennrich. 2018. A Large-Scale Test Set for the Evaluation of Context-Aware Pronoun Translation in Neural Machine Translation. In Proceedings of the Third Conference on Machine Translation, Vol- ume 1: Research Papers, pages 61-72, Brussels, Belgium. Association for Computational Linguis- tics.",
"links": null
},
"BIBREF23": {
"ref_id": "b23",
"title": "Machine translation system selection from bandit feedback",
"authors": [
{
"first": "Jason",
"middle": [],
"last": "Naradowsky",
"suffix": ""
},
{
"first": "Xuan",
"middle": [],
"last": "Zhang",
"suffix": ""
},
{
"first": "Kevin",
"middle": [],
"last": "Duh",
"suffix": ""
}
],
"year": 2020,
"venue": "Proceedings of the 14th Conference of the Association for Machine Translation in the Americas",
"volume": "1",
"issue": "",
"pages": "50--63",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jason Naradowsky, Xuan Zhang, and Kevin Duh. 2020. Machine translation system selection from bandit feedback. In Proceedings of the 14th Conference of the Association for Machine Translation in the Americas (Volume 1: Research Track), pages 50-63, Virtual. Association for Machine Translation in the Americas.",
"links": null
},
"BIBREF24": {
"ref_id": "b24",
"title": "Neural Machine Translation Training in a Multi-Domain Scenario",
"authors": [
{
"first": "Hassan",
"middle": [],
"last": "Sajjad",
"suffix": ""
},
{
"first": "Nadir",
"middle": [],
"last": "Durrani",
"suffix": ""
},
{
"first": "Fahim",
"middle": [],
"last": "Dalvi",
"suffix": ""
},
{
"first": "Yonatan",
"middle": [],
"last": "Belinkov",
"suffix": ""
},
{
"first": "Stephan",
"middle": [],
"last": "Vogel",
"suffix": ""
}
],
"year": 2017,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hassan Sajjad, Nadir Durrani, Fahim Dalvi, Yonatan Belinkov, and Stephan Vogel. 2017. Neural Ma- chine Translation Training in a Multi-Domain Sce- nario. CoRR, abs/1708.08712.",
"links": null
},
"BIBREF25": {
"ref_id": "b25",
"title": "Coreference and Coherence in Neural Machine Translation: A Study Using Oracle Experiments",
"authors": [
{
"first": "Dario",
"middle": [],
"last": "Stojanovski",
"suffix": ""
},
{
"first": "Alexander",
"middle": [],
"last": "Fraser",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the Third Conference on Machine Translation",
"volume": "1",
"issue": "",
"pages": "49--60",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Dario Stojanovski and Alexander Fraser. 2018. Coref- erence and Coherence in Neural Machine Transla- tion: A Study Using Oracle Experiments. In Pro- ceedings of the Third Conference on Machine Trans- lation, Volume 1: Research Papers, pages 49-60, Brussels, Belgium. Association for Computational Linguistics.",
"links": null
},
"BIBREF26": {
"ref_id": "b26",
"title": "Combining local and document-level context: The lmu munich neural machine translation system at wmt19",
"authors": [
{
"first": "Dario",
"middle": [],
"last": "Stojanovski",
"suffix": ""
},
{
"first": "Alexander",
"middle": [],
"last": "Fraser",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the Fourth Conference on Machine Translation",
"volume": "2",
"issue": "",
"pages": "400--406",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Dario Stojanovski and Alexander Fraser. 2019a. Com- bining local and document-level context: The lmu munich neural machine translation system at wmt19. In Proceedings of the Fourth Conference on Ma- chine Translation (Volume 2: Shared Task Papers, Day 1), pages 400-406, Florence, Italy. Association for Computational Linguistics.",
"links": null
},
"BIBREF27": {
"ref_id": "b27",
"title": "Improving Anaphora Resolution in Neural Machine Translation Using Curriculum Learning",
"authors": [
{
"first": "Dario",
"middle": [],
"last": "Stojanovski",
"suffix": ""
},
{
"first": "Alexander",
"middle": [],
"last": "Fraser",
"suffix": ""
}
],
"year": 2019,
"venue": "Research Track",
"volume": "1",
"issue": "",
"pages": "140--150",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Dario Stojanovski and Alexander Fraser. 2019b. Im- proving Anaphora Resolution in Neural Machine Translation Using Curriculum Learning. In Proceed- ings of Machine Translation Summit XVII Volume 1: Research Track, pages 140-150, Dublin, Ireland. Eu- ropean Association for Machine Translation.",
"links": null
},
"BIBREF28": {
"ref_id": "b28",
"title": "Hierarchical Modeling of Global Context for Document-Level Neural Machine Translation",
"authors": [
{
"first": "Xin",
"middle": [],
"last": "Tan",
"suffix": ""
},
{
"first": "Longyin",
"middle": [],
"last": "Zhang",
"suffix": ""
},
{
"first": "Deyi",
"middle": [],
"last": "Xiong",
"suffix": ""
},
{
"first": "Guodong",
"middle": [],
"last": "Zhou",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)",
"volume": "",
"issue": "",
"pages": "1576--1585",
"other_ids": {
"DOI": [
"10.18653/v1/D19-1168"
]
},
"num": null,
"urls": [],
"raw_text": "Xin Tan, Longyin Zhang, Deyi Xiong, and Guodong Zhou. 2019. Hierarchical Modeling of Global Con- text for Document-Level Neural Machine Transla- tion. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natu- ral Language Processing (EMNLP-IJCNLP), pages 1576-1585, Hong Kong, China. Association for Computational Linguistics.",
"links": null
},
"BIBREF30": {
"ref_id": "b30",
"title": "Neural Machine Translation with Extended Context",
"authors": [
{
"first": "J\u00f6rg",
"middle": [],
"last": "Tiedemann",
"suffix": ""
},
{
"first": "Yves",
"middle": [],
"last": "Scherrer",
"suffix": ""
}
],
"year": 2017,
"venue": "Proceedings of the Third Workshop on Discourse in Machine Translation",
"volume": "",
"issue": "",
"pages": "82--92",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "J\u00f6rg Tiedemann and Yves Scherrer. 2017. Neural Ma- chine Translation with Extended Context. In Pro- ceedings of the Third Workshop on Discourse in Ma- chine Translation, pages 82-92.",
"links": null
},
"BIBREF31": {
"ref_id": "b31",
"title": "Learning to Remember Translation History with a Continuous Cache",
"authors": [
{
"first": "Zhaopeng",
"middle": [],
"last": "Tu",
"suffix": ""
},
{
"first": "Yang",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Shuming",
"middle": [],
"last": "Shi",
"suffix": ""
},
{
"first": "Tong",
"middle": [],
"last": "Zhang",
"suffix": ""
}
],
"year": 2018,
"venue": "Transactions of the Association for Computational Linguistics",
"volume": "6",
"issue": "",
"pages": "407--420",
"other_ids": {
"DOI": [
"10.1162/tacl_a_00029"
]
},
"num": null,
"urls": [],
"raw_text": "Zhaopeng Tu, Yang Liu, Shuming Shi, and Tong Zhang. 2018. Learning to Remember Translation History with a Continuous Cache. Transactions of the Asso- ciation for Computational Linguistics, 6:407-420.",
"links": null
},
"BIBREF32": {
"ref_id": "b32",
"title": "Attention is All you Need",
"authors": [
{
"first": "Ashish",
"middle": [],
"last": "Vaswani",
"suffix": ""
},
{
"first": "Noam",
"middle": [],
"last": "Shazeer",
"suffix": ""
},
{
"first": "Niki",
"middle": [],
"last": "Parmar",
"suffix": ""
},
{
"first": "Jakob",
"middle": [],
"last": "Uszkoreit",
"suffix": ""
},
{
"first": "Llion",
"middle": [],
"last": "Jones",
"suffix": ""
},
{
"first": "Aidan",
"middle": [
"N"
],
"last": "Gomez",
"suffix": ""
},
{
"first": "\u0141ukasz",
"middle": [],
"last": "Kaiser",
"suffix": ""
},
{
"first": "Illia",
"middle": [],
"last": "Polosukhin",
"suffix": ""
}
],
"year": 2017,
"venue": "Advances in Neural Information Processing Systems",
"volume": "",
"issue": "",
"pages": "6000--6010",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is All you Need. In Advances in Neural Information Pro- cessing Systems, pages 6000-6010.",
"links": null
},
"BIBREF33": {
"ref_id": "b33",
"title": "Context-Aware Monolingual Repair for Neural Machine Translation",
"authors": [
{
"first": "Elena",
"middle": [],
"last": "Voita",
"suffix": ""
},
{
"first": "Rico",
"middle": [],
"last": "Sennrich",
"suffix": ""
},
{
"first": "Ivan",
"middle": [],
"last": "Titov",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)",
"volume": "",
"issue": "",
"pages": "876--885",
"other_ids": {
"DOI": [
"10.18653/v1/D19-1081"
]
},
"num": null,
"urls": [],
"raw_text": "Elena Voita, Rico Sennrich, and Ivan Titov. 2019a. Context-Aware Monolingual Repair for Neural Ma- chine Translation. In Proceedings of the 2019 Con- ference on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing (EMNLP- IJCNLP), pages 876-885, Hong Kong, China. As- sociation for Computational Linguistics.",
"links": null
},
"BIBREF34": {
"ref_id": "b34",
"title": "When a Good Translation is Wrong in Context: Context-Aware Machine Translation Improves on Deixis, Ellipsis, and Lexical Cohesion",
"authors": [
{
"first": "Elena",
"middle": [],
"last": "Voita",
"suffix": ""
},
{
"first": "Rico",
"middle": [],
"last": "Sennrich",
"suffix": ""
},
{
"first": "Ivan",
"middle": [],
"last": "Titov",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "1198--1212",
"other_ids": {
"DOI": [
"10.18653/v1/P19-1116"
]
},
"num": null,
"urls": [],
"raw_text": "Elena Voita, Rico Sennrich, and Ivan Titov. 2019b. When a Good Translation is Wrong in Context: Context-Aware Machine Translation Improves on Deixis, Ellipsis, and Lexical Cohesion. In Pro- ceedings of the 57th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 1198- 1212, Florence, Italy. Association for Computational Linguistics.",
"links": null
},
"BIBREF35": {
"ref_id": "b35",
"title": "Context-Aware Neural Machine Translation Learns Anaphora Resolution",
"authors": [
{
"first": "Elena",
"middle": [],
"last": "Voita",
"suffix": ""
},
{
"first": "Pavel",
"middle": [],
"last": "Serdyukov",
"suffix": ""
},
{
"first": "Rico",
"middle": [],
"last": "Sennrich",
"suffix": ""
},
{
"first": "Ivan",
"middle": [],
"last": "Titov",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics",
"volume": "1",
"issue": "",
"pages": "1264--1274",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Elena Voita, Pavel Serdyukov, Rico Sennrich, and Ivan Titov. 2018. Context-Aware Neural Machine Trans- lation Learns Anaphora Resolution. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1264-1274, Melbourne, Australia.",
"links": null
},
"BIBREF36": {
"ref_id": "b36",
"title": "Domain Dependent Statistical Machine Translation",
"authors": [
{
"first": "Jia",
"middle": [],
"last": "Xu",
"suffix": ""
},
{
"first": "Yonggang",
"middle": [],
"last": "Deng",
"suffix": ""
},
{
"first": "Yuqing",
"middle": [],
"last": "Gao",
"suffix": ""
},
{
"first": "Hermann",
"middle": [],
"last": "Ney",
"suffix": ""
}
],
"year": 2007,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jia Xu, Yonggang Deng, Yuqing Gao, and Hermann Ney. 2007. Domain Dependent Statistical Machine Translation. In MT Summit.",
"links": null
},
"BIBREF37": {
"ref_id": "b37",
"title": "Enhancing Context Modeling with a Query-Guided Capsule Network for Document-level Translation",
"authors": [
{
"first": "Zhengxin",
"middle": [],
"last": "Yang",
"suffix": ""
},
{
"first": "Jinchao",
"middle": [],
"last": "Zhang",
"suffix": ""
},
{
"first": "Fandong",
"middle": [],
"last": "Meng",
"suffix": ""
},
{
"first": "Shuhao",
"middle": [],
"last": "Gu",
"suffix": ""
},
{
"first": "Yang",
"middle": [],
"last": "Feng",
"suffix": ""
},
{
"first": "Jie",
"middle": [],
"last": "Zhou",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)",
"volume": "",
"issue": "",
"pages": "1527--1537",
"other_ids": {
"DOI": [
"10.18653/v1/D19-1164"
]
},
"num": null,
"urls": [],
"raw_text": "Zhengxin Yang, Jinchao Zhang, Fandong Meng, Shuhao Gu, Yang Feng, and Jie Zhou. 2019. En- hancing Context Modeling with a Query-Guided Capsule Network for Document-level Translation. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 1527- 1537, Hong Kong, China. Association for Computa- tional Linguistics.",
"links": null
},
"BIBREF38": {
"ref_id": "b38",
"title": "Multi-Domain Neural Machine Translation with Word-Level Domain Context Discrimination",
"authors": [
{
"first": "Jiali",
"middle": [],
"last": "Zeng",
"suffix": ""
},
{
"first": "Jinsong",
"middle": [],
"last": "Su",
"suffix": ""
},
{
"first": "Huating",
"middle": [],
"last": "Wen",
"suffix": ""
},
{
"first": "Yang",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Jun",
"middle": [],
"last": "Xie",
"suffix": ""
},
{
"first": "Yongjing",
"middle": [],
"last": "Yin",
"suffix": ""
},
{
"first": "Jianqiang",
"middle": [],
"last": "Zhao",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "447--457",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jiali Zeng, Jinsong Su, Huating Wen, Yang Liu, Jun Xie, Yongjing Yin, and Jianqiang Zhao. 2018. Multi- Domain Neural Machine Translation with Word- Level Domain Context Discrimination. In Proceed- ings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 447-457. Association for Computational Linguistics.",
"links": null
},
"BIBREF39": {
"ref_id": "b39",
"title": "Improving the Transformer Translation Model with Document-Level Context",
"authors": [
{
"first": "Jiacheng",
"middle": [],
"last": "Zhang",
"suffix": ""
},
{
"first": "Huanbo",
"middle": [],
"last": "Luan",
"suffix": ""
},
{
"first": "Maosong",
"middle": [],
"last": "Sun",
"suffix": ""
},
{
"first": "Feifei",
"middle": [],
"last": "Zhai",
"suffix": ""
},
{
"first": "Jingfang",
"middle": [],
"last": "Xu",
"suffix": ""
},
{
"first": "Min",
"middle": [],
"last": "Zhang",
"suffix": ""
},
{
"first": "Yang",
"middle": [],
"last": "Liu",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "533--542",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jiacheng Zhang, Huanbo Luan, Maosong Sun, Feifei Zhai, Jingfang Xu, Min Zhang, and Yang Liu. 2018. Improving the Transformer Translation Model with Document-Level Context. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 533-542. Association for Computational Linguistics.",
"links": null
},
"BIBREF40": {
"ref_id": "b40",
"title": "Sentence Weighting for Neural Machine Translation Domain Adaptation",
"authors": [
{
"first": "Shiqi",
"middle": [],
"last": "Zhang",
"suffix": ""
},
{
"first": "Deyi",
"middle": [],
"last": "Xiong",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the 27th International Conference on Computational Linguistics",
"volume": "",
"issue": "",
"pages": "3181--3190",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Shiqi Zhang and Deyi Xiong. 2018. Sentence Weight- ing for Neural Machine Translation Domain Adapta- tion. In Proceedings of the 27th International Con- ference on Computational Linguistics, pages 3181- 3190. Association for Computational Linguistics. 4 https://github.com/moses-smt/ mosesdecoder/blob/master/scripts/ generic/multi-bleu-detok.perl 5 https://github.com/frederick0329/ Evaluate-Word-Level-Translation 6 http://statmt.org/wmt19/ translation-task.html 7 http://opus.nlpl.eu/ OpenSubtitles-v2018.php 8 http://www.opensubtitles.org/ 9 http://opus.nlpl.eu/Ubuntu.php 10 http://www.cl.uni-heidelberg.de/ statnlpgroup/pattr/ 11 https://wit3.fbk.eu/2015-01",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"text": "Context-aware Transformer with pooling.",
"type_str": "figure",
"num": null,
"uris": null
},
"FIGREF1": {
"text": "the contextual representation of a test sentence in domain d and N d is the number of test sentences in d. c d",
"type_str": "figure",
"num": null,
"uris": null
},
"TABREF2": {
"num": null,
"content": "<table/>",
"html": null,
"text": "",
"type_str": "table"
},
"TABREF4": {
"num": null,
"content": "<table/>",
"html": null,
"text": "Results on the multi-domain dataset. Joint and average scores including PatTR and TED. Statistical significance computed for all scores except for Average. \u2020-p < 0.01, \u2021-p < 0.05.",
"type_str": "table"
},
"TABREF6": {
"num": null,
"content": "<table><tr><td>: Results using the DomEmb(avg) model with</td></tr><tr><td>different context sizes. Context size in number of pre-</td></tr><tr><td>vious sentences. \u2021-p < 0.01, ** -p < 0.05, compared</td></tr><tr><td>to SentBase. \u2020-p < 0.01, * -p < 0.05, compared to</td></tr><tr><td>ctx=1.</td></tr></table>",
"html": null,
"text": "",
"type_str": "table"
},
"TABREF8": {
"num": null,
"content": "<table/>",
"html": null,
"text": "",
"type_str": "table"
},
"TABREF10": {
"num": null,
"content": "<table><tr><td>domain</td><td colspan=\"2\">SentBase DomEmb(a)</td></tr><tr><td>PatTR</td><td>34.4</td><td>34.4</td></tr><tr><td/><td/><td>ensemble</td></tr><tr><td>Europarl</td><td>29.0</td><td>29.6 \u2020</td></tr><tr><td>NewsCommentary</td><td>28.7</td><td>28.9</td></tr><tr><td>OpenSubtitles</td><td>22.8</td><td>23.4 \u2020</td></tr><tr><td>Rapid</td><td>35.1</td><td>35.7 \u2020</td></tr><tr><td>Ubuntu</td><td>33.0</td><td>33.4</td></tr><tr><td>PatTR</td><td>29.2</td><td>29.4</td></tr><tr><td>TED</td><td>29.8</td><td>30.4 \u2021</td></tr><tr><td>Average</td><td>29.7</td><td>30.1</td></tr><tr><td>Joint</td><td>30.2</td><td>30.6 \u2020</td></tr></table>",
"html": null,
"text": "F 1 score for domain-specific words.",
"type_str": "table"
},
"TABREF11": {
"num": null,
"content": "<table><tr><td colspan=\"3\">: Domain adaptation results on PatTR for Sent-</td></tr><tr><td colspan=\"3\">Base and DomEmb(avg). \u2020-p < 0.01, \u2021-p < 0.05.</td></tr><tr><td>domain</td><td colspan=\"2\">SentBase DomEmb(a)</td></tr><tr><td>TED</td><td>36.1</td><td>36.6 \u2021</td></tr><tr><td/><td/><td>ensemble</td></tr><tr><td>Europarl</td><td>30.4</td><td>30.8 \u2020</td></tr><tr><td>NewsCommentary</td><td>31.9</td><td>32.2 \u2021</td></tr><tr><td>OpenSubtitles</td><td>24.6</td><td>25.4 \u2020</td></tr><tr><td>Rapid</td><td>38.8</td><td>39.5 \u2020</td></tr><tr><td>Ubuntu</td><td>32.7</td><td>32.4</td></tr><tr><td>PatTR</td><td>16.9</td><td>17.0 \u2021</td></tr><tr><td>TED</td><td>35.4</td><td>35.8 \u2021</td></tr><tr><td>Average</td><td>30.1</td><td>30.4</td></tr><tr><td>Joint</td><td>28.4</td><td>28.8 \u2020</td></tr></table>",
"html": null,
"text": "",
"type_str": "table"
},
"TABREF12": {
"num": null,
"content": "<table><tr><td>: Domain adaptation results on TED for Sent-</td></tr><tr><td>Base and DomEmb(avg). \u2020-p < 0.01, \u2021-p < 0.05.</td></tr><tr><td>DomEmb(avg) improved the F 1 score across all</td></tr><tr><td>domains with the largest improvements on Open-</td></tr><tr><td>Subtitles and TED. Our assumption is that the base-</td></tr><tr><td>line translation of OpenSubtitles domain-specific</td></tr><tr><td>words is more formal. A large part of the seen do-</td></tr><tr><td>mains contain formal language in contrast to the</td></tr></table>",
"html": null,
"text": "",
"type_str": "table"
},
"TABREF13": {
"num": null,
"content": "<table/>",
"html": null,
"text": "Results from the ablation study investigating the influence of context from a different domain. Each row shows which domain is used as the test set and each column shows from which domain the context originates.",
"type_str": "table"
},
"TABREF14": {
"num": null,
"content": "<table><tr><td>: Number of model parameters. TagBase, Con-</td></tr><tr><td>cBase and DomEmb(max) have the same number of</td></tr><tr><td>parameters as SentBase.</td></tr></table>",
"html": null,
"text": "",
"type_str": "table"
},
"TABREF16": {
"num": null,
"content": "<table><tr><td>domain</td><td>ctx=1</td><td>ctx=5</td><td>ctx=10</td></tr><tr><td>Europarl</td><td>33.5</td><td>33.8</td><td>33.7</td></tr><tr><td colspan=\"2\">NewsComm 34.0</td><td>34.2</td><td>34.1</td></tr><tr><td>OpenSub</td><td>33.7</td><td>34.1</td><td>34.5</td></tr><tr><td>Rapid</td><td>39.7</td><td>39.8</td><td>39.7</td></tr><tr><td>Ubuntu</td><td>41.5</td><td>43.0</td><td>42.6</td></tr><tr><td>domain</td><td colspan=\"3\">CtxBase ConcBase DomEmb(a)</td></tr><tr><td>Europarl</td><td>34.0</td><td>34.1</td><td>33.7</td></tr><tr><td colspan=\"2\">NewsComm 34.0</td><td>33.9</td><td>34.1</td></tr><tr><td>OpenSub</td><td>33.9</td><td>34.5</td><td>34.5</td></tr><tr><td>Rapid</td><td>40.1</td><td>39.1</td><td>39.7</td></tr><tr><td>Ubuntu</td><td>42.3</td><td>42.3</td><td>42.6</td></tr></table>",
"html": null,
"text": "BLEU scores on the development sets of the multi-domain dataset.",
"type_str": "table"
},
"TABREF17": {
"num": null,
"content": "<table><tr><td colspan=\"3\">domain SentBase DomEmb(a)</td></tr><tr><td>TED</td><td>33.2</td><td>33.4</td></tr><tr><td>PatTR</td><td>36.4</td><td>36.3</td></tr></table>",
"html": null,
"text": "Results on the development sets using the DomEmb(avg) model with different context sizes and comparing DomEmb(avg) with ctx=10 against CtxBase and ConcBase.",
"type_str": "table"
},
"TABREF18": {
"num": null,
"content": "<table/>",
"html": null,
"text": "Domain adaptation results on PatTR and TED for SentBase and DomEmb(avg) on the development sets.",
"type_str": "table"
},
"TABREF19": {
"num": null,
"content": "<table/>",
"html": null,
"text": "Example translations obtained using sentence-level baseline and the DomEmb(avg) model. Relevant parts of the examples are in bold.",
"type_str": "table"
}
}
}
} |