{ "paper_id": "2021", "header": { "generated_with": "S2ORC 1.0.0", "date_generated": "2023-01-19T02:11:15.621904Z" }, "title": "Zero-Shot Cross-Lingual Dependency Parsing through Contextual Embedding Transformation", "authors": [ { "first": "Haoran", "middle": [], "last": "Xu", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Philipp", "middle": [], "last": "Koehn", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Barbu", "middle": [], "last": "Mititelu", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Victoria", "middle": [], "last": "Basmov", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Colin", "middle": [], "last": "Batchelor", "suffix": "", "affiliation": {}, "email": "" }, { "first": "John", "middle": [], "last": "Bauer", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Kepa", "middle": [], "last": "Bengoetxea", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Yevgeni", "middle": [], "last": "Berzak", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Ir- Shad", "middle": [ "Ahmad" ], "last": "Bhat", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Riyaz", "middle": [ "Ahmad" ], "last": "Bhat", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Erica", "middle": [], "last": "Bi- Agetti", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Eckhard", "middle": [], "last": "Bick", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Agn\u0117", "middle": [], "last": "Bielinskien\u0117", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Rogier", "middle": [], "last": "Blokland", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Victoria", "middle": [], "last": "Bobicev", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Lo\u00efc", "middle": [], "last": "Boizou", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Emanuel", "middle": [], "last": "Borges V\u00f6lker", "suffix": "", "affiliation": {}, "email": "" }, 
{ "first": "Carl", "middle": [], "last": "B\u00f6rstell", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Cristina", "middle": [], "last": "Bosco", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Gosse", "middle": [], "last": "Bouma", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Sam", "middle": [], "last": "Bowman", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Adriane", "middle": [], "last": "Boyd", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Kristina", "middle": [], "last": "Brokait\u0117", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Aljoscha", "middle": [], "last": "Burchardt", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Marie", "middle": [], "last": "Candito", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Bernard", "middle": [], "last": "Caron", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Gauthier", "middle": [], "last": "Caron", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Tatiana", "middle": [], "last": "Cavalcanti", "suffix": "", "affiliation": {}, "email": "" }, { "first": "G\u00fcl\u015fen", "middle": [ "Cebiroglu" ], "last": "Eryigit", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Flavio", "middle": [], "last": "Massimiliano", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Giuseppe", "middle": [ "G A" ], "last": "Celano", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Savas", "middle": [], "last": "Slavom\u00edr\u010d\u00e9pl\u00f6", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Fabricio", "middle": [], "last": "Cetin", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Ethan", "middle": [], "last": "Chalub", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jinho", "middle": [], "last": "Chi", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Yongseok", "middle": [], "last": "Choi", "suffix": "", "affiliation": {}, "email": "" }, 
{ "first": "Jayeol", "middle": [], "last": "Cho", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Alessandra", "middle": [ "T" ], "last": "Chun", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Silvie", "middle": [], "last": "Cignarella", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Aur\u00e9lie", "middle": [], "last": "Cinkov\u00e1", "suffix": "", "affiliation": {}, "email": "" }, { "first": "", "middle": [], "last": "Collomb", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Agr\u0131", "middle": [], "last": "\u00c7\u00f6ltekin", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Miriam", "middle": [], "last": "Connor", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Marine", "middle": [], "last": "Courtin", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Eliza- Beth", "middle": [], "last": "Davidson", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Kaja", "middle": [], "last": "Dobrovoljc", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Timothy", "middle": [], "last": "Dozat", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Kira", "middle": [], "last": "Droganova", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Puneet", "middle": [], "last": "Dwivedi", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Hanne", "middle": [], "last": "Eckhoff", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Marhaba", "middle": [], "last": "Eli", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Ali", "middle": [], "last": "Elkahky", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Binyam", "middle": [], "last": "Ephrem", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Olga", "middle": [], "last": "Erina", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Er- Javec", "middle": [], "last": "Toma\u017e", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Aline", "middle": [], 
"last": "Etienne", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Wograine", "middle": [], "last": "Evelyn", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Rich\u00e1rd", "middle": [], "last": "Farkas", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Hector", "middle": [], "last": "Fernandez Alcalde", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jennifer", "middle": [], "last": "Fos- Ter", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Cl\u00e1udia", "middle": [], "last": "Freitas", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Kazunori", "middle": [], "last": "Fujita", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Katar\u00edna", "middle": [], "last": "Gajdo\u0161ov\u00e1", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Daniel", "middle": [], "last": "Galbraith", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Marcos", "middle": [], "last": "Garcia", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Moa", "middle": [], "last": "G\u00e4rdenfors", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Sebastian", "middle": [], "last": "Garza", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Kim", "middle": [], "last": "Gerdes", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Filip", "middle": [], "last": "Ginter", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Iakes", "middle": [], "last": "Goenaga", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Koldo", "middle": [], "last": "Gojenola", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Memduh", "middle": [], "last": "G\u00f6k\u0131rmak", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Yoav", "middle": [], "last": "Goldberg", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Xavier", "middle": [], "last": "G\u00f3mez", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Berta", "middle": [ 
"Gonz\u00e1lez" ], "last": "Saavedra", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Bernadeta", "middle": [], "last": "Grici\u016bt\u0117", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Matias", "middle": [], "last": "Grioni", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Lo\u00efc", "middle": [], "last": "Grobol", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Normunds", "middle": [], "last": "Gr\u016bz\u012btis", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Bruno", "middle": [], "last": "Guillaume", "suffix": "", "affiliation": {}, "email": "" }, { "first": "C\u00e9line", "middle": [], "last": "Guillot-Barbance", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Tunga", "middle": [], "last": "G\u00fcng\u00f6r", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Nizar", "middle": [], "last": "Habash", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jan", "middle": [], "last": "Haji\u010d", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Mika", "middle": [], "last": "H\u00e4m\u00e4l\u00e4inen", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Linh", "middle": [ "H\u00e0" ], "last": "M\u1ef9", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Na-Rae", "middle": [], "last": "Han", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Kim", "middle": [], "last": "Har- Ris", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Dag", "middle": [], "last": "Haug", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Johannes", "middle": [], "last": "Heinecke", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Oliver", "middle": [], "last": "Hellwig", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Felix", "middle": [], "last": "Hennig", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Barbora", "middle": [], "last": "Hladk\u00e1", "suffix": "", "affiliation": {}, "email": "" }, { 
"first": "Jaroslava", "middle": [], "last": "Hlav\u00e1\u010dov\u00e1", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Florinel", "middle": [], "last": "Hociung", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Petter", "middle": [], "last": "Hohle", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jena", "middle": [], "last": "Hwang", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Takumi", "middle": [], "last": "Ikeda", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Radu", "middle": [], "last": "Ion", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Elena", "middle": [], "last": "Irimia", "suffix": "", "affiliation": {}, "email": "" }, { "first": "O", "middle": [], "last": "L\u00e1j\u00edd\u00e9 Ishola", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Tom\u00e1\u0161", "middle": [], "last": "Jel\u00ednek", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Anders", "middle": [], "last": "Johannsen", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Hildur", "middle": [], "last": "J\u00f3nsd\u00f3ttir", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Fredrik", "middle": [], "last": "J\u00f8rgensen", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Markus", "middle": [], "last": "Juutinen", "suffix": "", "affiliation": {}, "email": "" }, { "first": "H\u00fcner", "middle": [], "last": "Ka\u015f\u0131kara", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Andre", "middle": [], "last": "Kaasen", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Nadezhda", "middle": [], "last": "Kabaeva", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Sylvain", "middle": [], "last": "Kahane", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Hiroshi", "middle": [], "last": "Kanayama", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jenna", "middle": [], "last": "Kan- Erva", "suffix": "", "affiliation": {}, 
"email": "" }, { "first": "Boris", "middle": [], "last": "Katz", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Tolga", "middle": [], "last": "Kayadelen", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jessica", "middle": [], "last": "Ken- Ney", "suffix": "", "affiliation": {}, "email": "" }, { "first": "V\u00e1clava", "middle": [], "last": "Kettnerov\u00e1", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jesse", "middle": [], "last": "Kirchner", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Elena", "middle": [], "last": "Kle- Mentieva", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Arne", "middle": [], "last": "K\u00f6hn", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Abdullatif", "middle": [], "last": "K\u00f6ksal", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Kamil", "middle": [], "last": "Kopacewicz", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Timo", "middle": [], "last": "Korkiakangas", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Natalia", "middle": [], "last": "Kotsyba", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jolanta", "middle": [], "last": "Kovalevskait\u0117", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Simon", "middle": [], "last": "Krek", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Sookyoung", "middle": [], "last": "Kwak", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Veronika", "middle": [], "last": "Laippala", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Lorenzo", "middle": [], "last": "Lambertino", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Lu- Cia", "middle": [], "last": "Lam", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Tatiana", "middle": [], "last": "Lando", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Septina", "middle": [ "Dian" ], "last": "Larasati", "suffix": "", "affiliation": {}, "email": 
"" }, { "first": "Alexei", "middle": [], "last": "Lavrentiev", "suffix": "", "affiliation": {}, "email": "" }, { "first": "John", "middle": [], "last": "Lee", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Phng", "middle": [], "last": "L\u00ea", "suffix": "", "affiliation": {}, "email": "" }, { "first": "H", "middle": [ "`" ], "last": "\u00d4ng", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Alessandro", "middle": [], "last": "Lenci", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Saran", "middle": [], "last": "Lertpradit", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Herman", "middle": [], "last": "Le- Ung", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Maria", "middle": [], "last": "Levina", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Ying", "middle": [], "last": "Cheuk", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Josie", "middle": [], "last": "Li", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Keying", "middle": [], "last": "Li", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Kyungtae", "middle": [], "last": "Li", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Yuan", "middle": [], "last": "Lim", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Nikola", "middle": [], "last": "Li", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Olga", "middle": [], "last": "Ljube\u0161i\u0107", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Olga", "middle": [], "last": "Loginova", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Teresa", "middle": [], "last": "Lyashevskaya", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Vivien", "middle": [], "last": "Lynn", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Aibek", "middle": [], "last": "Macketanz", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Michael", "middle": [], "last": "Makazhanov", "suffix": "", 
"affiliation": {}, "email": "" }, { "first": "Christopher", "middle": [], "last": "Mandl", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Ruli", "middle": [], "last": "Manning", "suffix": "", "affiliation": {}, "email": "" }, { "first": "C\u0203t\u0203lina", "middle": [], "last": "Ma- Nurung", "suffix": "", "affiliation": {}, "email": "" }, { "first": "David", "middle": [], "last": "M\u0203r\u0203nduc", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Ka- Trin", "middle": [], "last": "Mare\u010dek", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Mart\u00ednez", "middle": [], "last": "Marheinecke", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Andr\u00e9", "middle": [], "last": "Alonso", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jan", "middle": [], "last": "Martins", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Hiroshi", "middle": [], "last": "Ma\u0161ek", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Yuji", "middle": [], "last": "Matsuda", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Ryan", "middle": [], "last": "Mat- Sumoto", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Sarah", "middle": [], "last": "Mcdonald", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Gus- Tavo", "middle": [], "last": "Mcguinness", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Niko", "middle": [], "last": "Mendon\u00e7a", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Margarita", "middle": [], "last": "Miekka", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Anna", "middle": [], "last": "Misir- Pashayeva", "suffix": "", "affiliation": {}, "email": "" }, { "first": "C\u0203t\u0203lin", "middle": [], "last": "Missil\u00e4", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Maria", "middle": [], "last": "Mititelu", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Yusuke", "middle": [], "last": 
"Mitrofan", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Simonetta", "middle": [], "last": "Miyao", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Amir", "middle": [], "last": "Montemagni", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Laura", "middle": [ "Moreno" ], "last": "More", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Keiko", "middle": [ "Sophie" ], "last": "Romero", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Tomohiko", "middle": [], "last": "Mori", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Shinsuke", "middle": [], "last": "Morioka", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Shigeki", "middle": [], "last": "Mori", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Bjartur", "middle": [], "last": "Moro", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Bohdan", "middle": [], "last": "Mortensen", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Kadri", "middle": [], "last": "Moskalevskyi", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Robert", "middle": [], "last": "Muischnek", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Yugo", "middle": [], "last": "Munro", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Kaili", "middle": [], "last": "Murawaki", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Pinkey", "middle": [], "last": "M\u00fc\u00fcrisep", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Juan", "middle": [], "last": "Nainwani", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Navarro", "middle": [], "last": "Igna- Cio", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Anna", "middle": [], "last": "Hor\u00f1iacek", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Gunta", "middle": [], "last": "Nedoluzhko", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Lng", "middle": [], "last": 
"Ne\u0161pore-B\u0113rzkalne", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Thi", "middle": [], "last": "Nguy\u02dc\u00ean", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Nguy\u02dc\u00ean", "middle": [ "Thi" ], "last": "Minh", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Yoshihiro", "middle": [], "last": "Nikaido", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Vitaly", "middle": [], "last": "Niko- Laev", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Rattima", "middle": [], "last": "Nitisaroj", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Hanna", "middle": [], "last": "Nurmi", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Stina", "middle": [], "last": "Ojala", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Atul", "middle": [ "Kr" ], "last": "Ojha", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Ad\u00e9dayo", "middle": [], "last": "Ol\u00fa\u00f2kun", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Mai", "middle": [], "last": "Omura", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Emeka", "middle": [], "last": "Onwuegbuzia", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Petya", "middle": [], "last": "Osenova", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Rober\u1e97", "middle": [], "last": "Ostling", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Lilja", "middle": [], "last": "\u00d8vrelid", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Aziye", "middle": [], "last": "Bet\u00fcl\u00f6zate\u015f", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Balk\u0131z\u00f6zt\u00fcrk", "middle": [], "last": "Ba\u015faran", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Niko", "middle": [], "last": "Partanen", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Elena", "middle": [], "last": "Pascual", "suffix": "", "affiliation": {}, "email": 
"" }, { "first": "Marco", "middle": [], "last": "Passarotti", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Agnieszka", "middle": [], "last": "Pate- Juk", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Guilherme", "middle": [], "last": "Paulino-Passos", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Angelika", "middle": [], "last": "Peljak- \u0141api\u0144ska", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Siyao", "middle": [], "last": "Peng", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Cenel-Augusto", "middle": [], "last": "Perez", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Guy", "middle": [], "last": "Perrier", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Daria", "middle": [], "last": "Petrova", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Slav", "middle": [], "last": "Petrov", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jason", "middle": [], "last": "Phelan", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jussi", "middle": [], "last": "Piitulainen", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Tommi", "middle": [ "A" ], "last": "Pirinen", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Emily", "middle": [], "last": "Pitler", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Barbara", "middle": [], "last": "Plank", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Thierry", "middle": [], "last": "Poibeau", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Larisa", "middle": [], "last": "Ponomareva", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Martin", "middle": [], "last": "Popel", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Lauma", "middle": [], "last": "Pretkalni\u0146a", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Sophie", "middle": [], "last": "Pr\u00e9vost", "suffix": "", "affiliation": {}, "email": "" }, { 
"first": "Prokopis", "middle": [], "last": "Prokopidis", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Adam", "middle": [], "last": "Przepi\u00f3rkowski", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Ti- Ina", "middle": [], "last": "Puolakainen", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Sampo", "middle": [], "last": "Pyysalo", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Peng", "middle": [], "last": "Qi", "suffix": "", "affiliation": {}, "email": "" }, { "first": "An- Driela", "middle": [], "last": "R\u00e4\u00e4bis", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Alexandre", "middle": [], "last": "Rademaker", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Loganathan", "middle": [], "last": "Ramasamy", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Taraka", "middle": [], "last": "Rama", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Carlos", "middle": [], "last": "Ramisch", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Vinit", "middle": [], "last": "Ravishankar", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Livy", "middle": [], "last": "Real", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Petru", "middle": [], "last": "Rebeja", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Siva", "middle": [], "last": "Reddy", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Georg", "middle": [], "last": "Rehm", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Ivan", "middle": [], "last": "Riabov", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Michael", "middle": [], "last": "Rie\u00dfler", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Erika", "middle": [], "last": "Rimkut\u0117", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Larissa", "middle": [], "last": "Rinaldi", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Laura", "middle": [], 
"last": "Rituma", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Luisa", "middle": [], "last": "Rocha", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Mykhailo", "middle": [], "last": "Romanenko", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Rudolf", "middle": [], "last": "Rosa", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Valentin", "middle": [], "last": "Ros", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Davide", "middle": [], "last": "Rovati", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Olga", "middle": [], "last": "Rudina", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Jack", "middle": [], "last": "Rueter", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Shoval", "middle": [], "last": "Sadde", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Beno\u00eet", "middle": [], "last": "Sagot", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Shadi", "middle": [], "last": "Saleh", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Alessio", "middle": [], "last": "Salomoni", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Tanja", "middle": [], "last": "Samard\u017ei\u0107", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Stephanie", "middle": [], "last": "Samson", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Manuela", "middle": [], "last": "Sanguinetti", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Dage", "middle": [], "last": "S\u00e4rg", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Baiba", "middle": [], "last": "Saul\u012bte", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Yanin", "middle": [], "last": "Sawanakunanon", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Salvatore", "middle": [], "last": "Scarlata", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Nathan", "middle": [], "last": "Schneider", "suffix": "", 
"affiliation": {}, "email": "" }, { "first": "Sebastian", "middle": [], "last": "Schuster", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Djam\u00e9", "middle": [], "last": "Sed- Dah", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Wolfgang", "middle": [], "last": "Seeker", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Mojgan", "middle": [], "last": "Seraji", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Mo", "middle": [], "last": "Shen", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Atsuko", "middle": [], "last": "Shimada", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Hiroyuki", "middle": [], "last": "Shirasu", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Muh", "middle": [], "last": "Shohibus- Sirri", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Dmitry", "middle": [], "last": "Sichinava", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Aline", "middle": [], "last": "Silveira", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Natalia", "middle": [], "last": "Sil- Veira", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Maria", "middle": [], "last": "Simi", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Radu", "middle": [], "last": "Simionescu", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Katalin", "middle": [], "last": "Simk\u00f3", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Kiril", "middle": [], "last": "Simov", "suffix": "", "affiliation": {}, "email": "" }, { "first": "Maria", "middle": [], "last": "Skachedubova", "suffix": "", "affiliation": {}, "email": "" } ], "year": "", "venue": null, "identifiers": {}, "abstract": "Linear embedding transformation has been shown to be effective for zero-shot crosslingual transfer tasks and achieve surprisingly promising results. 
However, cross-lingual embedding space mapping is usually studied in static word-level embeddings, where a space transformation is derived by aligning representations of translation pairs that are referred from dictionaries. We move further from this line and investigate a contextual embedding alignment approach which is sense-level and dictionary-free. To enhance the quality of the mapping, we also provide a deep view of properties of contextual embeddings, i.e., the anisotropy problem and its solution. Experiments on zero-shot dependency parsing through the concept-shared space built by our embedding transformation substantially outperform state-of-the-art methods using multilingual embeddings.", "pdf_parse": { "paper_id": "2021", "_pdf_hash": "", "abstract": [ { "text": "Linear embedding transformation has been shown to be effective for zero-shot crosslingual transfer tasks and achieve surprisingly promising results. However, cross-lingual embedding space mapping is usually studied in static word-level embeddings, where a space transformation is derived by aligning representations of translation pairs that are referred from dictionaries. We move further from this line and investigate a contextual embedding alignment approach which is sense-level and dictionary-free. To enhance the quality of the mapping, we also provide a deep view of properties of contextual embeddings, i.e., the anisotropy problem and its solution. 
Experiments on zero-shot dependency parsing through the concept-shared space built by our embedding transformation substantially outperform state-of-the-art methods using multilingual embeddings.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Abstract", "sec_num": null } ], "body_text": [ { "text": "Cross-lingual embedding space alignment (Mikolov et al., 2013b; Artetxe et al., 2016; Xing et al., 2015; Conneau et al., 2018) recently has been attracted a lot of attention because cross-lingual model transfer is effectively facilitated by shared semantic spaces in NLP tasks, e.g., named entity recognition (Xie et al., 2018) , part-of-speech tagging (Hsu et al., 2019) , and dependency parsing (Schuster et al., 2019) , where dependency parsing is scoped out in this paper. Compared with the delexicalized parsers (McDonald et al., 2011) , multilingual word embeddings have been demonstrated to significantly improve the performance of zero-shot dependency parsing by bridging the lexical feature gap (Guo et al., 2015) .", "cite_spans": [ { "start": 40, "end": 63, "text": "(Mikolov et al., 2013b;", "ref_id": "BIBREF16" }, { "start": 64, "end": 85, "text": "Artetxe et al., 2016;", "ref_id": "BIBREF0" }, { "start": 86, "end": 104, "text": "Xing et al., 2015;", "ref_id": "BIBREF26" }, { "start": 105, "end": 126, "text": "Conneau et al., 2018)", "ref_id": "BIBREF3" }, { "start": 309, "end": 327, "text": "(Xie et al., 2018)", "ref_id": "BIBREF25" }, { "start": 353, "end": 371, "text": "(Hsu et al., 2019)", "ref_id": "BIBREF9" }, { "start": 397, "end": 420, "text": "(Schuster et al., 2019)", "ref_id": "BIBREF23" }, { "start": 517, "end": 540, "text": "(McDonald et al., 2011)", "ref_id": "BIBREF14" }, { "start": 704, "end": 722, "text": "(Guo et al., 2015)", "ref_id": "BIBREF8" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "With the remarkable development of monolingual contextual pre-trained models (Peters et al., 
2018; Devlin et al., 2019; Radford et al., 2019) , which dramatically outperform static word embeddings (Mikolov et al., 2013a; Pennington et al., 2014; Bojanowski et al., 2017) in broad NLP applications, increasing number of researchers have started focusing on contextual representation alignment for cross-lingual dependency parsing (Schuster et al., 2019; Wang et al., 2019) . Moreover, with the appearance of multilingual pre-trained models, such as Multilingual BERT (mBERT) (Devlin et al., 2019) , zero-shot dependency parsing becomes easier by utilizing the large vocabulary of the multilingual models (Kondratyuk and Straka, 2019) .", "cite_spans": [ { "start": 77, "end": 98, "text": "(Peters et al., 2018;", "ref_id": "BIBREF18" }, { "start": 99, "end": 119, "text": "Devlin et al., 2019;", "ref_id": "BIBREF4" }, { "start": 120, "end": 141, "text": "Radford et al., 2019)", "ref_id": "BIBREF19" }, { "start": 197, "end": 220, "text": "(Mikolov et al., 2013a;", "ref_id": "BIBREF15" }, { "start": 221, "end": 245, "text": "Pennington et al., 2014;", "ref_id": "BIBREF17" }, { "start": 246, "end": 270, "text": "Bojanowski et al., 2017)", "ref_id": "BIBREF1" }, { "start": 429, "end": 452, "text": "(Schuster et al., 2019;", "ref_id": "BIBREF23" }, { "start": 453, "end": 471, "text": "Wang et al., 2019)", "ref_id": "BIBREF24" }, { "start": 574, "end": 595, "text": "(Devlin et al., 2019)", "ref_id": "BIBREF4" }, { "start": 703, "end": 732, "text": "(Kondratyuk and Straka, 2019)", "ref_id": "BIBREF12" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "Our approach is most similar to Schuster et al. (2019) , which maps a target language space into a source language space through a linear transformation to realize zero-shot transfer in dependency parsing. 
Typically, a transformation is usually derived by word-level embedding alignment, while we explore a sense-level embedding alignment method to map bilingual spaces more precisely, where sense-level representations are split from multi-sense word-level embeddings. Furthermore, our mapping approach is dictionary-free which utilizes the silver token pairs from parallel corpora and eliminates the necessity of gold dictionaries. The experimental results of zero-shot dependency parsing demonstrate that two parser evaluation scores (UAS and LAS) of sense-level mapping are always better than the word-level one. Moreover, we also notice the anisotropy problem (Ethayarajh, 2019) (defined in Section 3.2) in contextual embeddings, which potentially deteriorates the performance of the zero-shot transfer task. We significantly mitigate this drawback by leveraging a preprocessing step, iterative normalization (IN) (Zhang et al., 2019) , which is originally used for improving the performance of static embedding mapping on the bilingual dictionary induction task.", "cite_spans": [ { "start": 32, "end": 54, "text": "Schuster et al. (2019)", "ref_id": "BIBREF23" }, { "start": 865, "end": 883, "text": "(Ethayarajh, 2019)", "ref_id": "BIBREF7" }, { "start": 1119, "end": 1139, "text": "(Zhang et al., 2019)", "ref_id": "BIBREF27" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "Zero-shot dependency parsing experiments are conducted on Universal Dependencies treebank v2.6 (Zeman et al., 2020), which shows that our results obtain a substantial gain compared with state-of-the-art methods using multilingual fastText and mBERT 1 .", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "Denote X \u2208 R d\u00d7N as the word embedding matrix for a target language 2 , and Y as the word embedding matrix for a source language. 
For each column of the target embedding matrix x i \u2208 R d , it has one source embedding vector y i \u2208 R d corresponding to a source word translated from the target word i. We aim to derive a linear transformation matrix\u0174 used for mapping from the target language space to the source language space. This can be learned by minimizing the Frobenius norm:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Linear Cross-lingual Space Alignment", "sec_num": "2" }, { "text": "EQUATION", "cite_spans": [], "ref_spans": [], "eq_spans": [ { "start": 0, "end": 8, "text": "EQUATION", "ref_id": "EQREF", "raw_str": "W = arg min W \u2208R d\u00d7d W X \u2212 Y F", "eq_num": "(1)" } ], "section": "Linear Cross-lingual Space Alignment", "sec_num": "2" }, { "text": "Furthermore, Xing et al. (2015) show that the quality of space alignment is successfully improved with the orthogonal restriction, i.e, W T W = I. Thus, the problem can be solved by Procrustes approach (Sch\u00f6nemann, 1966) :", "cite_spans": [ { "start": 13, "end": 31, "text": "Xing et al. (2015)", "ref_id": "BIBREF26" }, { "start": 202, "end": 220, "text": "(Sch\u00f6nemann, 1966)", "ref_id": "BIBREF22" } ], "ref_spans": [], "eq_spans": [], "section": "Linear Cross-lingual Space Alignment", "sec_num": "2" }, { "text": "EQUATION", "cite_spans": [], "ref_spans": [], "eq_spans": [ { "start": 0, "end": 8, "text": "EQUATION", "ref_id": "EQREF", "raw_str": "W = arg min W \u2208O d\u00d7d W X \u2212 Y F = U V T s.t. 
U \u03a3V T = svd(Y X T )", "eq_num": "(2)" } ], "section": "Linear Cross-lingual Space Alignment", "sec_num": "2" }, { "text": "where O d\u00d7d is the set of orthogonal matrices.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Linear Cross-lingual Space Alignment", "sec_num": "2" }, { "text": "An unsupervised bidirectional word alignment algorithm based on IBM Model 2 (Brown et al., 1 Code is available at: https://github.com/ fe1ixxu/ZeroShot-CrossLing-Parsing.", "cite_spans": [ { "start": 91, "end": 92, "text": "1", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Contextual Embedding Transformation", "sec_num": "3.1" }, { "text": "2 Different from usual settings, we use x-related symbols for target data and y-related ones for source data. 1993), Fast Align (Dyer et al., 2013) , is first applied to a parallel corpus to derive silver aligned token pairs. We then respectively feed the parallel corpus to the BERTs of the target and the source languages and extract the outputs as contextual embeddings. As shown in Figure 1 , Fast Align bridges \"links\" between silver token pairs, and between the embeddings of the token pairs as well. Thus, for each target type, a collection of its contextual embeddings can be obtained, as well as a collection of contextual embeddings of its aligned source tokens. Vectors are normalized to satisfy the orthogonal condition.", "cite_spans": [ { "start": 128, "end": 147, "text": "(Dyer et al., 2013)", "ref_id": "BIBREF6" } ], "ref_spans": [ { "start": 386, "end": 394, "text": "Figure 1", "ref_id": "FIGREF0" } ], "eq_spans": [], "section": "Contextual Embedding Transformation", "sec_num": "3.1" }, { "text": "Motivated by the assumption that multiple senses of a type can construct multiple distinct clusters in its collection (Schuster et al., 2019) , we derive several sense-level (cluster-level) embeddings for a type by averaging vectors in each cluster. 
This splits the representations of multi-sense words and helps the anchor-driven space mapping in a finer resolution. To find clusters, we utilize k-means to cluster contextual embeddings in the vector collection of each type, and adaptively find the optimal k by an elbow-based method Satopaa et al. (2011) . Contextual vectors are only clustered in the target side to obtain sense-level embeddings, while the aligned sense-level embeddings in the source side can also be simultaneously derived because embeddings have been already \"linked\" by Fast Align. We next build a sense embedding matrix X s for the target language by putting the senselevel embeddings in each column, and meanwhile construct a column-wise aligned sense embedding matrix Y s in the source side. Finally, we obtain the optimal linear mapping\u0174 from X s to Y s by Equation 2. Pseudo code of transformation method is in Appendix A. . They can be perfectly fit after mapping now.", "cite_spans": [ { "start": 118, "end": 141, "text": "(Schuster et al., 2019)", "ref_id": "BIBREF23" }, { "start": 536, "end": 557, "text": "Satopaa et al. (2011)", "ref_id": "BIBREF21" } ], "ref_spans": [], "eq_spans": [], "section": "Contextual Embedding Transformation", "sec_num": "3.1" }, { "text": "Our findings show that contextual embeddings always hold anisotropic property, i.e., they are not uniformly distributed in the space and gather toward a narrow range of orientations. Importantly, degrees of anisotropy across languages are various, which undermines the quality of cross-lingual mappings. A toy example of how the anisotropy affect mappings is illustrated in Figure 2a . One metric for anisotropy is to calculate the average cosine similarity distance of randomly selected vectors. The higher the distance is, the narrower directions vectors point to. Note that the distance for an isotropic space is 0. To mitigate this problem, we introduce iterative normalization. 
For each token i, the embedding vector x i is forced to be zero-mean firstly in each iteration:", "cite_spans": [], "ref_spans": [ { "start": 374, "end": 383, "text": "Figure 2a", "ref_id": "FIGREF1" } ], "eq_spans": [], "section": "Anisotropy in Embedding Spaces", "sec_num": "3.2" }, { "text": "EQUATION", "cite_spans": [], "ref_spans": [], "eq_spans": [ { "start": 0, "end": 8, "text": "EQUATION", "ref_id": "EQREF", "raw_str": "x i = x i \u2212 1 N N i=1 x i", "eq_num": "(3)" } ], "section": "Anisotropy in Embedding Spaces", "sec_num": "3.2" }, { "text": "and then normalize it to a fixed length:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Anisotropy in Embedding Spaces", "sec_num": "3.2" }, { "text": "EQUATION", "cite_spans": [], "ref_spans": [], "eq_spans": [ { "start": 0, "end": 8, "text": "EQUATION", "ref_id": "EQREF", "raw_str": "x i = x i x i 2", "eq_num": "(4)" } ], "section": "Anisotropy in Embedding Spaces", "sec_num": "3.2" }, { "text": "The two steps are repeated until convergence. N is the total number of embeddings. The iterative preprocessing enforces the space to be uniformly distributed, and relative angles between vectors across languages to be more similar (Figure 2b ).", "cite_spans": [], "ref_spans": [ { "start": 231, "end": 241, "text": "(Figure 2b", "ref_id": "FIGREF1" } ], "eq_spans": [], "section": "Anisotropy in Embedding Spaces", "sec_num": "3.2" }, { "text": "A parser is first trained on a source language treebank, where outputs of a frozen BERT are used as embeddings. To apply the pre-trained parser to the target languages, we first replace the source BERT with the target BERT. Then, iterative normalization is operated to enforce contextual embeddings in a near-isotropic space. At last, we map the embeddings to the source language space. Specifically, for each target token i, its contextual representation x i is mapped by\u0174 x i . The processing of zero-shot dependency parsing is visualized in Figure 3 . 
Note that the space of pre-trained model has already fit to be near-isotropic by utilizing iterative normalization during training.", "cite_spans": [], "ref_spans": [ { "start": 544, "end": 552, "text": "Figure 3", "ref_id": "FIGREF2" } ], "eq_spans": [], "section": "Zero-shot Transfer", "sec_num": "3.3" }, { "text": "Our parser is the deep biaffine model from Dozat and Manning (2016) where hyperparameters are almost unchanged. The settings of all hyperparameters are listed in Appendix B. English is set as the source language and other languages are targets.", "cite_spans": [ { "start": 43, "end": 67, "text": "Dozat and Manning (2016)", "ref_id": "BIBREF5" } ], "ref_spans": [], "eq_spans": [], "section": "Experiment", "sec_num": "4" }, { "text": "In our experiments, we select 6 target languages from 4 language families for which we have offthe-shelf monolingual pre-trained BERT models (base-size). We train the parsing model only in the English treebank, and directly evaluate zero-shot transfer performance on the target languages.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Experiment", "sec_num": "4" }, { "text": "Aligned fastText: Our first baseline is multilingual fastText aligned by the RCSLS method (Joulin et al., 2018; Bojanowski et al., 2017) which is straightforwardly employed to the embedding layer for the corresponding language. mBERT: We compare our approach with both uncased and cased version of mBERT. Outputs of mBERT are directly used for the embedding layer.", "cite_spans": [ { "start": 90, "end": 111, "text": "(Joulin et al., 2018;", "ref_id": "BIBREF11" }, { "start": 112, "end": 136, "text": "Bojanowski et al., 2017)", "ref_id": "BIBREF1" } ], "ref_spans": [], "eq_spans": [], "section": "Baseline", "sec_num": "4.1" }, { "text": "Following the analysis that top layers of BERT contain more semantic information (Jawahar et al., 2019) , our contextual representation are normalized mean vector of the last 4 layers of BERT. 
The parallel corpora used to extract contextual embeddings are obtained from ParaCrawl v6.0 3 . For each language pair, we select 1M parallel sentences whose length is shorter than 150. Since some noisy alignments are produced during Fast Align, we only take one-to-one token alignment into consideration. The dataset used for cross-lingual dependency parsing is the Universal Dependencies treebank v2.6 4 (Zeman et al., 2020). We store up to 10K contextual vectors extracted from BERT for non-OOV tokens 5 . Vectors in the collection of a target type are clustered to derive sense-level embeddings only if the token occurs more than 100 times. Otherwise, the representation for the token is the basic word-level embedding, i.e., the mean vector of its vector collection. Experiments of word-level embedding alignment are also conducted to compare with sense-level results.", "cite_spans": [ { "start": 81, "end": 103, "text": "(Jawahar et al., 2019)", "ref_id": "BIBREF10" } ], "ref_spans": [], "eq_spans": [], "section": "Settings", "sec_num": "4.2" }, { "text": "Forcing contextual embedding vectors in X s and Y s to be zero-mean is straightforward. Nevertheless, it is difficult to look for the universal mean vector of contextual embeddings when we train the English parser, because we do not have such an exact mean vector for all possible contextual embeddings. Thus, to successfully implement IN for pre-training the parser, we calculate the approximate universal mean vector by averaging all contextual vectors of every occurrence of tokens from the given training dataset in each iteration. IN runs for 5 iterations, which is sufficient for convergence.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Iterative Normalization Preprocessing", "sec_num": "4.3" }, { "text": "Compare with Previous Methods: Overall results are shown in Table 1. In the first place, our contextual-aware embedding mapping (row 4 -7) exceeds the aligned fastText (row 1) by a large margin. 
Moreover, our sense-level mapping without IN preprocessing outperforms uncased and cased mBERT by 0.67% and 1.42% on LAS on average, and mapping with preprocessing further outperforms them by 2.07% and 2.82% on average.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Why Contextual Embedding Mapping?", "sec_num": "5.1" }, { "text": "Dictionary-free Mapping: Typically, aligned embeddings take a static dictionary as reference but high-quality manual dictionaries are still rare (Ruder et al., 2019) . Our mapping skips the wordlevel alignment in dictionaries, and directly aligns the embeddings from parallel corpora which offers a large scope of token alignments.", "cite_spans": [ { "start": 145, "end": 165, "text": "(Ruder et al., 2019)", "ref_id": "BIBREF20" } ], "ref_spans": [], "eq_spans": [], "section": "Why Contextual Embedding Mapping?", "sec_num": "5.1" }, { "text": "Sense-level Mapping: Different from static embeddings whose words only have one unique representation, our contextual embeddings also take advantage of multiple representations for multi-sense words to improve the quality of anchor-driven mapping. In Table 1 , the performance of sense-level mapping always surpasses word-level mapping. Figure 4a illustrates the various degrees of anisotropy among different language pairs. As we expect, the anisotropic degree for English (pink, right) is basically constant, but there is large discrepancy between other target languages (blue, left). After IN preprocessing, all language spaces are approximately isotropic, where their scores of anisotropy dramatically reduce near to zero. One example of how the anisotropic degree drops down in each iteration of IN for the Spanish-English pair is illustrated in Figure 4b . IN assists the aligned embeddings in building more similar relative angles across embeddings in different language spaces. 
As shown in Table 1 , this preprocessing achieves an absolute gain of 1.37% for word-level mapping and 1.40% for sense-level mapping on average.", "cite_spans": [], "ref_spans": [ { "start": 251, "end": 258, "text": "Table 1", "ref_id": "TABREF1" }, { "start": 337, "end": 346, "text": "Figure 4a", "ref_id": "FIGREF3" }, { "start": 851, "end": 860, "text": "Figure 4b", "ref_id": "FIGREF3" }, { "start": 998, "end": 1005, "text": "Table 1", "ref_id": "TABREF1" } ], "eq_spans": [], "section": "Why Contextual Embedding Mapping?", "sec_num": "5.1" }, { "text": "We proposed a linear, dictionary-free and sense-level contextual mapping approach by exploiting a parallel corpus which has shown promising results and substantial improvement compared with multilingual fastText and mBERT in the zero-shot dependency parsing task. We also revealed that various degrees of anisotropy hurt the performance of mapping, and introduced iterative normalization to alleviate it by enforcing contextual embeddings to be uniformly distributed, which also has indicated the benefits of isotropy.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Conclusion", "sec_num": "6" }, { "text": "Pseudo Code is shown in Algorithm 1.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "A Pseudo Code of Contextual Embedding Transformation", "sec_num": null }, { "text": "Here we list all hyperparameters for our pre-trained parser in Table 2 C pre-trained Monolingual BERTs", "cite_spans": [], "ref_spans": [ { "start": 63, "end": 70, "text": "Table 2", "ref_id": "TABREF3" } ], "eq_spans": [], "section": "B Hyperparamters", "sec_num": null }, { "text": "In Table 3 , we list the names of pre-trained monolingual BERTs from huggingface 6 that we used in our experiments.", "cite_spans": [], "ref_spans": [ { "start": 3, "end": 10, "text": "Table 3", "ref_id": null } ], "eq_spans": [], "section": "B Hyperparamters", "sec_num": null }, { "text": "Model name mbert uncased 
bert-base-multilingual-uncased mbert cased bert-base-multilingual-cased en bert-base-uncased es dccuchile/bert-base-spanish-wwm-uncased pt neuralmind/bert-base-portuguese-cased ro dumitrescustefan/bert-base-romanian-uncased-v1 pl", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Language", "sec_num": null }, { "text": "dkleczek/bert-base-polish-uncased-v1 fi bert-base-finnish-uncased-v1 el nlpaueb/bert-base-greek-uncased-v1 Table 3 : Names of Pre-trained BERT models.", "cite_spans": [], "ref_spans": [ { "start": 107, "end": 114, "text": "Table 3", "ref_id": null } ], "eq_spans": [], "section": "Language", "sec_num": null }, { "text": "6 https://huggingface.co/models", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Language", "sec_num": null }, { "text": "www.paracrawl.eu 4 https://lindat.mff.cuni.cz/ repository/xmlui/handle/11234/1-32265 We do not use the composition of subword vectors to approximately represent OOV tokens, because our preliminary results show this hurts the mapping.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null } ], "back_matter": [ { "text": "We thank the anonymous reviewers for their valuable comments.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Acknowledgments", "sec_num": null }, { "text": "Require: Target Corpus X , source Corpus Y, target pre-trained BERT Bx, source pre-trained BERT By, where X is the translation corpus of Y 1: function CONTEXTUAL-TRANSFORMATION(X , Y, Bx, By) 2: # Part 1: Collect embeddings 3:I \u2190 FAST-ALIGN(X , Y) I is an index-aligned corpus, where each line is composed of index pairs of aligned tokens for each parallel sentence.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Algorithm 1 Contextual Embedding Transformation", "sec_num": null }, { "text": "Initialize C \u2190 Empty Hash Map 5:for index i in LENGTH(X ) do number of sentences in the corpus 6:EX \u2190 Bx(X) Contextual embeddings of tokens: 8:EY 
\u2190 By(Y ) 9:for index j in LENGTH(X) do number of tokens in the sentence 10:x k \u2190 ELBOW-BASED(cx) Find optimal number of clusters 22:for Subcluster cx i in K-MEANS(k, cx) do 23:Get subcluster cy i due to aligned pair ((ex, ey)) in C[x] 24:meanx \u2190 mean vector of cx i 25:meany \u2190 mean vector of cy i 26:Put meanx in Xs as a column 27:Put meany in Ys as a column 28: end for 29: end for 30: 31: # Part 3: Derive embedding transformation 32: U \u03a3V T = svd(Y X T ) 33:\u0174 = U V T 34: return\u0174 35: end function", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "4:", "sec_num": null } ], "bib_entries": { "BIBREF0": { "ref_id": "b0", "title": "Learning principled bilingual mappings of word embeddings while preserving monolingual invariance", "authors": [ { "first": "Mikel", "middle": [], "last": "Artetxe", "suffix": "" }, { "first": "Gorka", "middle": [], "last": "Labaka", "suffix": "" }, { "first": "Eneko", "middle": [], "last": "Agirre", "suffix": "" } ], "year": 2016, "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", "volume": "", "issue": "", "pages": "2289--2294", "other_ids": { "DOI": [ "10.18653/v1/D16-1250" ] }, "num": null, "urls": [], "raw_text": "Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2016. Learning principled bilingual mappings of word em- beddings while preserving monolingual invariance. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 2289-2294, Austin, Texas. 
Association for Compu- tational Linguistics.", "links": null }, "BIBREF1": { "ref_id": "b1", "title": "Enriching word vectors with subword information", "authors": [ { "first": "Piotr", "middle": [], "last": "Bojanowski", "suffix": "" }, { "first": "Edouard", "middle": [], "last": "Grave", "suffix": "" }, { "first": "Armand", "middle": [], "last": "Joulin", "suffix": "" }, { "first": "Tomas", "middle": [], "last": "Mikolov", "suffix": "" } ], "year": 2017, "venue": "Transactions of the Association for Computational Linguistics", "volume": "5", "issue": "", "pages": "135--146", "other_ids": { "DOI": [ "10.1162/tacl_a_00051" ] }, "num": null, "urls": [], "raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching word vec- tors with subword information. Transactions of the Association for Computational Linguistics, 5:135- 146.", "links": null }, "BIBREF2": { "ref_id": "b2", "title": "The mathematics of statistical machine translation: Parameter estimation", "authors": [ { "first": "F", "middle": [], "last": "Peter", "suffix": "" }, { "first": "Stephen", "middle": [ "A Della" ], "last": "Brown", "suffix": "" }, { "first": "Vincent", "middle": [ "J" ], "last": "Pietra", "suffix": "" }, { "first": "Robert", "middle": [ "L" ], "last": "Della Pietra", "suffix": "" }, { "first": "", "middle": [], "last": "Mercer", "suffix": "" } ], "year": 1993, "venue": "Computational Linguistics", "volume": "19", "issue": "2", "pages": "263--311", "other_ids": {}, "num": null, "urls": [], "raw_text": "Peter F. Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and Robert L. Mercer. 1993. The math- ematics of statistical machine translation: Parameter estimation. 
Computational Linguistics, 19(2):263- 311.", "links": null }, "BIBREF3": { "ref_id": "b3", "title": "Word translation without parallel data", "authors": [ { "first": "Alexis", "middle": [], "last": "Conneau", "suffix": "" }, { "first": "Guillaume", "middle": [], "last": "Lample", "suffix": "" }, { "first": "Marc'aurelio", "middle": [], "last": "Ranzato", "suffix": "" }, { "first": "Ludovic", "middle": [], "last": "Denoyer", "suffix": "" }, { "first": "Herv\u00e9", "middle": [], "last": "J\u00e9gou", "suffix": "" } ], "year": 2018, "venue": "International Conference on Learning Representations", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Alexis Conneau, Guillaume Lample, Marc'Aurelio Ranzato, Ludovic Denoyer, and Herv\u00e9 J\u00e9gou. 2018. Word translation without parallel data. In International Conference on Learning Representations.", "links": null }, "BIBREF4": { "ref_id": "b4", "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", "authors": [ { "first": "Jacob", "middle": [], "last": "Devlin", "suffix": "" }, { "first": "Ming-Wei", "middle": [], "last": "Chang", "suffix": "" }, { "first": "Kenton", "middle": [], "last": "Lee", "suffix": "" }, { "first": "Kristina", "middle": [], "last": "Toutanova", "suffix": "" } ], "year": 2019, "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", "volume": "1", "issue": "", "pages": "4171--4186", "other_ids": { "DOI": [ "10.18653/v1/N19-1423" ] }, "num": null, "urls": [], "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. 
In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", "links": null }, "BIBREF5": { "ref_id": "b5", "title": "Deep biaffine attention for neural dependency parsing", "authors": [ { "first": "Timothy", "middle": [], "last": "Dozat", "suffix": "" }, { "first": "D", "middle": [], "last": "Christopher", "suffix": "" }, { "first": "", "middle": [], "last": "Manning", "suffix": "" } ], "year": 2016, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1611.01734" ] }, "num": null, "urls": [], "raw_text": "Timothy Dozat and Christopher D Manning. 2016. Deep biaffine attention for neural dependency pars- ing. arXiv preprint arXiv:1611.01734.", "links": null }, "BIBREF6": { "ref_id": "b6", "title": "A simple, fast, and effective reparameterization of IBM model 2", "authors": [ { "first": "Chris", "middle": [], "last": "Dyer", "suffix": "" }, { "first": "Victor", "middle": [], "last": "Chahuneau", "suffix": "" }, { "first": "Noah", "middle": [ "A" ], "last": "Smith", "suffix": "" } ], "year": 2013, "venue": "Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", "volume": "", "issue": "", "pages": "644--648", "other_ids": {}, "num": null, "urls": [], "raw_text": "Chris Dyer, Victor Chahuneau, and Noah A. Smith. 2013. A simple, fast, and effective reparameteriza- tion of IBM model 2. In Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 644-648, Atlanta, Georgia. Association for Computational Linguistics.", "links": null }, "BIBREF7": { "ref_id": "b7", "title": "How contextual are contextualized word representations? 
comparing the geometry of BERT, ELMo, and GPT-2 embeddings", "authors": [ { "first": "Kawin", "middle": [], "last": "Ethayarajh", "suffix": "" } ], "year": 2019, "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", "volume": "", "issue": "", "pages": "55--65", "other_ids": { "DOI": [ "10.18653/v1/D19-1006" ] }, "num": null, "urls": [], "raw_text": "Kawin Ethayarajh. 2019. How contextual are con- textualized word representations? comparing the geometry of BERT, ELMo, and GPT-2 embed- dings. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 55-65, Hong Kong, China. Association for Computational Linguistics.", "links": null }, "BIBREF8": { "ref_id": "b8", "title": "Cross-lingual dependency parsing based on distributed representations", "authors": [ { "first": "Jiang", "middle": [], "last": "Guo", "suffix": "" }, { "first": "Wanxiang", "middle": [], "last": "Che", "suffix": "" }, { "first": "David", "middle": [], "last": "Yarowsky", "suffix": "" }, { "first": "Haifeng", "middle": [], "last": "Wang", "suffix": "" }, { "first": "Ting", "middle": [], "last": "Liu", "suffix": "" } ], "year": 2015, "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", "volume": "1", "issue": "", "pages": "1234--1244", "other_ids": { "DOI": [ "10.3115/v1/P15-1119" ] }, "num": null, "urls": [], "raw_text": "Jiang Guo, Wanxiang Che, David Yarowsky, Haifeng Wang, and Ting Liu. 2015. Cross-lingual depen- dency parsing based on distributed representations. 
In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 1234-1244, Beijing, China. Association for Computational Linguistics.", "links": null }, "BIBREF9": { "ref_id": "b9", "title": "Zero-shot reading comprehension by crosslingual transfer learning with multi-lingual language representation model", "authors": [ { "first": "Tsung-Yuan", "middle": [], "last": "Hsu", "suffix": "" }, { "first": "Chi-Liang", "middle": [], "last": "Liu", "suffix": "" }, { "first": "Hung-Yi", "middle": [], "last": "Lee", "suffix": "" } ], "year": 2019, "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", "volume": "", "issue": "", "pages": "5933--5940", "other_ids": { "DOI": [ "10.18653/v1/D19-1607" ] }, "num": null, "urls": [], "raw_text": "Tsung-Yuan Hsu, Chi-Liang Liu, and Hung-yi Lee. 2019. Zero-shot reading comprehension by cross- lingual transfer learning with multi-lingual lan- guage representation model. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 5933-5940, Hong Kong, China. 
Association for Computational Linguistics.", "links": null }, "BIBREF10": { "ref_id": "b10", "title": "What does BERT learn about the structure of language", "authors": [ { "first": "Ganesh", "middle": [], "last": "Jawahar", "suffix": "" }, { "first": "Beno\u00eet", "middle": [], "last": "Sagot", "suffix": "" }, { "first": "Djam\u00e9", "middle": [], "last": "Seddah", "suffix": "" } ], "year": 2019, "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", "volume": "", "issue": "", "pages": "3651--3657", "other_ids": { "DOI": [ "10.18653/v1/P19-1356" ] }, "num": null, "urls": [], "raw_text": "Ganesh Jawahar, Beno\u00eet Sagot, and Djam\u00e9 Seddah. 2019. What does BERT learn about the structure of language? In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3651-3657, Florence, Italy. As- sociation for Computational Linguistics.", "links": null }, "BIBREF11": { "ref_id": "b11", "title": "Loss in translation: Learning bilingual word mapping with a retrieval criterion", "authors": [ { "first": "Armand", "middle": [], "last": "Joulin", "suffix": "" }, { "first": "Piotr", "middle": [], "last": "Bojanowski", "suffix": "" }, { "first": "Tomas", "middle": [], "last": "Mikolov", "suffix": "" }, { "first": "Herv\u00e9", "middle": [], "last": "J\u00e9gou", "suffix": "" }, { "first": "Edouard", "middle": [], "last": "Grave", "suffix": "" } ], "year": 2018, "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Armand Joulin, Piotr Bojanowski, Tomas Mikolov, Herv\u00e9 J\u00e9gou, and Edouard Grave. 2018. Loss in translation: Learning bilingual word mapping with a retrieval criterion. 
In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing.", "links": null }, "BIBREF12": { "ref_id": "b12", "title": "75 languages, 1 model: Parsing universal dependencies universally", "authors": [ { "first": "Dan", "middle": [], "last": "Kondratyuk", "suffix": "" }, { "first": "Milan", "middle": [], "last": "Straka", "suffix": "" } ], "year": 2019, "venue": "Proceedings of the 2019", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Dan Kondratyuk and Milan Straka. 2019. 75 lan- guages, 1 model: Parsing universal dependen- cies universally. In Proceedings of the 2019", "links": null }, "BIBREF13": { "ref_id": "b13", "title": "Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", "authors": [], "year": null, "venue": "", "volume": "", "issue": "", "pages": "2779--2795", "other_ids": {}, "num": null, "urls": [], "raw_text": "Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 2779-2795.", "links": null }, "BIBREF14": { "ref_id": "b14", "title": "Multi-source transfer of delexicalized dependency parsers", "authors": [ { "first": "Ryan", "middle": [], "last": "Mcdonald", "suffix": "" }, { "first": "Slav", "middle": [], "last": "Petrov", "suffix": "" }, { "first": "Keith", "middle": [], "last": "Hall", "suffix": "" } ], "year": 2011, "venue": "Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing", "volume": "", "issue": "", "pages": "62--72", "other_ids": {}, "num": null, "urls": [], "raw_text": "Ryan McDonald, Slav Petrov, and Keith Hall. 2011. Multi-source transfer of delexicalized dependency parsers. In Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing, pages 62-72, Edinburgh, Scotland, UK. 
Association for Computational Linguistics.", "links": null }, "BIBREF15": { "ref_id": "b15", "title": "Efficient estimation of word representations in vector space", "authors": [ { "first": "Tomas", "middle": [], "last": "Mikolov", "suffix": "" }, { "first": "Kai", "middle": [], "last": "Chen", "suffix": "" }, { "first": "Greg", "middle": [], "last": "Corrado", "suffix": "" }, { "first": "Jeffrey", "middle": [], "last": "Dean", "suffix": "" } ], "year": 2013, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1301.3781" ] }, "num": null, "urls": [], "raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jef- frey Dean. 2013a. Efficient estimation of word representations in vector space. arXiv preprint arXiv:1301.3781.", "links": null }, "BIBREF16": { "ref_id": "b16", "title": "Exploiting similarities among languages for machine translation", "authors": [ { "first": "Tomas", "middle": [], "last": "Mikolov", "suffix": "" }, { "first": "Quoc", "middle": [ "V" ], "last": "Le", "suffix": "" }, { "first": "Ilya", "middle": [], "last": "Sutskever", "suffix": "" } ], "year": 2013, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1309.4168" ] }, "num": null, "urls": [], "raw_text": "Tomas Mikolov, Quoc V Le, and Ilya Sutskever. 2013b. Exploiting similarities among languages for ma- chine translation. 
arXiv preprint arXiv:1309.4168.", "links": null }, "BIBREF17": { "ref_id": "b17", "title": "Glove: Global vectors for word representation", "authors": [ { "first": "Jeffrey", "middle": [], "last": "Pennington", "suffix": "" }, { "first": "Richard", "middle": [], "last": "Socher", "suffix": "" }, { "first": "Christopher D", "middle": [], "last": "Manning", "suffix": "" } ], "year": 2014, "venue": "Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP)", "volume": "", "issue": "", "pages": "1532--1543", "other_ids": {}, "num": null, "urls": [], "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D Manning. 2014. Glove: Global vectors for word rep- resentation. In Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP), pages 1532-1543.", "links": null }, "BIBREF18": { "ref_id": "b18", "title": "Deep contextualized word representations", "authors": [ { "first": "Matthew", "middle": [], "last": "Peters", "suffix": "" }, { "first": "Mark", "middle": [], "last": "Neumann", "suffix": "" }, { "first": "Mohit", "middle": [], "last": "Iyyer", "suffix": "" }, { "first": "Matt", "middle": [], "last": "Gardner", "suffix": "" }, { "first": "Christopher", "middle": [], "last": "Clark", "suffix": "" }, { "first": "Kenton", "middle": [], "last": "Lee", "suffix": "" }, { "first": "Luke", "middle": [], "last": "Zettlemoyer", "suffix": "" } ], "year": 2018, "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", "volume": "1", "issue": "", "pages": "2227--2237", "other_ids": { "DOI": [ "10.18653/v1/N18-1202" ] }, "num": null, "urls": [], "raw_text": "Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word repre- sentations. 
In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 2227- 2237, New Orleans, Louisiana. Association for Computational Linguistics.", "links": null }, "BIBREF19": { "ref_id": "b19", "title": "Language models are unsupervised multitask learners", "authors": [ { "first": "Alec", "middle": [], "last": "Radford", "suffix": "" }, { "first": "Jeffrey", "middle": [], "last": "Wu", "suffix": "" }, { "first": "Rewon", "middle": [], "last": "Child", "suffix": "" }, { "first": "David", "middle": [], "last": "Luan", "suffix": "" }, { "first": "Dario", "middle": [], "last": "Amodei", "suffix": "" }, { "first": "Ilya", "middle": [], "last": "Sutskever", "suffix": "" } ], "year": 2019, "venue": "OpenAI Blog", "volume": "1", "issue": "8", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Lan- guage models are unsupervised multitask learners. OpenAI Blog, 1(8):9.", "links": null }, "BIBREF20": { "ref_id": "b20", "title": "A survey of cross-lingual word embedding models", "authors": [ { "first": "Sebastian", "middle": [], "last": "Ruder", "suffix": "" }, { "first": "Ivan", "middle": [], "last": "Vuli\u0107", "suffix": "" }, { "first": "Anders", "middle": [], "last": "S\u00f8gaard", "suffix": "" } ], "year": 2019, "venue": "Journal of Artificial Intelligence Research", "volume": "65", "issue": "", "pages": "569--631", "other_ids": {}, "num": null, "urls": [], "raw_text": "Sebastian Ruder, Ivan Vuli\u0107, and Anders S\u00f8gaard. 2019. A survey of cross-lingual word embedding models. 
Journal of Artificial Intelligence Research, 65:569-631.", "links": null }, "BIBREF21": { "ref_id": "b21", "title": "Finding a \"kneedle\" in a haystack: Detecting knee points in system behavior", "authors": [ { "first": "Ville", "middle": [], "last": "Satopaa", "suffix": "" }, { "first": "Jeannie", "middle": [], "last": "Albrecht", "suffix": "" }, { "first": "David", "middle": [], "last": "Irwin", "suffix": "" }, { "first": "Barath", "middle": [], "last": "Raghavan", "suffix": "" } ], "year": 2011, "venue": "2011 31st international conference on distributed computing systems workshops", "volume": "", "issue": "", "pages": "166--171", "other_ids": {}, "num": null, "urls": [], "raw_text": "Ville Satopaa, Jeannie Albrecht, David Irwin, and Barath Raghavan. 2011. Finding a\" kneedle\" in a haystack: Detecting knee points in system be- havior. In 2011 31st international conference on distributed computing systems workshops, pages 166-171. IEEE.", "links": null }, "BIBREF22": { "ref_id": "b22", "title": "A generalized solution of the orthogonal procrustes problem", "authors": [ { "first": "Peter", "middle": [ "H" ], "last": "Sch\u00f6nemann", "suffix": "" } ], "year": 1966, "venue": "Psychometrika", "volume": "31", "issue": "1", "pages": "1--10", "other_ids": {}, "num": null, "urls": [], "raw_text": "Peter H Sch\u00f6nemann. 1966. A generalized solution of the orthogonal procrustes problem. 
Psychometrika, 31(1):1-10.", "links": null }, "BIBREF23": { "ref_id": "b23", "title": "Cross-lingual alignment of contextual word embeddings, with applications to zeroshot dependency parsing", "authors": [ { "first": "Tal", "middle": [], "last": "Schuster", "suffix": "" }, { "first": "Ori", "middle": [], "last": "Ram", "suffix": "" }, { "first": "Regina", "middle": [], "last": "Barzilay", "suffix": "" }, { "first": "Amir", "middle": [], "last": "Globerson", "suffix": "" } ], "year": 2019, "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", "volume": "1", "issue": "", "pages": "1599--1613", "other_ids": { "DOI": [ "10.18653/v1/N19-1162" ] }, "num": null, "urls": [], "raw_text": "Tal Schuster, Ori Ram, Regina Barzilay, and Amir Globerson. 2019. Cross-lingual alignment of con- textual word embeddings, with applications to zero- shot dependency parsing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 1599-1613, Minneapolis, Minnesota. 
Association for Computational Linguis- tics.", "links": null }, "BIBREF24": { "ref_id": "b24", "title": "Cross-lingual bert transformation for zero-shot dependency parsing", "authors": [ { "first": "Yuxuan", "middle": [], "last": "Wang", "suffix": "" }, { "first": "Wanxiang", "middle": [], "last": "Che", "suffix": "" }, { "first": "Jiang", "middle": [], "last": "Guo", "suffix": "" }, { "first": "Yijia", "middle": [], "last": "Liu", "suffix": "" }, { "first": "Ting", "middle": [], "last": "Liu", "suffix": "" } ], "year": 2019, "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", "volume": "", "issue": "", "pages": "5725--5731", "other_ids": {}, "num": null, "urls": [], "raw_text": "Yuxuan Wang, Wanxiang Che, Jiang Guo, Yijia Liu, and Ting Liu. 2019. Cross-lingual bert trans- formation for zero-shot dependency parsing. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 5725-5731.", "links": null }, "BIBREF25": { "ref_id": "b25", "title": "Neural crosslingual named entity recognition with minimal resources", "authors": [ { "first": "Jiateng", "middle": [], "last": "Xie", "suffix": "" }, { "first": "Zhilin", "middle": [], "last": "Yang", "suffix": "" }, { "first": "Graham", "middle": [], "last": "Neubig", "suffix": "" }, { "first": "Noah", "middle": [ "A" ], "last": "Smith", "suffix": "" }, { "first": "Jaime", "middle": [], "last": "Carbonell", "suffix": "" } ], "year": 2018, "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", "volume": "", "issue": "", "pages": "369--379", "other_ids": { "DOI": [ "10.18653/v1/D18-1034" ] }, "num": null, "urls": [], "raw_text": "Jiateng Xie, Zhilin Yang, Graham Neubig, Noah A. Smith, and Jaime Carbonell. 2018. 
Neural cross- lingual named entity recognition with minimal re- sources. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 369-379, Brussels, Belgium. Association for Computational Linguistics.", "links": null }, "BIBREF26": { "ref_id": "b26", "title": "Normalized word embedding and orthogonal transform for bilingual word translation", "authors": [ { "first": "Chao", "middle": [], "last": "Xing", "suffix": "" }, { "first": "Dong", "middle": [], "last": "Wang", "suffix": "" }, { "first": "Chao", "middle": [], "last": "Liu", "suffix": "" }, { "first": "Yiye", "middle": [], "last": "Lin ; Veronika Vincze", "suffix": "" }, { "first": "Aya", "middle": [], "last": "Wakasa", "suffix": "" }, { "first": "Lars", "middle": [], "last": "Wallin", "suffix": "" }, { "first": "Abigail", "middle": [], "last": "Walsh", "suffix": "" }, { "first": "Jing Xian", "middle": [], "last": "Wang", "suffix": "" }, { "first": "Jonathan", "middle": [ "North" ], "last": "Washington", "suffix": "" }, { "first": "Maximilan", "middle": [], "last": "Wendt", "suffix": "" }, { "first": "Paul", "middle": [], "last": "Widmer", "suffix": "" }, { "first": "Seyi", "middle": [], "last": "Williams", "suffix": "" }, { "first": "Mats", "middle": [], "last": "Wir\u00e9n", "suffix": "" }, { "first": "Christian", "middle": [], "last": "Wittern", "suffix": "" }, { "first": "Tsegay", "middle": [], "last": "Woldemariam", "suffix": "" }, { "first": "Tak-Sum", "middle": [], "last": "Wong", "suffix": "" }, { "first": "Alina", "middle": [], "last": "Wr\u00f3blewska", "suffix": "" }, { "first": "Mary", "middle": [], "last": "Yako", "suffix": "" }, { "first": "Kayo", "middle": [], "last": "Yamashita", "suffix": "" }, { "first": "Naoki", "middle": [], "last": "Yamazaki", "suffix": "" }, { "first": "Chunxiao", "middle": [], "last": "Yan", "suffix": "" }, { "first": "Koichi", "middle": [], "last": "Yasuoka", "suffix": "" }, { "first": "M", "middle": [], "last": "Marat", "suffix": 
"" }, { "first": "Zhuoran", "middle": [], "last": "Yavrumyan", "suffix": "" }, { "first": "", "middle": [], "last": "Yu", "suffix": "" }, { "first": "Amir", "middle": [], "last": "Zden\u011bk\u017eabokrtsk\u00fd", "suffix": "" }, { "first": "", "middle": [], "last": "Zeldes", "suffix": "" } ], "year": 2015, "venue": "Hanzhi Zhu, and Anna Zhuravleva. 2020. Universal dependencies 2.6. LINDAT/CLARIAH-CZ digital library at the Institute of Formal and Applied Linguistics (\u00daFAL), Faculty of Mathematics and Physics", "volume": "", "issue": "", "pages": "", "other_ids": { "DOI": [ "10.3115/v1/N15-1104" ] }, "num": null, "urls": [], "raw_text": "Chao Xing, Dong Wang, Chao Liu, and Yiye Lin. 2015. Normalized word embedding and orthogonal trans- form for bilingual word translation. In Proceedings Varga, Eric Villemonte de la Clergerie, Veronika Vincze, Aya Wakasa, Lars Wallin, Abigail Walsh, Jing Xian Wang, Jonathan North Washington, Max- imilan Wendt, Paul Widmer, Seyi Williams, Mats Wir\u00e9n, Christian Wittern, Tsegay Woldemariam, Tak-sum Wong, Alina Wr\u00f3blewska, Mary Yako, Kayo Yamashita, Naoki Yamazaki, Chunxiao Yan, Koichi Yasuoka, Marat M. Yavrumyan, Zhuoran Yu, Zden\u011bk\u017dabokrtsk\u00fd, Amir Zeldes, Hanzhi Zhu, and Anna Zhuravleva. 2020. Universal dependencies 2.6. LINDAT/CLARIAH-CZ digital library at the Insti- tute of Formal and Applied Linguistics (\u00daFAL), Fac- ulty of Mathematics and Physics, Charles Univer- sity.", "links": null }, "BIBREF27": { "ref_id": "b27", "title": "Are girls neko or sh\u014djo? 
cross-lingual alignment of non-isomorphic embeddings with iterative normalization", "authors": [ { "first": "Mozhi", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Keyulu", "middle": [], "last": "Xu", "suffix": "" }, { "first": "Ken-Ichi", "middle": [], "last": "Kawarabayashi", "suffix": "" }, { "first": "Stefanie", "middle": [], "last": "Jegelka", "suffix": "" }, { "first": "Jordan", "middle": [], "last": "Boyd-Graber", "suffix": "" } ], "year": 2019, "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", "volume": "", "issue": "", "pages": "3180--3189", "other_ids": { "DOI": [ "10.18653/v1/P19-1307" ] }, "num": null, "urls": [], "raw_text": "Mozhi Zhang, Keyulu Xu, Ken-ichi Kawarabayashi, Stefanie Jegelka, and Jordan Boyd-Graber. 2019. Are girls neko or sh\u014djo? cross-lingual alignment of non-isomorphic embeddings with iterative normal- ization. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3180-3189, Florence, Italy. Association for Computational Linguistics.", "links": null } }, "ref_entries": { "FIGREF0": { "uris": null, "num": null, "type_str": "figure", "text": "The target tokens (left, blue) and the source tokens (right, black) are aligned by Fast Align, so their contextual embeddings can be aligned as well." }, "FIGREF1": { "uris": null, "num": null, "type_str": "figure", "text": "(a) Spanish vectors (purple arrows) cannot well fit to English vectors (pink arrows) by a linear transformation because they gather in different degrees of cones (different angles between vectors), where dash lines are mapped vectors. (b) After iterative normalization, Spanish and English vectors are uniformly distributed (same angles between vectors)" }, "FIGREF2": { "uris": null, "num": null, "type_str": "figure", "text": "The workflow of how zero-shot transfer processes in our model architecture." 
}, "FIGREF3": { "uris": null, "num": null, "type_str": "figure", "text": "(a) Discrepancy of anisotropic degrees for all tested language pairs, where scores of anisotropic degree are calculated by the mean cosine similarity between 1000 randomly selected vectors in their language spaces. (b) The isotropic degrees basically decrease to 0 at the first iteration and converge afterwards." }, "TABREF1": { "type_str": "table", "html": null, "content": "", "num": null, "text": "UAS and LAS of zero-shot evaluation for various languages on test files. The highest scores are bolded and the second highest scores are underlined. Language families are split by dash lines. lang = language, en = English, es = Spanish, pt = Portuguese, ro = Romanian, pl = Polish, fi = Finnish, el = Greek." }, "TABREF3": { "type_str": "table", "html": null, "content": "
", "num": null, "text": "Hyperparameters for deep biaffine dependency parser training." } } } }