{
"paper_id": "2020",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T01:08:06.799213Z"
},
"title": "TALN/LS2N Participation at the BUCC Shared Task: Bilingual Dictionary Induction from Comparable Corpora",
"authors": [
{
"first": "Martin",
"middle": [],
"last": "Laville",
"suffix": "",
"affiliation": {
"laboratory": "UMR CNRS 6004",
"institution": "Universit\u00e9 de Nantes",
"location": {
"country": "France"
}
},
"email": ""
},
{
"first": "Amir",
"middle": [],
"last": "Hazem",
"suffix": "",
"affiliation": {
"laboratory": "UMR CNRS 6004",
"institution": "Universit\u00e9 de Nantes",
"location": {
"country": "France"
}
},
"email": ""
},
{
"first": "Emmanuel",
"middle": [],
"last": "Morin",
"suffix": "",
"affiliation": {
"laboratory": "UMR CNRS 6004",
"institution": "Universit\u00e9 de Nantes",
"location": {
"country": "France"
}
},
"email": ""
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "This paper describes the TALN/LS2N system participation at the Building and Using Comparable Corpora (BUCC) shared task. We first introduce three strategies: (i) a word embedding approach based on fastText embeddings; (ii) a concatenation approach using both character Skip-gram and character CBOW models, and finally (iii) a cognates matching approach based on an exact match string similarity. Then, we present the applied strategy for the shared task which consists in the combination of the embeddings concatenation and the cognates matching approaches. The covered languages are French, English, German, Russian and Spanish. Overall, our system mixing embeddings concatenation and perfect cognates matching obtained the best results while compared to individual strategies, except for English-Russian and Russian-English language pairs for which the concatenation approach was preferred.",
"pdf_parse": {
"paper_id": "2020",
"_pdf_hash": "",
"abstract": [
{
"text": "This paper describes the TALN/LS2N system participation at the Building and Using Comparable Corpora (BUCC) shared task. We first introduce three strategies: (i) a word embedding approach based on fastText embeddings; (ii) a concatenation approach using both character Skip-gram and character CBOW models, and finally (iii) a cognates matching approach based on an exact match string similarity. Then, we present the applied strategy for the shared task which consists in the combination of the embeddings concatenation and the cognates matching approaches. The covered languages are French, English, German, Russian and Spanish. Overall, our system mixing embeddings concatenation and perfect cognates matching obtained the best results while compared to individual strategies, except for English-Russian and Russian-English language pairs for which the concatenation approach was preferred.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Cross-lingual word embeddings learning has triggered great attention in the recent years and several bilingual supervised (Mikolov et al., 2013; Xing et al., 2015; Artetxe et al., 2018a) and unsupervised (Artetxe et al., 2018b; Conneau et al., 2017) alignment methods have been proposed so far. Also, multilingual alignment approaches which consists in mapping several languages in one common space via a pivot language (Smith et al., 2017) or by training all language pairs simultaneously (Chen and Cardie, 2018; Wada et al., 2019; Taitelbaum et al., 2019b; Taitelbaum et al., 2019a; Alaux et al., 2018) are attracting a great attention. Among possible downstream applications of cross-lingual embedding models: Bilingual Lexicon Induction (BLI) which consists in the identification of translation pairs based on a comparable corpus. The BUCC shared task offers the first evaluation framework on BLI from comparable corpora. It covers six languages (English, French, German, Russian, Spanish and Chinese) and two corpora (Wikipedia and WaCKy). We describe in this paper our participation at the BLI shared task. We start by evaluating the cross-lingual word embedding mapping approach (VecMap) (Artetxe et al., 2018a) using fastText embeddings. Then, we present an extension of VecMap approach that uses the concatenation of two mapped embedding models (Hazem and Morin, 2018) . Finally, we present a cognates matching approach, merely an exact match string similarity. Based on the obtained results of the studied approaches, we derive our proposed system -Mix (Conc + Dist)-which combines the outputs of the embeddings concatenation and the cognates matching approaches. Overall, the obtained results on the validation data sets are in favor of our system for all language pairs except for English-Russian and Russian-English pairs, where the cognates matching approach obviously showed very weak results and for which the concatenation approach was preferred. In the following, Section 2 describes the shared task data sets, Section 3 presents the tested approaches and the chosen strategy. The results are given in Section 4, Section 5 discusses the quality of the seed lexicons, and finally, Section 6 concludes our work.",
"cite_spans": [
{
"start": 122,
"end": 144,
"text": "(Mikolov et al., 2013;",
"ref_id": "BIBREF9"
},
{
"start": 145,
"end": 163,
"text": "Xing et al., 2015;",
"ref_id": "BIBREF15"
},
{
"start": 164,
"end": 186,
"text": "Artetxe et al., 2018a)",
"ref_id": "BIBREF1"
},
{
"start": 204,
"end": 227,
"text": "(Artetxe et al., 2018b;",
"ref_id": "BIBREF2"
},
{
"start": 228,
"end": 249,
"text": "Conneau et al., 2017)",
"ref_id": "BIBREF6"
},
{
"start": 420,
"end": 440,
"text": "(Smith et al., 2017)",
"ref_id": "BIBREF11"
},
{
"start": 490,
"end": 513,
"text": "(Chen and Cardie, 2018;",
"ref_id": "BIBREF5"
},
{
"start": 514,
"end": 532,
"text": "Wada et al., 2019;",
"ref_id": "BIBREF14"
},
{
"start": 533,
"end": 558,
"text": "Taitelbaum et al., 2019b;",
"ref_id": "BIBREF13"
},
{
"start": 559,
"end": 584,
"text": "Taitelbaum et al., 2019a;",
"ref_id": "BIBREF12"
},
{
"start": 585,
"end": 604,
"text": "Alaux et al., 2018)",
"ref_id": "BIBREF0"
},
{
"start": 1195,
"end": 1218,
"text": "(Artetxe et al., 2018a)",
"ref_id": "BIBREF1"
},
{
"start": 1354,
"end": 1377,
"text": "(Hazem and Morin, 2018)",
"ref_id": "BIBREF8"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1."
},
{
"text": "The topic of the shared task is bilingual lexicon induction from comparable corpora. Its aim is to extract for each given source word, its target translations. The quality of the extracted lexicons is measured in terms of F1-score. To allow a deeper results analysis, the evaluation is conducted on three test sets corresponding to frequency ranges of the source language word: high (the frequency is among the 5000 most frequent words), mid (words ranking between 5001 and 20000) and low (words ranking between 20001 to 50000).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "BLI Shared Task",
"sec_num": "2."
},
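{
"text": "To make the metric concrete, the following minimal Python sketch computes a micro-averaged F1-score over predicted translation sets; this is our own reading of the metric, and the official scoring script may differ in its averaging:\n\ndef f1_score(pred, gold):\n    # pred, gold: dicts mapping each source word to a set of target translations\n    tp = sum(len(pred.get(w, set()) & gold[w]) for w in gold)\n    n_pred = sum(len(v) for v in pred.values())\n    n_gold = sum(len(v) for v in gold.values())\n    if tp == 0:\n        return 0.0\n    p, r = tp / n_pred, tp / n_gold\n    return 2 * p * r / (p + r)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "BLI Shared Task",
"sec_num": "2."
},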
{
"text": "The BLI shared task is composed of two tracks that is: (i) the closed task and (ii) the open task. In the closed task, only the data sets provided by the organizers can be used, while in the open track, external data as well as other language pairs evaluation are allowed. In this paper, only the closed track is addressed.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Tracks",
"sec_num": "2.1."
},
{
"text": "Two comparable corpora are provided: Wikipedia and WaCKy corpora (Baroni et al., 2009) . Following the recommendations of the organizers, Table 1 illustrates the language pairs and their corresponding corpora that we address in the closed track. ",
"cite_spans": [
{
"start": 65,
"end": 86,
"text": "(Baroni et al., 2009)",
"ref_id": "BIBREF3"
}
],
"ref_spans": [
{
"start": 138,
"end": 145,
"text": "Table 1",
"ref_id": "TABREF1"
}
],
"eq_spans": [],
"section": "Data Sets",
"sec_num": "2.2."
},
{
"text": "In this section, we present the three tested strategies as well as the chosen system to address the BLI shared task.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Approach",
"sec_num": "3."
},
{
"text": "To extract bilingual lexicons from comparable corpora, a well-known word embedding approach that maps source words in a target space has been introduced (Mikolov et al., 2013) and several mapping improvements have been proposed (Xing et al., 2015; Artetxe et al., 2018a) . The basic idea is to learn an efficient transfer matrix that preserves translation pairs proximity of a seed lexicon. After the mapping step, a similarity measure is used to rank the translation candidates.",
"cite_spans": [
{
"start": 153,
"end": 175,
"text": "(Mikolov et al., 2013)",
"ref_id": "BIBREF9"
},
{
"start": 228,
"end": 247,
"text": "(Xing et al., 2015;",
"ref_id": "BIBREF15"
},
{
"start": 248,
"end": 270,
"text": "Artetxe et al., 2018a)",
"ref_id": "BIBREF1"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Word Embeddings and Mapping",
"sec_num": "3.1."
},
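{
"text": "As an illustration of this mapping step, a minimal sketch follows; it learns a least-squares transfer matrix in the spirit of Mikolov et al. (2013), not the full VecMap pipeline (which adds normalization, whitening and re-weighting steps). The arrays Xs and Yt are hypothetical stand-ins for the aligned seed-lexicon embeddings:\n\nimport numpy as np\n\n# Xs: source embeddings of the seed lexicon, Yt: embeddings of their translations\nrng = np.random.default_rng(0)\nXs = rng.normal(size=(5000, 300))\nYt = rng.normal(size=(5000, 300))\n\n# Least-squares solution of min_W ||Xs W - Yt||^2\nW, *_ = np.linalg.lstsq(Xs, Yt, rcond=None)\n\n# Project a source word vector into the target space\nmapped = Xs[0] @ W",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Word Embeddings and Mapping",
"sec_num": "3.1."
},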
{
"text": "To apply the mapping approach, several embedding models can be used such as Skip-gram and CBOW (Mikolov et al., 2013) , Glove (Pennington et al., 2014) , character Skipgram (Bojanowski et al., 2016) , etc. In our approach, we used fastText (Bojanowski et al., 2016) as our word embeddings representations. We trained character Skip-gram and CBOW models, using the same parameters as the given pretrained embeddings for both methods: minCount: 30; dim: 300; ws (context window): 7; epochs: 10; neg (number of negatives sampled): 10. For the English-Spanish pair, our embeddings were trained on Wikipedia. For all the other language pairs, the embedding models were trained on their corresponding WaCKy corpora. After training our embeddings, we used the VecMap tool from Artetxe et al. (2018a) to project by pairs every source embeddings space in its corresponding target space (i.e. Skip-gram English mapped with Skip-gram Spanish or CBOW French mapped with CBOW German). We used the supervised method and split the training seed lexicon 80/20 for training and validation. For the submitted results, we took the whole seed lexicon as training for the mapping. Once our embeddings were projected in the same space, we compared every source word of our reference lists to every target word of the vocabulary with a similarity measure. We used the CSLS (Conneau et al., 2017) , which is based on the cosine similarity but reduces the similarity for word vectors in dense areas and increases it for isolated ones:",
"cite_spans": [
{
"start": 95,
"end": 117,
"text": "(Mikolov et al., 2013)",
"ref_id": "BIBREF9"
},
{
"start": 126,
"end": 151,
"text": "(Pennington et al., 2014)",
"ref_id": "BIBREF10"
},
{
"start": 173,
"end": 198,
"text": "(Bojanowski et al., 2016)",
"ref_id": "BIBREF4"
},
{
"start": 240,
"end": 265,
"text": "(Bojanowski et al., 2016)",
"ref_id": "BIBREF4"
},
{
"start": 770,
"end": 792,
"text": "Artetxe et al. (2018a)",
"ref_id": "BIBREF1"
},
{
"start": 1350,
"end": 1372,
"text": "(Conneau et al., 2017)",
"ref_id": "BIBREF6"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Word Embeddings and Mapping",
"sec_num": "3.1."
},
{
"text": "CSLS(x s , y t ) = 2cos(x s , y t ) \u2212 knn(x s ) \u2212 knn(y t ) (1)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Word Embeddings and Mapping",
"sec_num": "3.1."
},
{
"text": "where x s (y t ) is the vector from source (target) space and knn(x s ) (knn(y t )) is the mean of the cosine of its knearest neighbors in the target (source) space. This similarity measure allows us to order the target words from the most to the less likely to be the translation, but as there is multiple words as valid translations, we can not just keep the first word of each ranking. We used two criteria to select the candidates from the embeddings approach: i) a maximal number of candidates that we want to keep for each source word and ii) a minimal CSLS value to validate the candidates. We present the different values that we used for every language pair in Table 2 . These values were fixed empirically on the validation set. ",
"cite_spans": [],
"ref_spans": [
{
"start": 670,
"end": 677,
"text": "Table 2",
"ref_id": "TABREF3"
}
],
"eq_spans": [],
"section": "Word Embeddings and Mapping",
"sec_num": "3.1."
},
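{
"text": "A minimal sketch of the CSLS ranking and of the two selection criteria is given below; it assumes L2-normalized embedding matrices so that dot products equal cosines, and uses k = 10 neighbors as in Conneau et al. (2017). The function names are ours:\n\nimport numpy as np\n\ndef csls(X, Y, k=10):\n    # X: (n_src, d) mapped source vectors, Y: (n_tgt, d) target vectors\n    cos = X @ Y.T\n    r_src = np.sort(cos, axis=1)[:, -k:].mean(axis=1, keepdims=True)  # knn(x_s)\n    r_tgt = np.sort(cos, axis=0)[-k:, :].mean(axis=0, keepdims=True)  # knn(y_t)\n    return 2 * cos - r_src - r_tgt  # Equation (1)\n\ndef select_candidates(scores, max_cand, min_sim):\n    # Keep at most max_cand targets per source word, all with CSLS >= min_sim\n    # (the per-pair values of Table 2)\n    kept = []\n    for row in scores:\n        top = np.argsort(row)[::-1][:max_cand]\n        kept.append([j for j in top if row[j] >= min_sim])\n    return kept",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Word Embeddings and Mapping",
"sec_num": "3.1."
},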
{
"text": "In order to take advantage of several embedding models, Hazem and Morin (2018) proposed an extension of the mapping approach by applying the concatenation or addition of two embedding models before performing the mapping approach. In our case, and for each language, we applied the concatenation of character CBOW and character Skip-gram models for each word. Starting from the mapped 300 dimensional embeddings from the previous step, we obtained a concatenated embedding vector of 600 dimensions for each source and target words.",
"cite_spans": [
{
"start": 56,
"end": 78,
"text": "Hazem and Morin (2018)",
"ref_id": "BIBREF8"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Embeddings Concatenation",
"sec_num": "3.2."
},
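{
"text": "A minimal sketch of this meta-embedding construction is shown below; the file names are hypothetical, and both matrices are assumed to be already mapped with VecMap:\n\nimport numpy as np\n\nsg = np.load('mapped_char_skipgram.npy')  # (vocab_size, 300)\ncbow = np.load('mapped_char_cbow.npy')    # (vocab_size, 300)\n\n# Concatenation along the feature axis gives 600-dimensional meta-embeddings\nconc = np.concatenate([sg, cbow], axis=1)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Embeddings Concatenation",
"sec_num": "3.2."
},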
{
"text": "A careful analysis of the training reference lists revealed that many translation pairs were graphically identical, especially for the low frequency lists. While some of these words are perfect cognates, a part of them are inconsistencies (i.e. the English to French translation pair someone -someone). We give more details of these problems in Section 5. To take this into consideration, we selected as valid candidates for every source word its perfect cognates if present in the target vocabulary. We added the constraint that each translation word pairs must have a distribution with a proportional factor of n. Given a source word w s and its corresponding translation w t , and given the frequency of w s (f req(w s )), respectively the frequency of w t (f req(w t )). The constraint is represented as:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Perfect Cognates",
"sec_num": "3.3."
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "1 n \u2264 f req(w s ) f req(w t ) \u2264 n",
"eq_num": "(2)"
}
],
"section": "Perfect Cognates",
"sec_num": "3.3."
},
{
"text": "where n was fixed empirically to 100.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Perfect Cognates",
"sec_num": "3.3."
},
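{
"text": "A minimal sketch of this extraction is given below; the frequency dictionaries are assumed to be precomputed from the corpora, and the helper name is ours:\n\ndef perfect_cognates(src_words, src_freq, tgt_freq, n=100):\n    # Keep the identical target word as the candidate when it exists in the\n    # target vocabulary and the frequency ratio respects Equation (2)\n    pairs = {}\n    for w in src_words:\n        if w in tgt_freq and 1 / n <= src_freq[w] / tgt_freq[w] <= n:\n            pairs[w] = w\n    return pairs",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Perfect Cognates",
"sec_num": "3.3."
},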
{
"text": "To improve performance, combining several approaches is often performed. As will be shown in Table 3 of the results Section, the embeddings approach performs better on high frequency pairs while the perfect cognates method shows good results on lower range pairs. Hence, we naturally combined the extracted candidates of both strategies to provide one final mixed list, without taking into account the previous limit of the number of candidates. This mixing approach also noted -Mix (Conc + Dist)-, corresponds to our participating system to the BLI shared task. One exception however, concerns English and Russian languages for which we applied the concatenation approach only. Table 3 presents the obtained results (F1-score) of the individual strategies: (i) the mapping approach (Skip-gram and CBOW); (ii) the concatenation approach (Concatenation); (iii) the perfect cognates approach; and our proposed system (iv) Mix (Conc + Dist), on the validation sets for all language pairs. We notice that mixing the candidates from the concatenated embeddings method and the perfect cognates extraction (Mix (Conc + Dist)) obtains the best results in almost every configuration, except one from English to Spanish and, obviously, the two pairs containing Russian, due to the different alphabets between English and Russian. Nevertheless, the English to Russian pair has a F1-score superior to zero, meaning that some Russian words are not written in Cyrillic, questioning the consistency of the lists. The better results of the mixed method indicate a good complementarity of both approaches, which is confirmed by the trends regarding the frequency lists. We observe that the embeddings approach performs better on high fre-quency pairs and then degrades as the frequency decreases. Conversely, for the perfect cognates approach, the results are very high for the low frequency pairs and degrades for translation pairs of higher frequencies. The decline of results for perfect cognates is mostly due to the fact that high frequency words tend to have more translations than low ones (see Table 4 ) and the perfect cognates can at most predict one translation per source word. The numbers illustrated in Table 4 corresponds to the validation lists, and not to the whole dictionaries. As additional information, not shown in Table 3 , it is to note that the perfect cognates method has a high precision for most language pairs, and it finds usually for more than half of the source words a perfect cognate in the target vocabulary. And thus, the results in F1-score are particularly high for the German-French pair in both directions as only few source words have more than one translation on the reference lists (1.03 target words per source words). Finally, we note that the embeddings approach for the English-Spanish pair in both directions presents way better Language pair high mid low all en-es 2.34 1.58 1.10 1.67 en-de 2.83 1.81 1.14 1.93 fr-en 1.64 1.42 1.15 1.40 de-fr 1.08 1.02 1.00 1.03 Table 4 : Ratio of target words per source words for the validation lists for some language pair on different lists results than other language pairs (10 to almost 30 points). Unlike other pairs trained on WaCKy, this pair is the only one trained on Wikipedia, contradicting the idea that \"the WaCKy corpora seem somewhat better suited for the dictionary induction task than Wikipedia\". To verify this statement, we used pre-trained word embeddings from Grave et al. (2018) to check if the corpus was really the main problem. 
And actually, using the pre-trained embeddings on Wikipedia or Common Crawl led to much better results than the results obtained using the WaCKy corpora, reaching about the same F1-score as the English-Spanish language pair.",
"cite_spans": [
{
"start": 3449,
"end": 3468,
"text": "Grave et al. (2018)",
"ref_id": "BIBREF7"
}
],
"ref_spans": [
{
"start": 93,
"end": 100,
"text": "Table 3",
"ref_id": "TABREF5"
},
{
"start": 679,
"end": 686,
"text": "Table 3",
"ref_id": "TABREF5"
},
{
"start": 2085,
"end": 2092,
"text": "Table 4",
"ref_id": null
},
{
"start": 2200,
"end": 2207,
"text": "Table 4",
"ref_id": null
},
{
"start": 2320,
"end": 2327,
"text": "Table 3",
"ref_id": "TABREF5"
},
{
"start": 2995,
"end": 3002,
"text": "Table 4",
"ref_id": null
}
],
"eq_spans": [],
"section": "Mixing the Candidates",
"sec_num": "3.4."
},
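{
"text": "A minimal sketch of the mixing step is given below; the candidate containers are assumed to come from the two previous steps, and the helper name is ours:\n\ndef mix_candidates(conc_cands, cognate_pairs):\n    # Union of the concatenation candidates and the perfect-cognate match,\n    # without the per-source candidate cap used for the individual runs\n    mixed = {}\n    for w, cands in conc_cands.items():\n        merged = list(cands)\n        if w in cognate_pairs and cognate_pairs[w] not in merged:\n            merged.append(cognate_pairs[w])\n        mixed[w] = merged\n    return mixed",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Mixing the Candidates",
"sec_num": "3.4."
},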
{
"text": "Our final results for the shared task were reported from the mixed approach for all language pairs but the two with Russian, for which we only took the results from the concatenation approach.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Results",
"sec_num": "4."
},
{
"text": "As mentioned in the shared task, we report here the problems found in the seed lexicon. We first noticed the presence of graphically identical pairs on the English-Russian pair, whereas the two languages have a different alphabet. This results are visible in Table 3 at the Perfect Cognates corresponding list. These instances are only present on the English to Russian language pair, suggesting a better control has been done for the source part of the lists. A brief inspection of the lists makes us notice the presence of multiple words not belonging to the language of interest (i.e. on the French part of the English to French seed lexicon: grammy, gov, god, northwest, phoenix and many others) and we suggest the usage of monolingual dictionary to get rid of them. We even find pairs with none of the words belonging to one of the two languages (in the German to French seed lexicon the pair times -times, which should be zeit -temps if we translate it from English, or ram -ram instead of ramm -b\u00e9lier).",
"cite_spans": [],
"ref_spans": [
{
"start": 259,
"end": 266,
"text": "Table 3",
"ref_id": "TABREF5"
}
],
"eq_spans": [],
"section": "Seed Lexicon Analysis",
"sec_num": "5."
},
{
"text": "We also observe many proper names and while some of them can be interesting to translate, most of them are graphically identical words (jura, edward, lille... on French to German or calais, guanajuato... on English to French), and we question the utility of translating such words, especially when some of them are not correctly presented (the German to French seed lexicon proposes a mans -mans pair, and we assume this is an incomplete form of the city \"Le Mans\" in France). Focusing on the French part of some lists, we notice inconsistency with the use of diacritics (i.e.\u00e9,\u00e8...), the word events in English has four proposed translations in French, each being a variation of accents:\u00e9v\u00e8nements, ev\u00e9nements, evenements, and\u00e9v\u00e9nements. While in French, both\u00e9 or\u00e8 are accepted for the second e, the first one should always be an\u00e9. The English word development being another example with developpement and d\u00e9veloppement while only the latter should be a correct translation. Still on the French part, we notice that the inflectional morphology also suffers from incoherence. In the German to French pair, allein is only translated with its masculine (seul) and feminine (seule) and not its plural forms (seuls and seules), but ausgebildet translations are only form\u00e9s and form\u00e9, forgetting the feminine forms. We add that in the English to French pair christian being translated to chr\u00e9tiens, chr\u00e9tienne, chr\u00e9tien (and christian, which can only be a proper name in French) instead of chr\u00e9tiens (and chr\u00e9tiennes which is not even here) being the translation of christians.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Seed Lexicon Analysis",
"sec_num": "5."
},
{
"text": "Finally, some conjugation omissions are observed, for the English word believe for instance, the proposed translations are croyez, croire, croient, and crois but not croyons and we later have believed with only croyait as translation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Seed Lexicon Analysis",
"sec_num": "5."
},
{
"text": "All these inconsistencies open important questions about the evaluation process and suggest a careful handcrafted validation which will undoubtedly strengthen the BLI shared task.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Seed Lexicon Analysis",
"sec_num": "5."
},
{
"text": "We presented in this paper the participation of the TALN/LS2N team at the BUCC shared task. We used concatenation of classic embeddings models (character Skipgram and character CBOW) from fastText to get our first results. Graphical proximity of many translation pairs led us to strengthen our system based on a perfect cognates strategy. This latter tend to beat embedding methods on some language pairs. As both methods were effective in different frequency ranges, we combined them to pump up our results on all the language pairs except the two containing Russian. We add that the Wikipedia corpora seem to be more suited for our approach for bilingual lexicon induction than the WaCKy corpora, contradicting the initial claim of the organizers. Finally, we noted and reported multiple problems on the training seed lexicons, the most visible one being the presence of graphically identical pairs on the English-Russian pair, whereas the two languages have a different alphabet. Also, the presence of multiple words not belonging to the language of interest and many proper names, with many of them being graphically identical, making the utility of these pairs questionable. At last, some inconsistencies are present (at least for the French part of these lists) with the inflectional morphology, and with the verb conjugation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "6."
}
],
"back_matter": [
{
"text": "We would like to thank the organizers for this exciting challenge. This research has received funding from the French National Research Agency under grant ANR-17-CE23-0001 ADDICTE (Distributional analysis in specialized domain) as well as the Canadian Institute for Data Valorisation (IVADO).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acknowledgments",
"sec_num": null
}
],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Unsupervised hyperalignment for multilingual word embeddings",
"authors": [
{
"first": "J",
"middle": [],
"last": "Alaux",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Grave",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Cuturi",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Joulin",
"suffix": ""
}
],
"year": 2018,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Alaux, J., Grave, E., Cuturi, M., and Joulin, A. (2018). Unsupervised hyperalignment for multilingual word em- beddings. CoRR, abs/1811.01124.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Generalizing and improving bilingual word embedding mappings with a multi-step framework of linear transformations",
"authors": [
{
"first": "M",
"middle": [],
"last": "Artetxe",
"suffix": ""
},
{
"first": "G",
"middle": [],
"last": "Labaka",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Agirre",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence",
"volume": "",
"issue": "",
"pages": "5012--5019",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Artetxe, M., Labaka, G., and Agirre, E. (2018a). Gener- alizing and improving bilingual word embedding map- pings with a multi-step framework of linear transforma- tions. In Proceedings of the Thirty-Second AAAI Con- ference on Artificial Intelligence, pages 5012-5019, New Orleans, LA, USA.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "A robust self-learning method for fully unsupervised cross-lingual mappings of word embeddings",
"authors": [
{
"first": "M",
"middle": [],
"last": "Artetxe",
"suffix": ""
},
{
"first": "G",
"middle": [],
"last": "Labaka",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Agirre",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (ACL'18)",
"volume": "",
"issue": "",
"pages": "789--798",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Artetxe, M., Labaka, G., and Agirre, E. (2018b). A robust self-learning method for fully unsupervised cross-lingual mappings of word embeddings. In Proceedings of the 56th Annual Meeting of the Association for Computa- tional Linguistics (ACL'18), pages 789-798, Melbourne, Australia.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "The wacky wide web: a collection of very large linguistically processed web-crawled corpora. Language resources and evaluation",
"authors": [
{
"first": "M",
"middle": [],
"last": "Baroni",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Bernardini",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Ferraresi",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Zanchetta",
"suffix": ""
}
],
"year": 2009,
"venue": "",
"volume": "43",
"issue": "",
"pages": "209--226",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Baroni, M., Bernardini, S., Ferraresi, A., and Zanchetta, E. (2009). The wacky wide web: a collection of very large linguistically processed web-crawled corpora. Language resources and evaluation, 43(3):209-226.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Enriching word vectors with subword information",
"authors": [
{
"first": "P",
"middle": [],
"last": "Bojanowski",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Grave",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Joulin",
"suffix": ""
},
{
"first": "T",
"middle": [],
"last": "Mikolov",
"suffix": ""
}
],
"year": 2016,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Bojanowski, P., Grave, E., Joulin, A., and Mikolov, T. (2016). Enriching word vectors with subword informa- tion. CoRR, abs/1607.04606.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Unsupervised multilingual word embeddings",
"authors": [
{
"first": "X",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "C",
"middle": [],
"last": "Cardie",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "261--270",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Chen, X. and Cardie, C. (2018). Unsupervised multilin- gual word embeddings. In Proceedings of the 2018 Con- ference on Empirical Methods in Natural Language Pro- cessing, pages 261-270, Brussels, Belgium.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Word translation without parallel data",
"authors": [
{
"first": "A",
"middle": [],
"last": "Conneau",
"suffix": ""
},
{
"first": "G",
"middle": [],
"last": "Lample",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Ranzato",
"suffix": ""
},
{
"first": "L",
"middle": [],
"last": "Denoyer",
"suffix": ""
},
{
"first": "H",
"middle": [],
"last": "J\u00e9gou",
"suffix": ""
}
],
"year": 2017,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Conneau, A., Lample, G., Ranzato, M., Denoyer, L., and J\u00e9gou, H. (2017). Word translation without parallel data. CoRR, abs/1710.04087.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Learning word vectors for 157 languages",
"authors": [
{
"first": "E",
"middle": [],
"last": "Grave",
"suffix": ""
},
{
"first": "P",
"middle": [],
"last": "Bojanowski",
"suffix": ""
},
{
"first": "P",
"middle": [],
"last": "Gupta",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Joulin",
"suffix": ""
},
{
"first": "T",
"middle": [],
"last": "Mikolov",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the International Conference on Language Resources and Evaluation (LREC'18)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Grave, E., Bojanowski, P., Gupta, P., Joulin, A., and Mikolov, T. (2018). Learning word vectors for 157 lan- guages. In Proceedings of the International Conference on Language Resources and Evaluation (LREC'18), Miyazaki, Japan.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Leveraging metaembeddings for bilingual lexicon extraction from specialized comparable corpora",
"authors": [
{
"first": "A",
"middle": [],
"last": "Hazem",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Morin",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the 27th International Conference on Computational Linguistics",
"volume": "",
"issue": "",
"pages": "937--949",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hazem, A. and Morin, E. (2018). Leveraging meta- embeddings for bilingual lexicon extraction from spe- cialized comparable corpora. In Proceedings of the 27th International Conference on Computational Linguistics, pages 937-949. Association for Computational Linguis- tics.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Distributed representations of words and phrases and their compositionality",
"authors": [
{
"first": "T",
"middle": [],
"last": "Mikolov",
"suffix": ""
},
{
"first": "I",
"middle": [],
"last": "Sutskever",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "G",
"middle": [
"S"
],
"last": "Corrado",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Dean",
"suffix": ""
}
],
"year": 2013,
"venue": "Advances in Neural Information Processing Systems",
"volume": "26",
"issue": "",
"pages": "3111--3119",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mikolov, T., Sutskever, I., Chen, K., Corrado, G. S., and Dean, J. (2013). Distributed representations of words and phrases and their compositionality. In C. J. C. Burges, et al., editors, Advances in Neural Information Processing Systems 26, pages 3111-3119. Curran Asso- ciates, Inc.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Glove: Global vectors for word representation",
"authors": [
{
"first": "J",
"middle": [],
"last": "Pennington",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Socher",
"suffix": ""
},
{
"first": "C",
"middle": [],
"last": "Manning",
"suffix": ""
}
],
"year": 2014,
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
"volume": "",
"issue": "",
"pages": "1532--1543",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Pennington, J., Socher, R., and Manning, C. (2014). Glove: Global vectors for word representation. In Pro- ceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1532- 1543, Doha, Qatar.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Offline bilingual word vectors, orthogonal transformations and the inverted softmax",
"authors": [
{
"first": "S",
"middle": [
"L"
],
"last": "Smith",
"suffix": ""
},
{
"first": "D",
"middle": [
"H P"
],
"last": "Turban",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Hamblin",
"suffix": ""
},
{
"first": "N",
"middle": [
"Y"
],
"last": "Hammerla",
"suffix": ""
}
],
"year": 2017,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Smith, S. L., Turban, D. H. P., Hamblin, S., and Hammerla, N. Y. (2017). Offline bilingual word vectors, orthog- onal transformations and the inverted softmax. CoRR, abs/1702.03859.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "A multi-pairwise extension of Procrustes analysis for multilingual word translation",
"authors": [
{
"first": "H",
"middle": [],
"last": "Taitelbaum",
"suffix": ""
},
{
"first": "G",
"middle": [],
"last": "Chechik",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Goldberger",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP'19)",
"volume": "",
"issue": "",
"pages": "3560--3565",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Taitelbaum, H., Chechik, G., and Goldberger, J. (2019a). A multi-pairwise extension of Procrustes analysis for multilingual word translation. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP- IJCNLP'19), pages 3560-3565, Hong Kong, China.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Multilingual word translation using auxiliary languages",
"authors": [
{
"first": "H",
"middle": [],
"last": "Taitelbaum",
"suffix": ""
},
{
"first": "G",
"middle": [],
"last": "Chechik",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Goldberger",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP'19)",
"volume": "",
"issue": "",
"pages": "1330--1335",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Taitelbaum, H., Chechik, G., and Goldberger, J. (2019b). Multilingual word translation using auxiliary languages. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th In- ternational Joint Conference on Natural Language Pro- cessing (EMNLP-IJCNLP'19), pages 1330-1335, Hong Kong, China.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Unsupervised multilingual word embedding with limited resources using neural language models",
"authors": [
{
"first": "T",
"middle": [],
"last": "Wada",
"suffix": ""
},
{
"first": "T",
"middle": [],
"last": "Iwata",
"suffix": ""
},
{
"first": "Y",
"middle": [],
"last": "Matsumoto",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics (ACL'19)",
"volume": "",
"issue": "",
"pages": "3113--3124",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Wada, T., Iwata, T., and Matsumoto, Y. (2019). Unsu- pervised multilingual word embedding with limited re- sources using neural language models. In Proceedings of the 57th Annual Meeting of the Association for Computa- tional Linguistics (ACL'19), pages 3113-3124, Florence, Italy.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Normalized word embedding and orthogonal transform for bilingual word translation",
"authors": [
{
"first": "C",
"middle": [],
"last": "Xing",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Wang",
"suffix": ""
},
{
"first": "C",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Y",
"middle": [],
"last": "Lin",
"suffix": ""
}
],
"year": 2015,
"venue": "Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-HLT'15)",
"volume": "",
"issue": "",
"pages": "1006--1011",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Xing, C., Wang, D., Liu, C., and Lin, Y. (2015). Nor- malized word embedding and orthogonal transform for bilingual word translation. In Proceedings of the 2015 Conference of the North American Chapter of the Associ- ation for Computational Linguistics: Human Language Technologies (NAACL-HLT'15), pages 1006-1011, Den- ver, CO, USA.",
"links": null
}
},
"ref_entries": {
"TABREF1": {
"type_str": "table",
"content": "
: Corpus used for every language pair |
Our training seed lexicons are from Conneau et al. (2017), |
for the validation results, we split these lists 80/20. |
",
"html": null,
"text": "",
"num": null
},
"TABREF2": {
"type_str": "table",
"content": "en-es | 4 | 0.1 |
es-en | 2 | 0.08 |
en-de | 5 | 0.06 |
de-en | 5 | 0.04 |
en-fr | 3 | 0.08 |
fr-en | 2 | 0.04 |
en-ru | 4 | 0.05 |
ru-en | 2 | 0.03 |
de-fr | 2 | 0.08 |
fr-de | 2 | 0.06 |
",
"html": null,
"text": "Language pair Cand. \u2264 Sim. \u2265",
"num": null
},
"TABREF3": {
"type_str": "table",
"content": "",
"html": null,
"text": "Parameters for selection of candidates for every language pair",
"num": null
},
"TABREF5": {
"type_str": "table",
"content": "",
"html": null,
"text": "F1-score for our different approaches and language pairs",
"num": null
}
}
}
}