|
{ |
|
"paper_id": "I17-1023", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:37:36.122251Z" |
|
}, |
|
"title": "Geographical Evaluation of Word Embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Michal", |
|
"middle": [], |
|
"last": "Konkol", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "NTIS -New Technologies for the Information Society", |
|
"institution": "University of West Bohemia", |
|
"location": { |
|
"country": "Czech Republic" |
|
} |
|
}, |
|
"email": "konkol@kiv.zcu.cz" |
|
}, |
|
{ |
|
"first": "Tom\u00e1\u0161", |
|
"middle": [], |
|
"last": "Brychc\u00edn", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "NTIS -New Technologies for the Information Society", |
|
"institution": "University of West Bohemia", |
|
"location": { |
|
"country": "Czech Republic" |
|
} |
|
}, |
|
"email": "brychcin@kiv.zcu.cz" |
|
}, |
|
{ |
|
"first": "Michal", |
|
"middle": [], |
|
"last": "Nykl", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "NTIS -New Technologies for the Information Society", |
|
"institution": "University of West Bohemia", |
|
"location": { |
|
"country": "Czech Republic" |
|
} |
|
}, |
|
"email": "nyklm@kiv.zcu.cz" |
|
}, |
|
{ |
|
"first": "Tom\u00e1\u0161", |
|
"middle": [], |
|
"last": "Hercig", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "NTIS -New Technologies for the Information Society", |
|
"institution": "University of West Bohemia", |
|
"location": { |
|
"country": "Czech Republic" |
|
} |
|
}, |
|
"email": "hercig@kiv.zcu.cz" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Word embeddings are commonly compared either with human-annotated word similarities or through improvements in natural language processing tasks. We propose a novel principle which compares the information from word embeddings with reality. We implement this principle by comparing the information in the word embeddings with geographical positions of cities. Our evaluation linearly transforms the semantic space to optimally fit the real positions of cities and measures the deviation between the position given by word embeddings and the real position. A set of well-known word embeddings with state-of-the-art results were evaluated. We also introduce a visualization that helps with error analysis.", |
|
"pdf_parse": { |
|
"paper_id": "I17-1023", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Word embeddings are commonly compared either with human-annotated word similarities or through improvements in natural language processing tasks. We propose a novel principle which compares the information from word embeddings with reality. We implement this principle by comparing the information in the word embeddings with geographical positions of cities. Our evaluation linearly transforms the semantic space to optimally fit the real positions of cities and measures the deviation between the position given by word embeddings and the real position. A set of well-known word embeddings with state-of-the-art results were evaluated. We also introduce a visualization that helps with error analysis.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "In recent years the improvements in quality of word embeddings led to significant improvements in many natural language processing (NLP) tasks, e.g. sentiment analysis (Maas et al., 2011) , named entity recognition (Lample et al., 2016) , or machine translation (Zou et al., 2013) . New models for word embeddings and improvements to the old ones are introduced rapidly (Bojanowski et al., 2017; Salle et al., 2016; Yin and Sch\u00fctze, 2016) . As the number of various word embeddings increases, it becomes very time consuming to choose word embeddings for a particular task (Nayak et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 168, |
|
"end": 187, |
|
"text": "(Maas et al., 2011)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 215, |
|
"end": 236, |
|
"text": "(Lample et al., 2016)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 262, |
|
"end": 280, |
|
"text": "(Zou et al., 2013)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 370, |
|
"end": 395, |
|
"text": "(Bojanowski et al., 2017;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 396, |
|
"end": 415, |
|
"text": "Salle et al., 2016;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 416, |
|
"end": 438, |
|
"text": "Yin and Sch\u00fctze, 2016)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 572, |
|
"end": 592, |
|
"text": "(Nayak et al., 2016)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To mitigate the problem, it is necessary to provide appropriate evaluation together with the word embeddings. The evaluation should cover multiple properties of word embeddings in order to allow the user to choose the model directly based on the results (Nayak et al., 2016) . Many evaluation approaches have already been proposed and they can be roughly divided to intrinsic and extrinsic (Schnabel et al., 2015) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 254, |
|
"end": 274, |
|
"text": "(Nayak et al., 2016)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 390, |
|
"end": 413, |
|
"text": "(Schnabel et al., 2015)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The intrinsic evaluation measures the quality of the model directly by comparison with humanannotated data that capture semantic information. The advantage of this approach is that it is fast, simple, and easy to reproduce and analyze (Schnabel et al., 2015; Nayak et al., 2016) . The main issue is that the evaluation score often does not correlate with improvements in NLP tasks (Chiu et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 235, |
|
"end": 258, |
|
"text": "(Schnabel et al., 2015;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 259, |
|
"end": 278, |
|
"text": "Nayak et al., 2016)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 381, |
|
"end": 400, |
|
"text": "(Chiu et al., 2016)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The extrinsic evaluation is indirect and measures the improvements through other tasks -currently mainly through NLP tasks. The advantage of this approach is that for each task we know which model to choose. The disadvantage is the computational complexity (Nayak et al., 2016) . For each new word embeddings we need to train models for several approaches to several tasks and find the optimal hyperparameters of the models. Moreover, the same data and implementations should be used by all researchers for the evaluation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 257, |
|
"end": 277, |
|
"text": "(Nayak et al., 2016)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We propose a new evaluation paradigm that is in between the intrinsic and extrinsic evaluation (actually, half the people believe its intrinsic and the other half believe its extrinsic). We measure neither the semantic word similarity as in intrinsic evaluation nor improvements in a particular task that uses word embeddings. We compare the information encoded in word embeddings directly with real-world data. We implement the paradigm with geographical data. We take GPS coordinates of cities and measure to what degree is the information encoded in the word embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The paper is organized as follows. In Section 2 we describe commonly used evaluation approaches for word embeddings and discuss their strengths and weaknesses. Our evaluation metric is introduced in Section 3. In Section 4 we provide various experiments with our evaluation metric, including evaluation of state-of-the-art word embeddings. Finally, we conclude in Section 5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "There are two common tasks which fall under intrinsic evaluation: word similarity and word analogy tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In the word similarity task, the evaluation data consist of pairs of words and their similarity annotated by humans. The word embeddings are compared with the evaluation data usually by Spearman rank correlation. The word similarity task has a long tradition in the semantics research (Rubenstein and Goodenough, 1965) . Currently there are multiple corpora created to test different properties of the word embeddings (Finkelstein et al., 2001; Agirre et al., 2009; Luong et al., 2013; Hill et al., 2015) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 285, |
|
"end": 318, |
|
"text": "(Rubenstein and Goodenough, 1965)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 418, |
|
"end": 444, |
|
"text": "(Finkelstein et al., 2001;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 445, |
|
"end": 465, |
|
"text": "Agirre et al., 2009;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 466, |
|
"end": 485, |
|
"text": "Luong et al., 2013;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 486, |
|
"end": 504, |
|
"text": "Hill et al., 2015)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
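A minimal sketch of the word-similarity evaluation described above, comparing embedding cosine similarities with human ratings via Spearman rank correlation. This is an illustrative assumption, not code from the paper; `emb` (word -> numpy vector) and `pairs` [(w1, w2, human_score), ...] are hypothetical inputs.

```python
import numpy as np
from scipy.stats import spearmanr

def word_similarity_score(emb, pairs):
    """Spearman correlation between human ratings and embedding cosine similarities."""
    human, model = [], []
    for w1, w2, score in pairs:
        if w1 in emb and w2 in emb:
            v1, v2 = emb[w1], emb[w2]
            model.append(float(v1 @ v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))
            human.append(score)
    rho, _pvalue = spearmanr(human, model)
    return rho
```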
|
{ |
|
"text": "The word analogy task evaluates the ability of the word embeddings to capture relations between words consistently. The evaluation data consists of questions (with answers) in the form: if word a is related to word b the same way as word c is related to word d, what word is d given a, b, and c? The word embeddings are compared based on their accuracy. The Google Word Analogy corpus is usually used for evaluation (Mikolov et al., 2013a ). The word analogy task is closest to our evaluation because some of the questions are also based on real-world data, e.g. countries and their capital cities. Unlike our evaluation, they handle city names as common words, use the global semantic space, and compare them using cosine similarity.", |
|
"cite_spans": [ |
|
{ |
|
"start": 416, |
|
"end": 438, |
|
"text": "(Mikolov et al., 2013a", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
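A sketch of the standard analogy query ("a is to b as c is to ?") answered by cosine similarity, as the paragraph above describes. This follows the common vector-offset recipe and is not the authors' code; `emb` is an assumed dict of words to unit-length numpy vectors.

```python
import numpy as np

def analogy(emb, a, b, c, exclude_inputs=True):
    """Return the vocabulary word closest (by cosine) to emb[b] - emb[a] + emb[c]."""
    target = emb[b] - emb[a] + emb[c]
    target = target / np.linalg.norm(target)
    best_word, best_sim = None, -np.inf
    for word, vec in emb.items():
        if exclude_inputs and word in (a, b, c):
            continue
        sim = float(vec @ target)  # vectors assumed unit-length
        if sim > best_sim:
            best_word, best_sim = word, sim
    return best_word
```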
|
{ |
|
"text": "The extrinsic evaluation uses other NLP tasks for comparison of word embeddings. Many tasks are used for extrinsic evaluation, e.g. sentiment analysis (Schnabel et al., 2015) , named entity recognition (Konkol et al., 2015) , or parsing (Bansal et al., 2014) . Word embeddings are compared based on the improvements measured with standard evaluation metrics for the given task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 151, |
|
"end": 174, |
|
"text": "(Schnabel et al., 2015)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 202, |
|
"end": 223, |
|
"text": "(Konkol et al., 2015)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 237, |
|
"end": 258, |
|
"text": "(Bansal et al., 2014)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Both intrinsic and extrinsic evaluations have their advantages and disadvantages. The word similarity task was analysed and criticized by multiple authors (Faruqui et al., 2016; Chiu et al., 2016; Batchkarov et al., 2016; Gladkova and Drozd, 2016) . The advantages of word similar-ity evaluation are that it is very fast and can be easily interpreted from the linguistic point of view (or generally by human). The corpora often suffer from a subset of the following disadvantages: low correlation with extrinsic evaluation (applications), polysemy is not supported, subjectivity of single value similarity, overfitting (no training, heldout, test sets), significance tests are not common for word similarity, and the data are often small.", |
|
"cite_spans": [ |
|
{ |
|
"start": 155, |
|
"end": 177, |
|
"text": "(Faruqui et al., 2016;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 178, |
|
"end": 196, |
|
"text": "Chiu et al., 2016;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 197, |
|
"end": 221, |
|
"text": "Batchkarov et al., 2016;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 222, |
|
"end": 247, |
|
"text": "Gladkova and Drozd, 2016)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The word analogy task has the same disadvantages as the word similarity task; moreover the evaluation is quite slow, because it is necessary to sort all words based on their similarity with the question. Linzen (2016) provides a detailed analysis of the word analogy task and shows that results in this evaluation are to a large extent based on proximity in the semantic space rather than consistent offsets between the word pairs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The main advantage of the extrinsic evaluation is that it directly measures application improvements. The main disadvantage is computational complexity. There exist many tasks that could be used for evaluation, but it is intractable to use all of them (Nayak et al., 2016) . Moreover, there exist many approaches to all the tasks and some embeddings might be good for one approach and bad for the others. Choosing a single approach as a general benchmark could lead to incorrect conclusions. If we still want to choose a single model, then which one? On one hand, the stateof-the-art approaches of the tasks evolve in timestate-of-the-art method may well become a baseline in a few years. On the other hand, using baseline approaches loses the ability to measure application improvements. Word embeddings may have a high score with the baseline approach, but may contain the same information that is already present in the state-of-the-art approach. Other embeddings may have low score with the baseline approach, but the information may be usable in the state-of-the-art approach.", |
|
"cite_spans": [ |
|
{ |
|
"start": 252, |
|
"end": 272, |
|
"text": "(Nayak et al., 2016)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Many disadvantages of intrinsic evaluation are also related to particular tasks in extrinsic evaluation, e.g. named entity recognition or sentiment analysis usually do not use significance tests, are subjective, or use small data sets. Nayak et al. (2016) propose a system for standard automatic extrinsic evaluation. They selected a representative subset of tasks for the evaluation and chose a single approach for each task (based on standard neural network architectures) in order to achieve reasonable evaluation times (4-5 hours). Even though this approach has the disadvantages presented in the previous two paragraphs, it is definitely a step forward to a standardized evaluation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 236, |
|
"end": 255, |
|
"text": "Nayak et al. (2016)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The evaluation data set consists of a list of n names of cities and their GPS coordinates stored in matrix G \u2208 R n\u00d72 . We assume that Earth is perfectly spherical and its radius is 6,371. Given the assumption, the GPS coordinates in matrix G can be transformed to Euclidean coordinates in matrix Y \u2208 R n\u00d73 and back. The word embeddings of cities are in matrix X \u2208 R n\u00d7d with a d-dimensional vector for each city name. We normalize rows of X and Y, because it is helpful for stability of the optimization and we need only the cosine similarity between the rows.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Evaluation", |
|
"sec_num": "3" |
|
}, |
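A minimal sketch of the coordinate preparation described above: GPS latitude/longitude pairs in G (degrees, shape n x 2) are mapped to points on a sphere of radius 6,371 km, and rows are scaled to unit length. Function names and the degree convention are illustrative assumptions, not the authors' code.

```python
import numpy as np

EARTH_RADIUS_KM = 6371.0

def gps_to_euclidean(G):
    """Convert latitude/longitude in degrees to 3D Euclidean coordinates (n x 3)."""
    lat = np.radians(G[:, 0])
    lon = np.radians(G[:, 1])
    x = np.cos(lat) * np.cos(lon)
    y = np.cos(lat) * np.sin(lon)
    z = np.sin(lat)
    return EARTH_RADIUS_KM * np.stack([x, y, z], axis=1)

def normalize_rows(M):
    """Scale each row to unit L2 norm, as done for both X and Y."""
    return M / np.linalg.norm(M, axis=1, keepdims=True)
```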
|
{ |
|
"text": "The first step of our evaluation is to find a subspace of the original d-dimensional word embeddings space that contains the information about city locations. The word embeddings transformed to the subspace are represented by matrix W \u2208 R n\u00d73 . The matrices W and Y have to share the same dimensions because we want to compare the distances between their rows (cities). We are looking for a linear transformation W = XT parametrized by transformation matrix T \u2208 R d\u00d73 . We use the least squares cost function, the optimal transformation matrix T * is defined as a transformation matrix that minimizes squared distances between real and approximate city positions W \u2212 Y 2 . This optimization problem is highly prone to overfitting as n \u2248 d; moreover the row rank of X is likely lower than n, because the embeddings for cities are highly correlated and thus they are likely linearly dependent. Thus we employ L 2 regularization. The final optimization problem is given by Equation 1, where \u03b1 is the regularization weight.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Evaluation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "T * = arg min T ( XT \u2212 Y 2 + \u03b1 T 2 ) (1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Evaluation", |
|
"sec_num": "3" |
|
}, |
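A hedged sketch of Equation (1): T is fit from the normalized embeddings X (n x d) to the normalized Euclidean city positions Y (n x 3). The closed-form ridge-regression solution below is one standard way to minimize the objective; the paper does not specify the solver, so this is an assumption.

```python
import numpy as np

def fit_transformation(X, Y, alpha):
    """Ridge solution of Equation (1): T* = (X^T X + alpha * I)^{-1} X^T Y."""
    d = X.shape[1]
    A = X.T @ X + alpha * np.eye(d)
    return np.linalg.solve(A, X.T @ Y)  # shape (d, 3)

# Usage (hypothetical names): W = X_test @ fit_transformation(X_train, Y_train, alpha)
# places the test cities in the 3D space of Euclidean city positions.
```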
|
{ |
|
"text": "Finally, we can compare W and Y. The primary metric for the evaluation is mean geographic distance, i.e. the distance between two points on a globe measured on the surface. We firstly need to normalize rows of W because the vectors can be above or below the surface. The geographic distance can be measured using Equation 2, where g is the geographic distance, w i and y i denote the i-th row of W and Y respectively, and r is the radius of Earth.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Evaluation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "g = r \u2022 arccos(w i \u2022 y i )", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Proposed Evaluation", |
|
"sec_num": "3" |
|
}, |
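A short sketch of Equation (2): each predicted position w_i is renormalized to the unit sphere and compared with the true unit-length position y_i via the great-circle distance r * arccos(w_i . y_i). Names and the clipping guard are illustrative assumptions.

```python
import numpy as np

EARTH_RADIUS_KM = 6371.0

def mean_geographic_distance(W, Y):
    """Mean great-circle distance between predicted (W) and true (Y) positions, in km."""
    W = W / np.linalg.norm(W, axis=1, keepdims=True)
    Y = Y / np.linalg.norm(Y, axis=1, keepdims=True)
    cos = np.clip(np.sum(W * Y, axis=1), -1.0, 1.0)  # clip guards against rounding error
    return float(np.mean(EARTH_RADIUS_KM * np.arccos(cos)))
```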
|
{ |
|
"text": "While the mean geographic distance is a good metric for a global view, it does not take the local structure into account, i.e. a random model moves the cities in all directions and breaks the local structure (nearest neighbors), but other model (with the same mean geographic distance) can move the cities in one direction and preserve local structure. We measure the ability of the embeddings to capture local structure by Precision at K (Prec@K). This metric creates two sets of K nearest neighbors for each city, one for the evaluation data Y and one for the transformed word embeddings W. The precision between these two sets is averaged over all cities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Evaluation", |
|
"sec_num": "3" |
|
}, |
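A sketch of Prec@K as described above: for each city we take its K nearest neighbors among the true positions Y and among the transformed embeddings W, and average the overlap over all cities. This is an assumed implementation with illustrative names, not the authors' code.

```python
import numpy as np

def precision_at_k(W, Y, k=10):
    """Average overlap of K-nearest-neighbor sets computed from W and from Y."""
    def knn_sets(M):
        # pairwise Euclidean distances; each city excludes itself
        d = np.linalg.norm(M[:, None, :] - M[None, :, :], axis=-1)
        np.fill_diagonal(d, np.inf)
        return [set(np.argsort(row)[:k]) for row in d]
    pred, gold = knn_sets(W), knn_sets(Y)
    return float(np.mean([len(p & g) / k for p, g in zip(pred, gold)]))
```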
|
{ |
|
"text": "We also provide more statistics that help with understanding of the primary score. Median geographic distance gives a better idea about common distances, because it is not affected by extreme values. Sometimes, we found it easier to think about the errors in angles rather than distances, mainly because angles are independent of the size of the globe.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Evaluation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In this section we firstly describe the data used for the proposed evaluation. Then we briefly introduce the word embeddings used to demonstrate the proposed evaluation. Finally, we follow with experiments that show some properties of the evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We downloaded the list of 640 known cities from https://www.timeanddate.com/ worldclock/full.html and further adjusted it. We removed cities that consist of multiple words from the list, because the evaluated models were trained only on single word expressions. It has lead to a reduction of the set to 540 cities. Then we created a dictionary of the top 10,000 words from Wikipedia and filtered out cities not present in the models, which resulted into a set of 483 cities. Finally we removed cities with ambiguous names and inconsistent use of diacritics, leaving us with 440 cities. Table 1 : Results of the selected set of word embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 586, |
|
"end": 593, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The data needed to be split into the training and test set. The training set is used to find optimal transformation matrix T * and optimal regularization weight \u03b1. The test set is used for the evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We manually selected the train set from the cities to evenly cover geographical area by the cities with the highest Wikipedia term frequency. The final train set contains 124 cities and the final test set contains 316 cities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We chose a set of well-known word embeddings to show their differences using the proposed evaluation. In the following paragraphs we briefly introduce the chosen word embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Embeddings", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "SkipGram is a neural network based model (Mikolov et al., 2013b) . Levy and Goldberg (2014) provide trained SkipGram models with two sizes of the context window (2, 5) and their own model that uses dependency-based context, denoted by SkipGram -BoW2, SkipGram -BoW5, and SkipGram -Dep, respectively.", |
|
"cite_spans": [ |
|
{ |
|
"start": 41, |
|
"end": 64, |
|
"text": "(Mikolov et al., 2013b)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 67, |
|
"end": 91, |
|
"text": "Levy and Goldberg (2014)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Embeddings", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "GloVe is a log-bilinear model that tries to find word embeddings that are good at predicting global word co-occurence statistics (Pennington et al., 2014) . We use embeddings provided by authors of the model trained on various corpus sizes (6, 42, and 840 billions words) and with various vector dimensions (50, 100, 200, 300).", |
|
"cite_spans": [ |
|
{ |
|
"start": 129, |
|
"end": 154, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Embeddings", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "FastText is an extension to SkipGram, where the word is represented as character n-grams (Bojanowski et al., 2017) . We use embeddings provided by authors of the model trained on Wikipedia.", |
|
"cite_spans": [ |
|
{ |
|
"start": 89, |
|
"end": 114, |
|
"text": "(Bojanowski et al., 2017)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Embeddings", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "LexVec is based on factorization of positive point-wise mutual information matrix using proven strategies from GloVe, SkipGram, and methods based on singular value decomposition (Salle et al., 2016) . We use two models provided by the authors of the model trained on Wikipedia and News Crawl (LexVec -w + nc), and Common Crawl (LexVec -cc).", |
|
"cite_spans": [ |
|
{ |
|
"start": 178, |
|
"end": 198, |
|
"text": "(Salle et al., 2016)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Embeddings", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "MetaEmbeddings is an ensemble method that combines several embeddings (Yin and Sch\u00fctze, 2016) . We use the embeddings provided by the authors of the model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 70, |
|
"end": 93, |
|
"text": "(Yin and Sch\u00fctze, 2016)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Embeddings", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "WoRel is an extension of SkipGram, where a phrase (instead of a word) is used to guess the context words (Konkol, 2017) . We use the model provided by the authors trained on Wikipedia and Gigaword corpus.", |
|
"cite_spans": [ |
|
{ |
|
"start": 105, |
|
"end": 119, |
|
"text": "(Konkol, 2017)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Embeddings", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "LSA is a count based method that creates a word-document co-occurrence matrix and reduces its dimension by singular value decomposition (SVD) (Landauer et al., 1998) . We trained the models on Wikipedia.", |
|
"cite_spans": [ |
|
{ |
|
"start": 142, |
|
"end": 165, |
|
"text": "(Landauer et al., 1998)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Embeddings", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "PPMI-SVD creates word co-occurrence matrix where the co-ocurence is measured by positive pointwise mutual information. The dimension of the matrix is then reduced by SVD. We used the hyperwords package (Levy et al., 2015) and trained it on Wikipedia and Gigaword corpus.", |
|
"cite_spans": [ |
|
{ |
|
"start": 202, |
|
"end": 221, |
|
"text": "(Levy et al., 2015)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Embeddings", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In our first experiment we evaluate the selected set of embeddings with the proposed evaluation metric. The results are shown in Table 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 129, |
|
"end": 136, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "We provide results for two baselines. The first baseline (random placement) places cities randomly on the globe. The results for this baseline are computed analytically. The second baseline generates random embeddings, each value is selected randomly from uniform distribution between \u22121 and 1. The random embeddings are then evaluated in the same way as normal embeddings. The results show average results for five random embeddings. The comparison of the baselines show that the evaluation works as expected: random embeddings produce randomly placed cities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "The results show that all the evaluated word embeddings are significantly better than the baselines. This proves that the embeddings do not capture only the similarity between words but also nontrivial knowledge about the world.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Most geographic information was clearly captured by LSA, followed by FastText. A group of models, namely WoRel, SkipGram -BoW5, PPMI-SVD, and LexVec, achieved similar results and are only slightly worse than FastText. Surprisingly, GloVe (trained with similar amount of data) performed significantly worse. MetaEmbeddings achieved the worst results, probably because the ensemble was optimized for other purposes.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "There is a high correlation between the performance in the mean geographic distance and Prec@10 measures. Models that are good at capturing global structure tend to be good at capturing local structure.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "The type of the training data is probably more important than the size of the data. This can be seen on the LexVec models, where the model trained on Wikipedia and news articles outperforms the other model trained on significantly more data. Still, an extreme amount of data leads to good results as seen on the results of GloVe trained on various corpus sizes.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Recently, most of the NLP tasks use word embeddings based on local (window-based) context. Surprisingly, our evaluation shows that LSA, a Cities ordered by distance error", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "LSA FastText LexVec-w+nc WoRel PPMI SkipGram-BoW5", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "GloVe-6B-300d MetaEmbeddings Figure 1 : Distribution of distance errors. method based on global (document-wide) context, outperforms all the other models in the proposed evaluation. The comparison of count based (PPMI-SVD) and predictive models (e.g. Skip-Gram, FastText) shows no significant differences between these two approaches.", |
|
"cite_spans": [ |
|
{ |
|
"start": 251, |
|
"end": 271, |
|
"text": "Skip-Gram, FastText)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 29, |
|
"end": 37, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Our evaluation shows that the mainstream models such as SkipGram and GloVe that perform similarly in intrinsic word similarity and extrinsic task based evaluations may have very different results in other types of evaluation. Figure 1 shows the distribution of geographic distance errors for individual cities. The distance error is reasonable (\u2264 2500 km) for approximately 90% of the cities for most of the word embeddings. Unfortunately, the rest of the cities has significantly larger error. In this section, we try to identify the source of the extreme errors.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 226, |
|
"end": 234, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Firstly, we suspected that the reason is sparseness and the extreme errors are caused by underrepresented words. In Figure 2 we show a relation between the number of occurrences of the city name in Wikipedia (training data for most of the methods) and the mean distance error. The word occurrences are equidistantly grouped into ten bins. We concluded that there is no clear relation between the number of occurrences of a city name and the distance error.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 116, |
|
"end": 124, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "We also suspected ambiguity with common words. To check this hypothesis, we counted how GloVe-6B-300d MetaEmbeddings Figure 2 : Relation between the number of occurrences of the city name and the distance error for LSA. many times the city name appears as lowercase and how many times with a capital letter. We found out that most of the words does not appear at all in lowercase version. A small portion of words has significant number of occurrences of the lowercase version (e.g. Phoenix), but they do not correlate with the distance error.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 117, |
|
"end": 125, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Lastly, we manually checked all the cities with extreme distance errors. We found out that the main problem is ambiguity with other named entities that are more famous than the city, e.g. the city Kobe is overshadowed by Kobe Bryant, Bismarck by Otto von Bismarck, Montgomery by the common first name. A special case of this problem is multiple cities with the same name. This is not a problem if there is large difference between the fame of the cities (e.g. London), but it is a problem for cities that are similar in size and fame (e.g. Midland, Kingstown, Bridgetown).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "The dimension of the word embeddings obviously affects their results (Table 1) . In this experiment we explore the effect of higher dimensions on the results. This should provide a hint to the authors of the semantic spaces how to choose the appropriate dimension.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 69, |
|
"end": 78, |
|
"text": "(Table 1)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Embeddings Dimension", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "In Figure 3 , we show the results of LSA with dimension ranging from 100 to 1000. The performance degrades quickly as we decrease the dimen- sion under 300. The results slightly improve as we increase the dimension from 300 to 600. There are no significant improvements as the dimension is increased over 600.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 11, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Embeddings Dimension", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "The proposed evaluation uses regularization and requires the regularization weight \u03b1. Setting optimal regularization weight is difficult for some algorithms. We conducted an experiment to prove that the regularization weight does not play an important role in the evaluation, i.e. the scores of the embeddings are not heavily affected by our inability to find optimal regularization weights.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Regularization", |
|
"sec_num": "4.6" |
|
}, |
|
{ |
|
"text": "We performed randomized 10-fold crossvalidation to find optimal regularization weight multiple times. The variance of the found regularization weights and also the impact of this variance were very small for a particular word embeddings method. Moreover, the optimal regularization weight is very similar for all the word embeddings. Figure 4 shows the mean geographic distance as a function of the regularization weight and suggests that the function can be easily optimized.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 334, |
|
"end": 342, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Regularization", |
|
"sec_num": "4.6" |
|
}, |
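A sketch (an assumed implementation, not the authors' code) of how the regularization weight alpha = e^beta could be selected by randomized 10-fold cross-validation on the training cities, as in the experiment above. X are normalized city embeddings, Y normalized true positions; the beta grid, helper names, and solver are illustrative.

```python
import numpy as np

def _fit(X, Y, alpha):
    # ridge solution of Equation (1)
    return np.linalg.solve(X.T @ X + alpha * np.eye(X.shape[1]), X.T @ Y)

def _geo_dist(W, Y, r=6371.0):
    W = W / np.linalg.norm(W, axis=1, keepdims=True)
    return np.mean(r * np.arccos(np.clip(np.sum(W * Y, axis=1), -1.0, 1.0)))

def select_alpha(X, Y, betas=np.arange(-5.0, 10.0, 0.5), n_folds=10, seed=0):
    """Return the alpha = e^beta with the lowest cross-validated mean distance."""
    rng = np.random.default_rng(seed)
    folds = np.array_split(rng.permutation(len(X)), n_folds)
    best_alpha, best_score = None, np.inf
    for beta in betas:
        alpha = float(np.exp(beta))
        scores = []
        for i in range(n_folds):
            dev = folds[i]
            train = np.concatenate([folds[j] for j in range(n_folds) if j != i])
            T = _fit(X[train], Y[train], alpha)
            scores.append(_geo_dist(X[dev] @ T, Y[dev]))
        if np.mean(scores) < best_score:
            best_alpha, best_score = alpha, float(np.mean(scores))
    return best_alpha, best_score
```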
|
{ |
|
"text": "Given a set of models, the evaluation metric should be able to rank them reliably based on their quality. Batchkarov et al. (2016) propose a test of the reliability. They incrementally add noise to ", |
|
"cite_spans": [ |
|
{ |
|
"start": 106, |
|
"end": 130, |
|
"text": "Batchkarov et al. (2016)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Noise Sensitivity", |
|
"sec_num": "4.7" |
|
}, |
|
{ |
|
"text": "Regularization -\u03b2 LSA FastText LexVec-w+nc WoRel PPMI SkipGram-BoW5", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Noise Sensitivity", |
|
"sec_num": "4.7" |
|
}, |
|
{ |
|
"text": "GloVe-6B-300d MetaEmbeddings Figure 4 : Influence of the regularization weight \u03b1 = e \u03b2 on the mean geographic distance. The values are computed using 10 fold cross-validation on the training data.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 29, |
|
"end": 37, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Noise Sensitivity", |
|
"sec_num": "4.7" |
|
}, |
|
{ |
|
"text": "word embeddings and assume that word embeddings with more noise have lower quality. The metric should be able to capture the differences of the quality and smoothly and monotonically go from good results to results of random embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Noise Sensitivity", |
|
"sec_num": "4.7" |
|
}, |
|
{ |
|
"text": "In Figure 5 we show the behavior of the proposed metric. We use the best embeddings (LSA) as a starting point. Then we add noise uniformly sampled from interval [0, p] to each value in the embeddings. The parameter p is incrementally increased with step 0.01 from 0 to 1. For each value of p we repeat the evaluation 1000 times. The proposed metric works as expected. Firstly, the mean distance error almost linearly increases. As the embeddings become more random the increases slow down until they converge to the results of random embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 11, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Noise Sensitivity", |
|
"sec_num": "4.7" |
|
}, |
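A sketch of the noise-sensitivity test described above: starting from a fixed embedding matrix X, uniform noise from [0, p] is added to every value, p is swept from 0 to 1 in steps of 0.01, and each setting is evaluated repeatedly. `evaluate` stands in for the full pipeline (fit T on training cities, report the mean geographic distance on test cities) and is an assumed callable, not part of the paper's released code.

```python
import numpy as np

def noise_sweep(X, evaluate, repeats=1000, seed=0):
    """Return (p, mean score) pairs for increasingly noisy copies of the embeddings X."""
    rng = np.random.default_rng(seed)
    results = []
    for p in np.arange(0.0, 1.01, 0.01):
        scores = [evaluate(X + rng.uniform(0.0, p, size=X.shape))
                  for _ in range(repeats)]
        results.append((float(p), float(np.mean(scores))))
    return results
```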
|
{ |
|
"text": "As a side effect, our evaluation approach also produces a natural visualization presented in Figure 6 . The visualization can be used for comparison of methods, error analysis, or demonstration of semantics and unsupervised learning. The transformation also allows us to visualize common words on the map, not only city names. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 93, |
|
"end": 101, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Visualization", |
|
"sec_num": "4.8" |
|
}, |
|
{ |
|
"text": "We have proposed a new evaluation method for word embeddings. It measures how much information about geographic location of cities is contained in word embeddings. This type of evaluation differs from previously presented evaluations and forms a new word embeddings evaluation paradigm. The new paradigm does not evaluate the embeddings from the natural language processing view, but rather from the artificial intelligence view, where the algorithm tries to capture some information about the world.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We have analyzed both the evaluation metric and commonly used embeddings. We have shown that the metric is stable and can reliably distinguish between good and poor models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "LSA achieved the best results with mean geographic distance error of 1437 kilometers. Surprisingly, it outperformed mainstream models such as SkipGram. GloVe, with state-of-the-art results from other evaluations, performed rather poorly in the proposed evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In the future, we would like to implement the proposed paradigm with other similar evaluations, where we try to find out if the model is able to capture a specific real-world information.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The dataset and the evaluation software can be downloaded from the authors' websites 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 85, |
|
"end": 86, |
|
"text": "1", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This publication was supported by the project LO1506 of the Czech Ministry of Education, Youth and Sports under the program NPU I and by the university specific research project SGS-2016-018 Data and Software Engineering for Advanced Applications.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "A study on similarity and relatedness using distributional and wordnet-based approaches", |
|
"authors": [ |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Enrique", |
|
"middle": [], |
|
"last": "Alfonseca", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Keith", |
|
"middle": [], |
|
"last": "Hall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jana", |
|
"middle": [], |
|
"last": "Kravalova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eneko Agirre, Enrique Alfonseca, Keith Hall, Jana Kravalova, Marius Pa\u015fca, and Aitor Soroa. 2009. A study on similarity and relatedness using distribu- tional and wordnet-based approaches. In Proceed- ings of Human Language Technologies: The 2009", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Annual Conference of the North American Chapter of the Association for Computational Linguistics, NAACL '09", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "19--27", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Annual Conference of the North American Chap- ter of the Association for Computational Linguistics, NAACL '09, pages 19-27, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Tailoring continuous word representations for dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Gimpel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karen", |
|
"middle": [], |
|
"last": "Livescu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "809--815", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohit Bansal, Kevin Gimpel, and Karen Livescu. 2014. Tailoring continuous word representations for dependency parsing. In Proceedings of the 52nd An- nual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 809- 815, Baltimore, Maryland. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Currently at konkol.me and nlp", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Currently at konkol.me and nlp.kiv.zcu.cz", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "A critique of word similarity as a method for evaluating distributional semantic models", |
|
"authors": [ |
|
{ |
|
"first": "Miroslav", |
|
"middle": [], |
|
"last": "Batchkarov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Kober", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Reffin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julie", |
|
"middle": [], |
|
"last": "Weeds", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Weir", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 1st Workshop on Evaluating Vector-Space Representations for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7--12", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Miroslav Batchkarov, Thomas Kober, Jeremy Reffin, Julie Weeds, and David Weir. 2016. A critique of word similarity as a method for evaluating distribu- tional semantic models. In Proceedings of the 1st Workshop on Evaluating Vector-Space Representa- tions for NLP, pages 7-12, Berlin, Germany. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Enriching word vectors with subword information", |
|
"authors": [ |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "135--146", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching word vectors with subword information. Transactions of the Associa- tion for Computational Linguistics, 5:135-146.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Intrinsic evaluation of word vectors fails to predict extrinsic performance", |
|
"authors": [ |
|
{ |
|
"first": "Billy", |
|
"middle": [], |
|
"last": "Chiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Korhonen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sampo", |
|
"middle": [], |
|
"last": "Pyysalo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "The First Workshop on Evaluating Vector Space Representations for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Billy Chiu, Anna Korhonen, and Sampo Pyysalo. 2016. Intrinsic evaluation of word vectors fails to predict extrinsic performance. In The First Workshop on Evaluating Vector Space Representations for NLP. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Problems with evaluation of word embeddings using word similarity tasks", |
|
"authors": [ |
|
{ |
|
"first": "Manaal", |
|
"middle": [], |
|
"last": "Faruqui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yulia", |
|
"middle": [], |
|
"last": "Tsvetkov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushpendre", |
|
"middle": [], |
|
"last": "Rastogi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 1st Workshop on Evaluating Vector-Space Representations for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "30--35", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Manaal Faruqui, Yulia Tsvetkov, Pushpendre Rastogi, and Chris Dyer. 2016. Problems with evaluation of word embeddings using word similarity tasks. In Proceedings of the 1st Workshop on Evaluating Vector-Space Representations for NLP, pages 30- 35, Berlin, Germany. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Placing search in context: The concept revisited", |
|
"authors": [ |
|
{ |
|
"first": "Lev", |
|
"middle": [], |
|
"last": "Finkelstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evgeniy", |
|
"middle": [], |
|
"last": "Gabrilovich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yossi", |
|
"middle": [], |
|
"last": "Matias", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ehud", |
|
"middle": [], |
|
"last": "Rivlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zach", |
|
"middle": [], |
|
"last": "Solan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gadi", |
|
"middle": [], |
|
"last": "Wolfman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eytan", |
|
"middle": [], |
|
"last": "Ruppin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the 10th International Conference on World Wide Web, WWW '01", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "406--414", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lev Finkelstein, Evgeniy Gabrilovich, Yossi Matias, Ehud Rivlin, Zach Solan, Gadi Wolfman, and Eytan Ruppin. 2001. Placing search in context: The con- cept revisited. In Proceedings of the 10th Interna- tional Conference on World Wide Web, WWW '01, pages 406-414, New York, NY, USA. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Intrinsic evaluations of word embeddings: What can we do better?", |
|
"authors": [ |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Gladkova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aleksandr", |
|
"middle": [], |
|
"last": "Drozd", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 1st Workshop on Evaluating Vector-Space Representations for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "36--42", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anna Gladkova and Aleksandr Drozd. 2016. Intrinsic evaluations of word embeddings: What can we do better? In Proceedings of the 1st Workshop on Eval- uating Vector-Space Representations for NLP, pages 36-42, Berlin, Germany. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Simlex-999: Evaluating semantic models with genuine similarity estimation", |
|
"authors": [ |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roi", |
|
"middle": [], |
|
"last": "Reichart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Korhonen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Comput. Linguist", |
|
"volume": "41", |
|
"issue": "4", |
|
"pages": "665--695", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Felix Hill, Roi Reichart, and Anna Korhonen. 2015. Simlex-999: Evaluating semantic models with gen- uine similarity estimation. Comput. Linguist., 41(4):665-695.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Joint Unsupervised Learning of Semantic Representation of Words and Roles in Dependency Trees", |
|
"authors": [ |
|
{ |
|
"first": "Michal", |
|
"middle": [], |
|
"last": "Konkol", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "RANLP 2017 -Recent Advances in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michal Konkol. 2017. Joint Unsupervised Learning of Semantic Representation of Words and Roles in De- pendency Trees. In RANLP 2017 -Recent Advances in Natural Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Latent semantics in named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Michal", |
|
"middle": [], |
|
"last": "Konkol", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom\u00e1\u0161", |
|
"middle": [], |
|
"last": "Brychc\u00edn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miloslav", |
|
"middle": [], |
|
"last": "Konop\u00edk", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Expert Systems with Applications", |
|
"volume": "42", |
|
"issue": "7", |
|
"pages": "3470--3479", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michal Konkol, Tom\u00e1\u0161 Brychc\u00edn, and Miloslav Konop\u00edk. 2015. Latent semantics in named en- tity recognition. Expert Systems with Applications, 42(7):3470 -3479.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Neural architectures for named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miguel", |
|
"middle": [], |
|
"last": "Ballesteros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandeep", |
|
"middle": [], |
|
"last": "Subramanian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kazuya", |
|
"middle": [], |
|
"last": "Kawakami", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "260--270", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Lample, Miguel Ballesteros, Sandeep Sub- ramanian, Kazuya Kawakami, and Chris Dyer. 2016. Neural architectures for named entity recognition. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, pages 260-270, San Diego, California. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "An introduction to latent semantic analysis", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Thomas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Landauer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Peter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Darrell", |
|
"middle": [], |
|
"last": "Foltz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Laham", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Discourse processes", |
|
"volume": "25", |
|
"issue": "", |
|
"pages": "259--284", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas K Landauer, Peter W Foltz, and Darrell La- ham. 1998. An introduction to latent semantic anal- ysis. Discourse processes, 25(2-3):259-284.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Dependencybased word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "302--308", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Omer Levy and Yoav Goldberg. 2014. Dependency- based word embeddings. In Proceedings of the 52nd Annual Meeting of the Association for Com- putational Linguistics (Volume 2: Short Papers), pages 302-308, Baltimore, Maryland. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Improving distributional similarity with lessons learned from word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "TACL", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "211--225", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Omer Levy, Yoav Goldberg, and Ido Dagan. 2015. Im- proving distributional similarity with lessons learned from word embeddings. TACL, 3:211-225.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Issues in evaluating semantic spaces using word analogies", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Tal Linzen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 1st Workshop on Evaluating Vector-Space Representations for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "13--18", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tal Linzen. 2016. Issues in evaluating semantic spaces using word analogies. In Proceedings of the 1st Workshop on Evaluating Vector-Space Representa- tions for NLP, pages 13-18, Berlin, Germany. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Better word representations with recursive neural networks for morphology", |
|
"authors": [ |
|
{ |
|
"first": "Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the Seventeenth Conference on Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "104--113", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thang Luong, Richard Socher, and Christopher D. Manning. 2013. Better word representations with recursive neural networks for morphology. In Pro- ceedings of the Seventeenth Conference on Compu- tational Natural Language Learning, CoNLL 2013, Sofia, Bulgaria, August 8-9, 2013, pages 104-113.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Learning word vectors for sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Maas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raymond", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Daly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Potts", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "142--150", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew L. Maas, Raymond E. Daly, Peter T. Pham, Dan Huang, Andrew Y. Ng, and Christopher Potts. 2011. Learning word vectors for sentiment analy- sis. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Hu- man Language Technologies -Volume 1, HLT '11, pages 142-150, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Efficient estimation of word representations in vector space", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "CoRR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013a. Efficient estimation of word represen- tations in vector space. CoRR, abs/1301.3781.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Efficient estimation of word representations in vector space", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "CoRR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013b. Efficient estimation of word represen- tations in vector space. CoRR, abs/1301.3781.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Evaluating word embeddings using a representative suite of practical tasks", |
|
"authors": [ |
|
{ |
|
"first": "Neha", |
|
"middle": [], |
|
"last": "Nayak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gabor", |
|
"middle": [], |
|
"last": "Angeli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "The First Workshop on Evaluating Vector Space Representations for NLP. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Neha Nayak, Gabor Angeli, and Christopher D Man- ning. 2016. Evaluating word embeddings using a representative suite of practical tasks. In The First Workshop on Evaluating Vector Space Representa- tions for NLP. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christo- pher D. Manning. 2014. Glove: Global vectors for word representation. In Empirical Methods in Nat- ural Language Processing (EMNLP), pages 1532- 1543.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Contextual correlates of synonymy", |
|
"authors": [ |
|
{ |
|
"first": "Herbert", |
|
"middle": [], |
|
"last": "Rubenstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Goodenough", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1965, |
|
"venue": "Commun. ACM", |
|
"volume": "8", |
|
"issue": "10", |
|
"pages": "627--633", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Herbert Rubenstein and John B. Goodenough. 1965. Contextual correlates of synonymy. Commun. ACM, 8(10):627-633.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Matrix factorization using window sampling and negative sampling for improved word representations", |
|
"authors": [ |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Salle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aline", |
|
"middle": [], |
|
"last": "Villavicencio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Idiart", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "419--424", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexandre Salle, Aline Villavicencio, and Marco Idiart. 2016. Matrix factorization using window sampling and negative sampling for improved word represen- tations. In Proceedings of the 54th Annual Meet- ing of the Association for Computational Linguistics (Volume 2: Short Papers), pages 419-424, Berlin, Germany. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Evaluation methods for unsupervised word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Tobias", |
|
"middle": [], |
|
"last": "Schnabel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Igor", |
|
"middle": [], |
|
"last": "Labutov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Mimno", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thorsten", |
|
"middle": [], |
|
"last": "Joachims", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "298--307", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tobias Schnabel, Igor Labutov, David Mimno, and Thorsten Joachims. 2015. Evaluation methods for unsupervised word embeddings. In Proceedings of the 2015 Conference on Empirical Methods in Nat- ural Language Processing, pages 298-307, Lisbon, Portugal. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Learning word meta-embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Wenpeng", |
|
"middle": [], |
|
"last": "Yin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hinrich", |
|
"middle": [], |
|
"last": "Sch\u00fctze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1351--1360", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wenpeng Yin and Hinrich Sch\u00fctze. 2016. Learning word meta-embeddings. In Proceedings of the 54th Annual Meeting of the Association for Computa- tional Linguistics (Volume 1: Long Papers), pages 1351-1360, Berlin, Germany. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Bilingual word embeddings for phrase-based machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Will", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Zou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Cer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1393--1398", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Will Y. Zou, Richard Socher, Daniel M. Cer, and Christopher D. Manning. 2013. Bilingual word em- beddings for phrase-based machine translation. In EMNLP, pages 1393-1398. ACL.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF2": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "The relation between dimension of the vector space and the mean distance error for LSA." |
|
}, |
|
"FIGREF4": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "The effect of noise added to LSA embeddings. The figure shows mean value and standard deviation of 1000 runs. The red line on the top represents random city placement." |
|
}, |
|
"FIGREF5": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "A visualization of selected cities placed on the map based on various word embeddings. Each circle denotes one city." |
|
} |
|
} |
|
} |
|
} |