{
"paper_id": "I08-1042",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T07:40:22.268212Z"
},
"title": "Heterogeneous Automatic MT Evaluation Through Non-Parametric Metric Combinations",
"authors": [
{
"first": "Jes\u00fas",
"middle": [],
"last": "Gim\u00e9nez",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "LSI Department Universitat Polit\u00e8cnica de Catalunya Jordi Girona",
"location": {
"addrLine": "Salgado 1-3",
"postCode": "E-08034",
"settlement": "Barcelona"
}
},
"email": "jgimenez@lsi.upc.edu"
},
{
"first": "Llu\u00eds",
"middle": [],
"last": "M\u00e0rquez",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "LSI Department Universitat Polit\u00e8cnica de Catalunya Jordi Girona",
"location": {
"addrLine": "Salgado 1-3",
"postCode": "E-08034",
"settlement": "Barcelona"
}
},
"email": "lluism@lsi.upc.edu"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "Combining different metrics into a single measure of quality seems the most direct and natural way to improve over the quality of individual metrics. Recently, several approaches have been suggested (Kulesza and Shieber, 2004; Liu and Gildea, 2007; Albrecht and Hwa, 2007a). Although based on different assumptions, these approaches share the common characteristic of being parametric. Their models involve a number of parameters whose weight must be adjusted. As an alternative, in this work, we study the behaviour of non-parametric schemes, in which metrics are combined without having to adjust their relative importance. Besides, rather than limiting to the lexical dimension, we work on a wide set of metrics operating at different linguistic levels (e.g., lexical, syntactic and semantic). Experimental results show that non-parametric methods are a valid means of putting different quality dimensions together, thus tracing a possible path towards heterogeneous automatic MT evaluation.",
"pdf_parse": {
"paper_id": "I08-1042",
"_pdf_hash": "",
"abstract": [
{
"text": "Combining different metrics into a single measure of quality seems the most direct and natural way to improve over the quality of individual metrics. Recently, several approaches have been suggested (Kulesza and Shieber, 2004; Liu and Gildea, 2007; Albrecht and Hwa, 2007a). Although based on different assumptions, these approaches share the common characteristic of being parametric. Their models involve a number of parameters whose weight must be adjusted. As an alternative, in this work, we study the behaviour of non-parametric schemes, in which metrics are combined without having to adjust their relative importance. Besides, rather than limiting to the lexical dimension, we work on a wide set of metrics operating at different linguistic levels (e.g., lexical, syntactic and semantic). Experimental results show that non-parametric methods are a valid means of putting different quality dimensions together, thus tracing a possible path towards heterogeneous automatic MT evaluation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Automatic evaluation metrics have notably accelerated the development cycle of MT systems in the last decade. There exist a large number of metrics based on different similarity criteria. By far, the most widely used metric in recent literature is BLEU (Papineni et al., 2001) . Other well-known metrics are WER (Nie\u00dfen et al., 2000) , NIST (Doddington, 2002) , GTM (Melamed et al., 2003) , ROUGE (Lin and Och, 2004a) , METEOR (Banerjee and Lavie, 2005) , and TER (Snover et al., 2006) , just to name a few. All these metrics take into account information at the lexical level 1 , and, therefore, their reliability depends very strongly on the heterogeneity/representativity of the set of reference translations available (Culy and Riehemann, 2003) . In order to overcome this limitation several authors have suggested taking advantage of paraphrasing support (Zhou et al., 2006; Kauchak and Barzilay, 2006; Owczarzak et al., 2006) . Other authors have tried to exploit information at deeper linguistic levels. For instance, we may find metrics based on full constituent parsing (Liu and Gildea, 2005) , and on dependency parsing (Liu and Gildea, 2005; Amig\u00f3 et al., 2006; Mehay and Brew, 2007; Owczarzak et al., 2007) . We may find also metrics at the level of shallow-semantics, e.g., over semantic roles and named entities (Gim\u00e9nez and M\u00e0rquez, 2007) , and at the properly semantic level, e.g., over discourse representations (Gim\u00e9nez, 2007) .",
"cite_spans": [
{
"start": 253,
"end": 276,
"text": "(Papineni et al., 2001)",
"ref_id": "BIBREF25"
},
{
"start": 312,
"end": 333,
"text": "(Nie\u00dfen et al., 2000)",
"ref_id": "BIBREF22"
},
{
"start": 336,
"end": 340,
"text": "NIST",
"ref_id": null
},
{
"start": 341,
"end": 359,
"text": "(Doddington, 2002)",
"ref_id": "BIBREF8"
},
{
"start": 362,
"end": 365,
"text": "GTM",
"ref_id": null
},
{
"start": 366,
"end": 388,
"text": "(Melamed et al., 2003)",
"ref_id": "BIBREF21"
},
{
"start": 391,
"end": 396,
"text": "ROUGE",
"ref_id": null
},
{
"start": 397,
"end": 417,
"text": "(Lin and Och, 2004a)",
"ref_id": "BIBREF16"
},
{
"start": 427,
"end": 453,
"text": "(Banerjee and Lavie, 2005)",
"ref_id": "BIBREF4"
},
{
"start": 464,
"end": 485,
"text": "(Snover et al., 2006)",
"ref_id": "BIBREF27"
},
{
"start": 722,
"end": 748,
"text": "(Culy and Riehemann, 2003)",
"ref_id": "BIBREF7"
},
{
"start": 860,
"end": 879,
"text": "(Zhou et al., 2006;",
"ref_id": "BIBREF28"
},
{
"start": 880,
"end": 907,
"text": "Kauchak and Barzilay, 2006;",
"ref_id": "BIBREF12"
},
{
"start": 908,
"end": 931,
"text": "Owczarzak et al., 2006)",
"ref_id": "BIBREF23"
},
{
"start": 1079,
"end": 1101,
"text": "(Liu and Gildea, 2005)",
"ref_id": "BIBREF18"
},
{
"start": 1130,
"end": 1152,
"text": "(Liu and Gildea, 2005;",
"ref_id": "BIBREF18"
},
{
"start": 1153,
"end": 1172,
"text": "Amig\u00f3 et al., 2006;",
"ref_id": "BIBREF3"
},
{
"start": 1173,
"end": 1194,
"text": "Mehay and Brew, 2007;",
"ref_id": "BIBREF20"
},
{
"start": 1195,
"end": 1218,
"text": "Owczarzak et al., 2007)",
"ref_id": "BIBREF24"
},
{
"start": 1326,
"end": 1353,
"text": "(Gim\u00e9nez and M\u00e0rquez, 2007)",
"ref_id": "BIBREF10"
},
{
"start": 1429,
"end": 1444,
"text": "(Gim\u00e9nez, 2007)",
"ref_id": "BIBREF11"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "However, none of current metrics provides, in isolation, a global measure of quality. Indeed, all metrics focus on partial aspects of quality. The main problem of relying on partial metrics is that we may obtain biased evaluations, which may lead us to derive inaccurate conclusions. For instance, Callison-Burch et al. (2006) and Koehn and Monz (2006) have recently reported several problematic cases related to the automatic evaluation of systems oriented towards maximizing different quality aspects. Corroborating the findings by Culy and Riehemann (2003) , they showed that BLEU overrates SMT systems with respect to other types of systems, such as rule-based, or human-aided. The reason is that SMT systems are likelier to match the sublanguage (e.g., lexical choice and order) represented by the set of reference translations. We argue that, in order to perform more robust, i.e., less biased, automatic MT evaluations, different quality dimensions should be jointly taken into account.",
"cite_spans": [
{
"start": 298,
"end": 326,
"text": "Callison-Burch et al. (2006)",
"ref_id": "BIBREF5"
},
{
"start": 331,
"end": 352,
"text": "Koehn and Monz (2006)",
"ref_id": "BIBREF13"
},
{
"start": 534,
"end": 559,
"text": "Culy and Riehemann (2003)",
"ref_id": "BIBREF7"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "A natural solution to this challenge consists in combining the scores conferred by different metrics, ideally covering a heterogeneous set of quality aspects. In the last few years, several approaches to metric combination have been suggested (Kulesza and Shieber, 2004; Liu and Gildea, 2007; Albrecht and Hwa, 2007a) . In spite of working on a limited set of quality aspects, mostly lexical features, these approaches have provided effective means of combining different metrics into a single measure of quality. All these methods implement a parametric combination scheme. Their models involve a number of parameters whose weight must be adjusted (see further details in Section 2).",
"cite_spans": [
{
"start": 243,
"end": 270,
"text": "(Kulesza and Shieber, 2004;",
"ref_id": "BIBREF14"
},
{
"start": 271,
"end": 292,
"text": "Liu and Gildea, 2007;",
"ref_id": "BIBREF19"
},
{
"start": 293,
"end": 317,
"text": "Albrecht and Hwa, 2007a)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "As an alternative path towards heterogeneous MT evaluation, in this work, we explore the possibility of relying on non-parametric combination schemes, in which metrics are combined without having to adjust their relative importance (see Section 3). We have studied their ability to integrate a wide set of metrics operating at different linguistic levels (e.g., lexical, syntactic and semantic) over several evaluation scenarios (see Section 4). We show that nonparametric schemes offer a valid means of putting different quality dimensions together, effectively yielding a significantly improved evaluation quality, both in terms of human likeness and human acceptability. We have also verified that these methods port well across test beds.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Approaches to metric combination require two important ingredients:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "Combination Scheme, i.e., how to combine several metric scores into a single score. As pointed out in Section 1, we distinguish between parametric and non-parametric schemes.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "Meta-Evaluation Criterion, i.e., how to evaluate the quality of a metric combination. The two most prominent meta-evaluation criteria are:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "\u2022 Human Acceptability: Metrics are evaluated in terms of their ability to capture the degree of acceptability to humans of automatic translations, i.e., their ability to emulate human assessors. The underlying assumption is that 'good' translations should be acceptable to human evaluators. Human acceptability is usually measured on the basis of correlation between automatic metric scores and human assessments of translation quality 2 . \u2022 Human Likeness: Metrics are evaluated in terms of their ability to capture the features which distinguish human from automatic translations. The underlying assumption is that 'good' translations should resemble human translations. Human likeness is usually measured on the basis of discriminative power (Lin and Och, 2004b; Amig\u00f3 et al., 2005 ).",
"cite_spans": [
{
"start": 745,
"end": 765,
"text": "(Lin and Och, 2004b;",
"ref_id": "BIBREF17"
},
{
"start": 766,
"end": 784,
"text": "Amig\u00f3 et al., 2005",
"ref_id": "BIBREF2"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "In the following, we describe the most relevant approaches to metric combination suggested in recent literature. All are parametric, and most of them are based on machine learning techniques. We distinguish between approaches relying on human likeness and approaches relying on human acceptability.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "The first approach to metric combination based on human likeness was that by Corston-Oliver et al. (2001) who used decision trees to distinguish between human-generated ('good') and machinegenerated ('bad') translations. They focused on evaluating only the well-formedness of automatic translations (i.e., subaspects of fluency), obtaining high levels of classification accuracy. Kulesza and Shieber (2004) extended the approach by Corston-Oliver et al. (2001) to take into account other aspects of quality further than fluency alone. Instead of decision trees, they trained Support Vector Machine (SVM) classifiers. They used features inspired by well-known metrics such as BLEU, NIST, WER, and PER. Metric quality was evaluated both in terms of classification accuracy and correlation with human assessments at the sentence level.",
"cite_spans": [
{
"start": 77,
"end": 105,
"text": "Corston-Oliver et al. (2001)",
"ref_id": "BIBREF6"
},
{
"start": 380,
"end": 406,
"text": "Kulesza and Shieber (2004)",
"ref_id": "BIBREF14"
},
{
"start": 432,
"end": 460,
"text": "Corston-Oliver et al. (2001)",
"ref_id": "BIBREF6"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Approaches based on Human Likeness",
"sec_num": "2.1"
},
{
"text": "A significant improvement with respect to standard individual metrics was reported. Gamon et al. (2005) presented a similar approach which, in addition, had the interesting property that the set of human and automatic translations could be independent, i.e., human translations were not required to correspond, as references, to the set of automatic translations.",
"cite_spans": [
{
"start": 84,
"end": 103,
"text": "Gamon et al. (2005)",
"ref_id": "BIBREF9"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Approaches based on Human Likeness",
"sec_num": "2.1"
},
{
"text": "Quirk 2004applied supervised machine learning algorithms (e.g., perceptrons, SVMs, decision trees, and linear regression) to approximate human quality judgements instead of distinguishing between human and automatic translations. Similarly to the work by Gamon et al. (2005) their approach does not require human references.",
"cite_spans": [
{
"start": 255,
"end": 274,
"text": "Gamon et al. (2005)",
"ref_id": "BIBREF9"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Approaches based on Human Acceptability",
"sec_num": "2.2"
},
{
"text": "More recently, Albrecht and Hwa (2007a; 2007b ) re-examined the SVM classification approach by Kulesza and Shieber (2004) and, inspired by the work of Quirk (2004) , suggested a regression-based learning approach to metric combination, with and without human references. The regression model learns a continuous function that approximates human assessments in training examples.",
"cite_spans": [
{
"start": 15,
"end": 39,
"text": "Albrecht and Hwa (2007a;",
"ref_id": "BIBREF0"
},
{
"start": 40,
"end": 45,
"text": "2007b",
"ref_id": "BIBREF1"
},
{
"start": 95,
"end": 121,
"text": "Kulesza and Shieber (2004)",
"ref_id": "BIBREF14"
},
{
"start": 151,
"end": 163,
"text": "Quirk (2004)",
"ref_id": "BIBREF26"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Approaches based on Human Acceptability",
"sec_num": "2.2"
},
{
"text": "As an alternative to methods based on machine learning techniques, Liu and Gildea (2007) suggested a simpler approach based on linear combinations of metrics. They followed a Maximum Correlation Training, i.e., the weight for the contribution of each metric to the overall score was adjusted so as to maximize the level of correlation with human assessments at the sentence level.",
"cite_spans": [
{
"start": 67,
"end": 88,
"text": "Liu and Gildea (2007)",
"ref_id": "BIBREF19"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Approaches based on Human Acceptability",
"sec_num": "2.2"
},
{
"text": "As expected, all approaches based on human acceptability have been shown to outperform that of Kulesza and Shieber (2004) in terms of human acceptability. However, no results in terms of human likeness have been provided, thus leaving these comparative studies incomplete.",
"cite_spans": [
{
"start": 95,
"end": 121,
"text": "Kulesza and Shieber (2004)",
"ref_id": "BIBREF14"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Approaches based on Human Acceptability",
"sec_num": "2.2"
},
{
"text": "In this section, we provide a brief description of the QARLA framework (Amig\u00f3 et al., 2005) , which is, to our knowledge, the only existing non-parametric approach to metric combination. QARLA is nonparametric because, rather than assigning a weight to the contribution of each metric, the evaluation of a given automatic output a is addressed through a set of independent probabilistic tests (one per metric) in which the goal is to falsify the hypothesis that a is a human reference. The input for QARLA is a set of test cases A (i.e., automatic translations), a set of similarity metrics X, and a set of models R (i.e., human references) for each test case. With such a testbed, QARLA provides the two essential ingredients required for metric combination:",
"cite_spans": [
{
"start": 71,
"end": 91,
"text": "(Amig\u00f3 et al., 2005)",
"ref_id": "BIBREF2"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Non-Parametric Combination Schemes",
"sec_num": "3"
},
{
"text": "Combination Scheme Metrics are combined inside the QUEEN measure. QUEEN operates under the unanimity principle, i.e., the assumption that a 'good' translation must be similar to all human references according to all metrics. QUEEN X (a) is defined as the probability, over R \u00d7 R \u00d7 R, that, for every metric in X, the automatic translation a is more similar to a human reference r than two other references, r and r , to each other. Formally:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Non-Parametric Combination Schemes",
"sec_num": "3"
},
{
"text": "QUEEN X,R (a) = P rob(\u2200x \u2208 X : x(a, r) \u2265 x(r , r ))",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Non-Parametric Combination Schemes",
"sec_num": "3"
},
{
"text": "where x(a, r) stands for the similarity between a and r according to the metric x. Thus, QUEEN allows us to combine different similarity metrics into a single measure, without having to adjust their relative importance. Besides, QUEEN offers two other important advantages which make it really suitable for metric combination: (i) it is robust against metric redundancy, i.e., metrics covering similar aspects of quality, and (ii) it is not affected by the scale properties of metrics. The main drawback of the QUEEN measure is that it requires at least three human references, when in most cases only a single reference translation is available.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Non-Parametric Combination Schemes",
"sec_num": "3"
},
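{
"text": "As an illustration only, a minimal sketch of how QUEEN could be computed, assuming each metric is available as a similarity function x(candidate, reference) returning a real number; the function and variable names below are illustrative and are not part of the IQMT implementation:\n\nfrom itertools import permutations\n\ndef queen(a, references, metrics):\n    # Fraction of ordered triples (r, r1, r2) of distinct references for which\n    # a is at least as similar to r as r1 is to r2 under every metric in X.\n    triples = list(permutations(references, 3))\n    if not triples:\n        return 0.0  # QUEEN needs at least three references\n    hits = sum(1 for r, r1, r2 in triples\n               if all(x(a, r) >= x(r1, r2) for x in metrics))\n    return hits / len(triples)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Non-Parametric Combination Schemes",
"sec_num": "3"
},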
{
"text": "Metric quality is evaluated using the KING measure of human likeness. All human references are assumed to be equally optimal and, while they are likely to be different, the best similarity metric is the one that identifies and uses the features that are common to all human references, grouping them and separating them from automatic translations. Based on QUEEN, KING represents the probability that a human reference does not receive a lower score than the score attained by any automatic translation. Formally:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Meta-evaluation Criterion",
"sec_num": null
},
{
"text": "KING_{A,R}(X) = Prob(\u2200a \u2208 A : QUEEN_{X,R\u2212{r}}(r) \u2265 QUEEN_{X,R\u2212{r}}(a))",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Meta-evaluation Criterion",
"sec_num": null
},
{
"text": "KING operates, therefore, on the basis of discriminative power. The closest measure to KING is ORANGE (Lin and Och, 2004b) , which is, however, not intended for the purpose of metric combination.",
"cite_spans": [
{
"start": 102,
"end": 122,
"text": "(Lin and Och, 2004b)",
"ref_id": "BIBREF17"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Meta-evaluation Criterion",
"sec_num": null
},
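{
"text": "Continuing the sketch above, and under the same assumptions about the metric functions, KING can be approximated by holding out each reference r in turn and checking whether it outscores every automatic translation under QUEEN computed over the remaining references:\n\ndef king(automatic, references, metrics):\n    # Fraction of held-out references that score at least as high, under QUEEN,\n    # as every automatic translation (needs at least four references overall).\n    hits = 0\n    for i, r in enumerate(references):\n        rest = references[:i] + references[i + 1:]\n        q_ref = queen(r, rest, metrics)\n        if all(queen(a, rest, metrics) <= q_ref for a in automatic):\n            hits += 1\n    return hits / len(references)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Meta-evaluation Criterion",
"sec_num": null
},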
{
"text": "Apart from being non-parametric, QARLA exhibits another important feature which differentiates it form other approaches; besides considering the similarity between automatic translations and human references, QARLA also takes into account the distribution of similarities among human references.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Meta-evaluation Criterion",
"sec_num": null
},
{
"text": "However, QARLA is not well suited to port from human likeness to human acceptability. The reason is that QUEEN is, by definition, a very restrictive measure -a 'good' translation must be similar to all human references according to all metrics. Thus, as the number of metrics increases, it becomes easier to find a metric which does not satisfy the QUEEN assumption. This causes QUEEN values to get close to zero, which turns correlation with human assessments into an impractical meta-evaluation measure.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Meta-evaluation Criterion",
"sec_num": null
},
{
"text": "We have simulated a non-parametric scheme based on human acceptability by working on uniformly averaged linear combinations (ULC) of metrics. Our approach is similar to that of Liu and Gildea (2007) except that in our case all the metrics in the combination are equally important 3 . In other words, ULC is indeed a particular case of a parametric scheme, in which the contribution of each metric is not adjusted. Formally:",
"cite_spans": [
{
"start": 177,
"end": 198,
"text": "Liu and Gildea (2007)",
"ref_id": "BIBREF19"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Meta-evaluation Criterion",
"sec_num": null
},
{
"text": "ULC X (a, R) = 1 |X| x\u2208X x(a, R)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Meta-evaluation Criterion",
"sec_num": null
},
{
"text": "where X is the metric set, and x(a, R) is the similarity between the automatic translation a and the set of references R, for the given test case, according to the metric x. Since correlation with human assessments at the system level is vaguely informative (it is often estimated on very few system samples), we ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Meta-evaluation Criterion",
"sec_num": null
},
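{
"text": "A corresponding sketch of ULC, assuming each metric x already returns a score x(a, R) for a candidate against the full reference set (footnote 3 notes that the metrics are assumed to share a common score range); names are again illustrative:\n\ndef ulc(a, references, metrics):\n    # Uniformly averaged linear combination of the individual metric scores.\n    return sum(x(a, references) for x in metrics) / len(metrics)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Meta-evaluation Criterion",
"sec_num": null
},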
{
"text": "In this section, we study the behavior of the two combination schemes presented in Section 3 in the context of four different evaluation scenarios.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experimental Work",
"sec_num": "4"
},
{
"text": "We use the test beds from the 2004 and 2005 NIST MT Evaluation Campaigns (Le and Przybocki, 2005) 4 . Both campaigns include two different translations exercises: Arabic-to-English ('AE') and Chinese-to-English ('CE'). Human assessments of adequacy and fluency are available for a subset of sentences, each evaluated by two different human judges. See, in Table 1 , a brief numerical description including the number of human references and system outputs available, as well as the number of sentences per output, and the number of system outputs and sentences per system assessed. For metric computation, we have used the IQMT v2.1, which includes metrics at different linguistic levels (lexical, shallow-syntactic, syntactic, shallowsemantic, and semantic). A detailed description may be found in (Gim\u00e9nez, 2007) 5 .",
"cite_spans": [
{
"start": 73,
"end": 99,
"text": "(Le and Przybocki, 2005) 4",
"ref_id": null
},
{
"start": 799,
"end": 814,
"text": "(Gim\u00e9nez, 2007)",
"ref_id": "BIBREF11"
}
],
"ref_spans": [
{
"start": 356,
"end": 363,
"text": "Table 1",
"ref_id": "TABREF1"
}
],
"eq_spans": [],
"section": "Experimental Settings",
"sec_num": "4.1"
},
{
"text": "Prior to studying the effects of metric combination, we study the isolated behaviour of individual metrics. We have selected a set of metric representatives from each linguistic level. The first observation is that the two metaevaluation criteria provide very similar metric quality rankings for a same test bed. This seems to indicate that there is a relationship between the two meta-evaluation criteria employed. We have confirmed this intuition by computing the Pearson correlation coefficient between values in columns 1 to 4 and their counterparts in columns 5 to 8. There exists a high correlation (R = 0.79).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluating Individual Metrics",
"sec_num": "4.2"
},
{
"text": "A second observation is that metric quality varies significantly from task to task. This is due to the significant differences among the test beds employed. These are related to three main aspects: language pair, translation domain, and system typology. For instance, notice that most metrics exhibit a lower quality in the case of the 'AE 05 ' test bed. The reason is that, while in the rest of test beds all systems are statistical, the 'AE 05 ' test bed presents the particularity of providing automatic translations produced by heterogeneous MT systems (i.e., systems belonging to different paradigms) 6 . The fact that most systems are statistical also explains why, in general, lexical metrics exhibit a higher quality. However, highest levels of quality are not in all cases attained by metrics at the lexical level (see highlighted values). In fact, there is only one metric, 'ROUGEW ' (based on lexical matching), which is consistently among the top-scoring in all test beds according to both meta-evaluation criteria. The underlying cause is simple: current metrics do not provide a global measure of quality, but account only for partial aspects of it. Apart from evincing the importance of the meta-evaluation process, these results strongly suggest the need for conducting heterogeneous MT evaluations. ",
"cite_spans": [
{
"start": 606,
"end": 607,
"text": "6",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluating Individual Metrics",
"sec_num": "4.2"
},
{
"text": "In that respect, we study the applicability of the two combination strategies presented. Optimal metric sets are determined by maximizing over the corresponding meta-evaluation measure (KING or R snt ). However, because exploring all possible combinations was not viable, we have used a simple algorithm which performs an approximate search. First, individual metrics are ranked according to their quality. Then, following that order, metrics are added to the optimal set only if in doing so the global quality increases. Since no training is required it has not been necessary to keep a held-out portion of the data for test (see Section 4.4 for further discussion).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Finding Optimal Metric Combinations",
"sec_num": "4.3"
},
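{
"text": "The approximate search described above can be sketched as a greedy forward selection; evaluate_set is a placeholder for the chosen meta-evaluation measure (KING or R_snt) applied to a candidate metric set, and is not a function of the IQMT package:\n\ndef greedy_metric_selection(metrics, evaluate_set):\n    # Rank metrics by individual quality, then add each one to the current set\n    # only if the combined quality strictly improves.\n    ranked = sorted(metrics, key=lambda m: evaluate_set([m]), reverse=True)\n    selected = [ranked[0]]\n    best = evaluate_set(selected)\n    for m in ranked[1:]:\n        candidate_score = evaluate_set(selected + [m])\n        if candidate_score > best:\n            selected.append(m)\n            best = candidate_score\n    return selected, best",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Finding Optimal Metric Combinations",
"sec_num": "4.3"
},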
{
"text": "Optimal metric sets are displayed in Table 3 . Inside each set, metrics are sorted in decreasing quality order. The 'Optimal Combination' line in Table 2 shows the quality attained by these sets, combined under QUEEN in the case of KING optimization, and under ULC in the case of optimizing over R snt . In most cases optimal sets consist of metrics operating at different linguistic levels, mostly at the lexical and syntactic levels. This is coherent with the findings in Section 4.2. Metrics at the semantic level are selected only in two cases, corresponding to the R snt optimization in 'AE 04 ' and 'CE 04 ' test beds. Also in two cases, corresponding to the KING optimization in 'AE 04 ' and 'CE 05 ' test beds, it has not been possible to find any metric combination which outperforms the best individual metric. This is not a discouraging result. After all, in these cases, the best metric alone achieves already a very high quality (0.79 and 0.70, respectively). The fact that a single feature suffices to discern between manual and automatic translations indicates that MT systems are easily distinguishable, possibly because of their low quality and/or because they are all based on the same translation paradigm.",
"cite_spans": [],
"ref_spans": [
{
"start": 37,
"end": 44,
"text": "Table 3",
"ref_id": "TABREF4"
},
{
"start": 146,
"end": 153,
"text": "Table 2",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Finding Optimal Metric Combinations",
"sec_num": "4.3"
},
{
"text": "It can be argued that metric set optimization is itself a training process; each metric would have an associated binary parameter controlling whether it is selected or not. For that reason, in Table 4 , we have analyzed the portability of optimal metric sets (i) across test beds and (ii) across combination strategies. As to portability across test beds (i.e., across language pairs and years), the reader must focus on the cells for which the meta-evaluation criterion guiding the metric set optimization matches the criterion used in the evaluation, i.e., the top-left and bottom-right 16-cell quadrangles. The fact that the 4 values in each subcolumn are in a very similar range confirms that optimal metric sets port well across test beds. We have also studied the portability of optimal metric sets across combination strategies. In other words, although QUEEN and ULC are thought to operate on metric combinations respectively optimized on the basis of human likeness and human acceptability, we have studied the effects of applying either measure over metric combinations optimized on the basis of the alternative metaevaluation criterion. In this case, the reader must compare top-left vs. bottom-left (KING) and topright vs. bottom-right (R snt ) 16-cell quadrangles. It can be clearly seen that optimal metric sets, in general, do not port well across meta-evaluation criteria, particularly from human likeness to human acceptability. However, interestingly, in the case of 'AE 05 ' (i.e., heterogeneous systems), the optimal metric set ports well from human acceptability to human likeness. We speculate that system heterogeneity has contributed positively for the sake of robustness.",
"cite_spans": [],
"ref_spans": [
{
"start": 193,
"end": 200,
"text": "Table 4",
"ref_id": null
}
],
"eq_spans": [],
"section": "Portability",
"sec_num": "4.4"
},
{
"text": "As an alternative to current parametric combination techniques, we have presented two different meth- Table 4 : Portability of combination strategies ods: a genuine non-parametric method based on human likeness, and a parametric method based human acceptability in which the parameter weights are set equiprobable. We have shown that both strategies may yield a significantly improved quality by combining metrics at different linguistic levels. Besides, we have shown that these methods generalize well across test beds. Thus, a valid path towards heterogeneous automatic MT evaluation has been traced. We strongly believe that future MT evaluation campaigns should benefit from these results specially for the purpose of comparing systems based on different paradigms. These techniques could also be used to build better MT systems by allowing system developers to perform more accurate error analyses and less biased adjustments of system parameters.",
"cite_spans": [],
"ref_spans": [
{
"start": 102,
"end": 109,
"text": "Table 4",
"ref_id": null
}
],
"eq_spans": [],
"section": "Conclusions",
"sec_num": "5"
},
{
"text": "As an additional result, we have found that there is a tight relationship between human acceptability and human likeness. This result, coherent with the findings by Amig\u00f3 et al. (2006) , suggests that the two criteria are interchangeable. This would be a point in favour of combination schemes based on human likeness, since human assessments -which are expensive to acquire, subjective and not reusableare not required. We also interpret this result as an indication that human assessors probably behave in many cases in a discriminative manner. For each test case, assessors would inspect the source sentence and the set of human references trying to identify the features which 'good' translations should comply with, for instance regarding adequacy and fluency. Then, they would evaluate automatic translations roughly according to the number and relevance of the features they share and the ones they do not.",
"cite_spans": [
{
"start": 165,
"end": 184,
"text": "Amig\u00f3 et al. (2006)",
"ref_id": "BIBREF3"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions",
"sec_num": "5"
},
{
"text": "For future work, we plan to study the integration of finer features as well as to conduct a rigorous comparison between parametric and non-parametric combination schemes. This may involve reproducing the works by Kulesza and Shieber (2004) and Albrecht and Hwa (2007a) . This would also allow us to evaluate their approaches in terms of both human likeness and human acceptability, and not only on the latter criterion as they have been evaluated so far.",
"cite_spans": [
{
"start": 213,
"end": 239,
"text": "Kulesza and Shieber (2004)",
"ref_id": "BIBREF14"
},
{
"start": 244,
"end": 268,
"text": "Albrecht and Hwa (2007a)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions",
"sec_num": "5"
},
{
"text": "ROUGE and METEOR may consider morphological variations. METEOR may also look up for synonyms in WordNet.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "Usually adequacy, fluency, or a combination of the two.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "That would be assuming that all metrics operate in the same range of values, which is not always the case.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "http://www.nist.gov/speech/tests/ summaries/2005/mt05.htm 5 The IQMT Framework may be freely downloaded from http://www.lsi.upc.edu/\u02dcnlp/IQMT.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "Specifically, all systems are statistical except one which is human-aided.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [
{
"text": "This research has been funded by the Spanish Ministry of Education and Science, project OpenMT (TIN2006-15307-C03-02). Our NLP group has been recognized as a Quality Research Group (2005 SGR-00130) by DURSI, the Research Department of the Catalan Government. We are thankful to Enrique Amig\u00f3, for his generous help and valuable comments. We are also grateful to the NIST MT Evaluation Campaign organizers, and participants who agreed to share their system outputs and human assessments for the purpose of this research.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acknowledgements",
"sec_num": null
}
],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "A Reexamination of Machine Learning Approaches for Sentence-Level MT Evaluation",
"authors": [
{
"first": "Joshua",
"middle": [],
"last": "Albrecht",
"suffix": ""
},
{
"first": "Rebecca",
"middle": [],
"last": "Hwa",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of ACL",
"volume": "",
"issue": "",
"pages": "880--887",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Joshua Albrecht and Rebecca Hwa. 2007a. A Re- examination of Machine Learning Approaches for Sentence-Level MT Evaluation. In Proceedings of ACL, pages 880-887.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Regression for Sentence-Level MT Evaluation with Pseudo References",
"authors": [
{
"first": "Joshua",
"middle": [],
"last": "Albrecht",
"suffix": ""
},
{
"first": "Rebecca",
"middle": [],
"last": "Hwa",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of ACL",
"volume": "",
"issue": "",
"pages": "296--303",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Joshua Albrecht and Rebecca Hwa. 2007b. Regression for Sentence-Level MT Evaluation with Pseudo Refer- ences. In Proceedings of ACL, pages 296-303.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "QARLA: a Framework for the Evaluation of Automatic Sumarization",
"authors": [
{
"first": "Enrique",
"middle": [],
"last": "Amig\u00f3",
"suffix": ""
},
{
"first": "Julio",
"middle": [],
"last": "Gonzalo",
"suffix": ""
},
{
"first": "Anselmo",
"middle": [],
"last": "Pe\u00f1as",
"suffix": ""
},
{
"first": "Felisa",
"middle": [],
"last": "Verdejo",
"suffix": ""
}
],
"year": 2005,
"venue": "Proceedings of the 43th Annual Meeting of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Enrique Amig\u00f3, Julio Gonzalo, Anselmo Pe\u00f1as, and Fe- lisa Verdejo. 2005. QARLA: a Framework for the Evaluation of Automatic Sumarization. In Proceed- ings of the 43th Annual Meeting of the Association for Computational Linguistics.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "MT Evaluation: Human-Like vs. Human Acceptable",
"authors": [
{
"first": "Enrique",
"middle": [],
"last": "Amig\u00f3",
"suffix": ""
},
{
"first": "Jes\u00fas",
"middle": [],
"last": "Gim\u00e9nez",
"suffix": ""
},
{
"first": "Julio",
"middle": [],
"last": "Gonzalo",
"suffix": ""
},
{
"first": "Llu\u00eds",
"middle": [],
"last": "M\u00e0rquez",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of COLING-ACL06",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Enrique Amig\u00f3, Jes\u00fas Gim\u00e9nez, Julio Gonzalo, and Llu\u00eds M\u00e0rquez. 2006. MT Evaluation: Human-Like vs. Hu- man Acceptable. In Proceedings of COLING-ACL06.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "METEOR: An Automatic Metric for MT Evaluation with Improved Correlation with Human Judgments",
"authors": [
{
"first": "Satanjeev",
"middle": [],
"last": "Banerjee",
"suffix": ""
},
{
"first": "Alon",
"middle": [],
"last": "Lavie",
"suffix": ""
}
],
"year": 2005,
"venue": "Proceedings of ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Satanjeev Banerjee and Alon Lavie. 2005. METEOR: An Automatic Metric for MT Evaluation with Im- proved Correlation with Human Judgments. In Pro- ceedings of ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Re-evaluating the Role of BLEU in Machine Translation Research",
"authors": [
{
"first": "Chris",
"middle": [],
"last": "Callison-Burch",
"suffix": ""
},
{
"first": "Miles",
"middle": [],
"last": "Osborne",
"suffix": ""
},
{
"first": "Philipp",
"middle": [],
"last": "Koehn",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of EACL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Chris Callison-Burch, Miles Osborne, and Philipp Koehn. 2006. Re-evaluating the Role of BLEU in Ma- chine Translation Research. In Proceedings of EACL.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "A Machine Learning Approach to the Automatic Evaluation of Machine Translation",
"authors": [
{
"first": "Simon",
"middle": [],
"last": "Corston-Oliver",
"suffix": ""
},
{
"first": "Michael",
"middle": [],
"last": "Gamon",
"suffix": ""
},
{
"first": "Chris",
"middle": [],
"last": "Brockett",
"suffix": ""
}
],
"year": 2001,
"venue": "Proceedings of ACL",
"volume": "",
"issue": "",
"pages": "140--147",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Simon Corston-Oliver, Michael Gamon, and Chris Brockett. 2001. A Machine Learning Approach to the Automatic Evaluation of Machine Translation. In Proceedings of ACL, pages 140-147.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "The Limits of N-gram Translation Evaluation Metrics",
"authors": [
{
"first": "Christopher",
"middle": [],
"last": "Culy",
"suffix": ""
},
{
"first": "Susanne",
"middle": [
"Z"
],
"last": "Riehemann",
"suffix": ""
}
],
"year": 2003,
"venue": "Proceedings of MT-SUMMIT IX",
"volume": "",
"issue": "",
"pages": "1--8",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Christopher Culy and Susanne Z. Riehemann. 2003. The Limits of N-gram Translation Evaluation Metrics. In Proceedings of MT-SUMMIT IX, pages 1-8.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Automatic Evaluation of Machine Translation Quality Using N-gram Co-Occurrence Statistics",
"authors": [
{
"first": "George",
"middle": [],
"last": "Doddington",
"suffix": ""
}
],
"year": 2002,
"venue": "Proceedings of the 2nd IHLT",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "George Doddington. 2002. Automatic Evaluation of Machine Translation Quality Using N-gram Co- Occurrence Statistics. In Proceedings of the 2nd IHLT.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Sentence-Level MT evaluation without reference translations: beyond language modeling",
"authors": [
{
"first": "Michael",
"middle": [],
"last": "Gamon",
"suffix": ""
},
{
"first": "Anthony",
"middle": [],
"last": "Aue",
"suffix": ""
},
{
"first": "Martine",
"middle": [],
"last": "Smets",
"suffix": ""
}
],
"year": 2005,
"venue": "Proceedings of EAMT",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Michael Gamon, Anthony Aue, and Martine Smets. 2005. Sentence-Level MT evaluation without refer- ence translations: beyond language modeling. In Pro- ceedings of EAMT.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Linguistic Features for Automatic Evaluation of Heterogeneous MT Systems",
"authors": [
{
"first": "Jes\u00fas",
"middle": [],
"last": "Gim\u00e9nez",
"suffix": ""
},
{
"first": "Llu\u00eds",
"middle": [],
"last": "M\u00e0rquez",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the ACL Workshop on Statistical Machine Translation",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jes\u00fas Gim\u00e9nez and Llu\u00eds M\u00e0rquez. 2007. Linguistic Features for Automatic Evaluation of Heterogeneous MT Systems. In Proceedings of the ACL Workshop on Statistical Machine Translation.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "IQMT v 2.1. Technical Manual",
"authors": [
{
"first": "Jes\u00fas",
"middle": [],
"last": "Gim\u00e9nez",
"suffix": ""
}
],
"year": 2007,
"venue": "TALP Research Center. LSI Department",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jes\u00fas Gim\u00e9nez. 2007. IQMT v 2.1. Technical Manual. Technical report, TALP Research Center. LSI Department. http://www.lsi.upc.edu/\u02dcnlp/IQMT/- IQMT.v2.1.pdf.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Paraphrasing for Automatic Evaluation",
"authors": [
{
"first": "David",
"middle": [],
"last": "Kauchak",
"suffix": ""
},
{
"first": "Regina",
"middle": [],
"last": "Barzilay",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of NLH-NAACL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "David Kauchak and Regina Barzilay. 2006. Paraphras- ing for Automatic Evaluation. In Proceedings of NLH- NAACL.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Manual and Automatic Evaluation of Machine Translation between European Languages",
"authors": [
{
"first": "Philipp",
"middle": [],
"last": "Koehn",
"suffix": ""
},
{
"first": "Christof",
"middle": [],
"last": "Monz",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of the Workshop on Statistical Machine Translation",
"volume": "",
"issue": "",
"pages": "102--121",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Philipp Koehn and Christof Monz. 2006. Manual and Automatic Evaluation of Machine Translation between European Languages. In Proceedings of the Workshop on Statistical Machine Translation, pages 102-121.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "A learning approach to improving sentence-level MT evaluation",
"authors": [
{
"first": "Alex",
"middle": [],
"last": "Kulesza",
"suffix": ""
},
{
"first": "Stuart",
"middle": [
"M"
],
"last": "Shieber",
"suffix": ""
}
],
"year": 2004,
"venue": "Proceedings of the 10th International Conference on Theoretical and Methodological Issues in Machine Translation",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Alex Kulesza and Stuart M. Shieber. 2004. A learning approach to improving sentence-level MT evaluation. In Proceedings of the 10th International Conference on Theoretical and Methodological Issues in Machine Translation.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "NIST 2005 machine translation evaluation official results",
"authors": [
{
"first": "Audrey",
"middle": [],
"last": "Le",
"suffix": ""
},
{
"first": "Mark",
"middle": [],
"last": "Przybocki",
"suffix": ""
}
],
"year": 2005,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Audrey Le and Mark Przybocki. 2005. NIST 2005 ma- chine translation evaluation official results. Technical report, NIST, August.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Automatic Evaluation of Machine Translation Quality Using Longest Common Subsequence and Skip-Bigram Statics",
"authors": [
{
"first": "Chin-Yew",
"middle": [],
"last": "Lin",
"suffix": ""
},
{
"first": "Franz Josef",
"middle": [],
"last": "Och",
"suffix": ""
}
],
"year": 2004,
"venue": "Proceedings of ACL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Chin-Yew Lin and Franz Josef Och. 2004a. Auto- matic Evaluation of Machine Translation Quality Us- ing Longest Common Subsequence and Skip-Bigram Statics. In Proceedings of ACL.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "ORANGE: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
"authors": [
{
"first": "Chin-Yew",
"middle": [],
"last": "Lin",
"suffix": ""
},
{
"first": "Franz Josef",
"middle": [],
"last": "Och",
"suffix": ""
}
],
"year": 2004,
"venue": "Proceedings of COLING",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Chin-Yew Lin and Franz Josef Och. 2004b. ORANGE: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation. In Proceedings of COLING.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "Syntactic Features for Evaluation of Machine Translation",
"authors": [
{
"first": "Ding",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Daniel",
"middle": [],
"last": "Gildea",
"suffix": ""
}
],
"year": 2005,
"venue": "Proceedings of ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Ding Liu and Daniel Gildea. 2005. Syntactic Features for Evaluation of Machine Translation. In Proceed- ings of ACL Workshop on Intrinsic and Extrinsic Eval- uation Measures for Machine Translation and/or Sum- marization.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "Source-Language Features and Maximum Correlation Training for Machine Translation Evaluation",
"authors": [
{
"first": "Ding",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Daniel",
"middle": [],
"last": "Gildea",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the 2007 Meeting of the North American chapter of the Association for Computational Linguistics (NAACL-07)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Ding Liu and Daniel Gildea. 2007. Source-Language Features and Maximum Correlation Training for Ma- chine Translation Evaluation. In Proceedings of the 2007 Meeting of the North American chapter of the As- sociation for Computational Linguistics (NAACL-07).",
"links": null
},
"BIBREF20": {
"ref_id": "b20",
"title": "BLEUATRE: Flattening Syntactic Dependencies for MT Evaluation",
"authors": [
{
"first": "Dennis",
"middle": [],
"last": "Mehay",
"suffix": ""
},
{
"first": "Chris",
"middle": [],
"last": "Brew",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the 11th Conference on Theoretical and Methodological Issues in Machine Translation (TMI)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Dennis Mehay and Chris Brew. 2007. BLEUATRE: Flattening Syntactic Dependencies for MT Evaluation. In Proceedings of the 11th Conference on Theoreti- cal and Methodological Issues in Machine Translation (TMI).",
"links": null
},
"BIBREF21": {
"ref_id": "b21",
"title": "Precision and Recall of Machine Translation",
"authors": [
{
"first": "I",
"middle": [],
"last": "",
"suffix": ""
},
{
"first": "Dan",
"middle": [],
"last": "Melamed",
"suffix": ""
},
{
"first": "Ryan",
"middle": [],
"last": "Green",
"suffix": ""
},
{
"first": "Joseph",
"middle": [
"P"
],
"last": "Turian",
"suffix": ""
}
],
"year": 2003,
"venue": "Proceedings of HLT/NAACL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "I. Dan Melamed, Ryan Green, and Joseph P. Turian. 2003. Precision and Recall of Machine Translation. In Proceedings of HLT/NAACL.",
"links": null
},
"BIBREF22": {
"ref_id": "b22",
"title": "Evaluation Tool for Machine Translation: Fast Evaluation for MT Research",
"authors": [
{
"first": "Sonja",
"middle": [],
"last": "Nie\u00dfen",
"suffix": ""
},
{
"first": "Franz",
"middle": [
"Josef"
],
"last": "Och",
"suffix": ""
},
{
"first": "Gregor",
"middle": [],
"last": "Leusch",
"suffix": ""
},
{
"first": "Hermann",
"middle": [],
"last": "Ney",
"suffix": ""
}
],
"year": 2000,
"venue": "Proceedings of the 2nd LREC",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sonja Nie\u00dfen, Franz Josef Och, Gregor Leusch, and Her- mann Ney. 2000. Evaluation Tool for Machine Trans- lation: Fast Evaluation for MT Research. In Proceed- ings of the 2nd LREC.",
"links": null
},
"BIBREF23": {
"ref_id": "b23",
"title": "Contextual Bitext-Derived Paraphrases in Automatic MT Evaluation",
"authors": [
{
"first": "Karolina",
"middle": [],
"last": "Owczarzak",
"suffix": ""
},
{
"first": "Declan",
"middle": [],
"last": "Groves",
"suffix": ""
},
{
"first": "Josef",
"middle": [],
"last": "Van Genabith",
"suffix": ""
},
{
"first": "Andy",
"middle": [],
"last": "Way",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas (AMTA)",
"volume": "",
"issue": "",
"pages": "148--155",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Karolina Owczarzak, Declan Groves, Josef Van Gen- abith, and Andy Way. 2006. Contextual Bitext- Derived Paraphrases in Automatic MT Evaluation. In Proceedings of the 7th Conference of the Associa- tion for Machine Translation in the Americas (AMTA), pages 148-155.",
"links": null
},
"BIBREF24": {
"ref_id": "b24",
"title": "Dependency-Based Automatic Evaluation for Machine Translation",
"authors": [
{
"first": "Karolina",
"middle": [],
"last": "Owczarzak",
"suffix": ""
},
{
"first": "Josef",
"middle": [],
"last": "Van Genabith",
"suffix": ""
},
{
"first": "Andy",
"middle": [],
"last": "Way",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of SSST, NAACL-HLT/AMTA Workshop on Syntax and Structure in Statistical Translation",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Karolina Owczarzak, Josef van Genabith, and Andy Way. 2007. Dependency-Based Automatic Evalua- tion for Machine Translation. In Proceedings of SSST, NAACL-HLT/AMTA Workshop on Syntax and Struc- ture in Statistical Translation.",
"links": null
},
"BIBREF25": {
"ref_id": "b25",
"title": "Bleu: a method for automatic evaluation of machine translation, RC22176, IBM",
"authors": [
{
"first": "Kishore",
"middle": [],
"last": "Papineni",
"suffix": ""
},
{
"first": "Salim",
"middle": [],
"last": "Roukos",
"suffix": ""
},
{
"first": "Todd",
"middle": [],
"last": "Ward",
"suffix": ""
},
{
"first": "Wei-Jing",
"middle": [],
"last": "Zhu",
"suffix": ""
}
],
"year": 2001,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2001. Bleu: a method for automatic evalu- ation of machine translation, RC22176, IBM. Techni- cal report, IBM T.J. Watson Research Center.",
"links": null
},
"BIBREF26": {
"ref_id": "b26",
"title": "Training a Sentence-Level Machine Translation Confidence Metric",
"authors": [
{
"first": "Chris",
"middle": [],
"last": "Quirk",
"suffix": ""
}
],
"year": 2004,
"venue": "Proceedings of LREC",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Chris Quirk. 2004. Training a Sentence-Level Ma- chine Translation Confidence Metric. In Proceedings of LREC.",
"links": null
},
"BIBREF27": {
"ref_id": "b27",
"title": "A Study of Translation Edit Rate with Targeted Human Annotation",
"authors": [
{
"first": "Matthew",
"middle": [],
"last": "Snover",
"suffix": ""
},
{
"first": "Bonnie",
"middle": [],
"last": "Dorr",
"suffix": ""
},
{
"first": "Richard",
"middle": [],
"last": "Schwartz",
"suffix": ""
},
{
"first": "Linnea",
"middle": [],
"last": "Micciulla",
"suffix": ""
},
{
"first": "John",
"middle": [],
"last": "Makhoul",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of AMTA",
"volume": "",
"issue": "",
"pages": "223--231",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Matthew Snover, Bonnie Dorr, Richard Schwartz, Lin- nea Micciulla, , and John Makhoul. 2006. A Study of Translation Edit Rate with Targeted Human Anno- tation. In Proceedings of AMTA, pages 223-231.",
"links": null
},
"BIBREF28": {
"ref_id": "b28",
"title": "Re-evaluating Machine Translation Results with Paraphrase Support",
"authors": [
{
"first": "Liang",
"middle": [],
"last": "Zhou",
"suffix": ""
},
{
"first": "Chin-Yew",
"middle": [],
"last": "Lin",
"suffix": ""
},
{
"first": "Eduard",
"middle": [],
"last": "Hovy",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of EMNLP",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Liang Zhou, Chin-Yew Lin, and Eduard Hovy. 2006. Re-evaluating Machine Translation Results with Para- phrase Support. In Proceedings of EMNLP.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"text": "Opt.K(AE.04) = {SP-NISTp} Opt.K(CE.04) = {ROUGEW , SP-NISTp, ROUGEL} Opt.K(AE.05) = {METEORwnsyn, SP-NISTp, DP-Or-*} Opt.K(CE.05) = {SP-NISTp} Opt.R(AE.04) = {ROUGEW , ROUGEL, CP-Oc-*, METEORwnsyn, DP-Or-*, DP-O l -*, GTM.e2, DR-Or-*, CP-STM} Opt.R(CE.04) = {ROUGEL, CP-Oc-*, ROUGEW , SP-Op-*, METEORwnsyn, DP-Or-*, GTM.e2, 1-WER, DR-Or-*} Opt.R(AE.05) = {DP-Or-*, ROUGEW } Opt.R(CE.05) = {ROUGEW , ROUGEL, DP GTM.e2,",
"type_str": "figure",
"num": null,
"uris": null
},
"TABREF1": {
"type_str": "table",
"text": "",
"num": null,
"content": "<table><tr><td>: Description of the test beds</td></tr><tr><td>evaluate metric quality in terms of correlation with</td></tr><tr><td>human assessments at the sentence level (R snt ). We</td></tr><tr><td>use the sum of adequacy and fluency to simulate a</td></tr><tr><td>global assessment of quality.</td></tr></table>",
"html": null
},
"TABREF2": {
"type_str": "table",
"text": "",
"num": null,
"content": "<table><tr><td>shows meta-</td></tr></table>",
"html": null
},
"TABREF3": {
"type_str": "table",
"text": "",
"num": null,
"content": "<table><tr><td>: Metric Meta-evaluation</td></tr></table>",
"html": null
},
"TABREF4": {
"type_str": "table",
"text": "Optimal metric sets",
"num": null,
"content": "<table/>",
"html": null
}
}
}
}