|
{ |
|
"paper_id": "2005", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:20:28.858951Z" |
|
}, |
|
"title": "Sehda S 2 MT: Incorporation of Syntax into Statistical Translation System", |
|
"authors": [ |
|
{ |
|
"first": "Yookyung", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Sehda Inc", |
|
"location": { |
|
"addrLine": "465 N. Fairchild Drive, Suite 123 Mountain View", |
|
"postCode": "94043", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "kim@sehda.com" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Sehda Inc", |
|
"location": { |
|
"addrLine": "465 N. Fairchild Drive, Suite 123 Mountain View", |
|
"postCode": "94043", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Youssef", |
|
"middle": [], |
|
"last": "Billawala", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Sehda Inc", |
|
"location": { |
|
"addrLine": "465 N. Fairchild Drive, Suite 123 Mountain View", |
|
"postCode": "94043", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Farzad", |
|
"middle": [], |
|
"last": "Ehsani", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Sehda Inc", |
|
"location": { |
|
"addrLine": "465 N. Fairchild Drive, Suite 123 Mountain View", |
|
"postCode": "94043", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper describes Sehda's S 2 MT (Syntactic Statistical Machine Translation) system submitted to the Korean-English track in the evaluation campaign of the IWSLT-05 workshop. The S 2 MT is a phrase-based statistical system trained on linguistically processed parallel data.", |
|
"pdf_parse": { |
|
"paper_id": "2005", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper describes Sehda's S 2 MT (Syntactic Statistical Machine Translation) system submitted to the Korean-English track in the evaluation campaign of the IWSLT-05 workshop. The S 2 MT is a phrase-based statistical system trained on linguistically processed parallel data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Sehda's S 2 MT (Syntactic Statistical Machine Translation) system is a hybrid system which incorporates linguistic knowledge into statistical learning. The system learns phraseto-phrase mapping and syntactic ordering separately. A feasibility test of the system is performed on the translation task presented by International Workshop on Spoken Language Translation (IWSLT) for Korean-to-English \"Supplied+Tools\" data track. We show that syntactic phrases are useful units to handle the structural problems of statistical Machine Translation and reduce the need for huge parallel text corpora.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "Our system capitalizes on the intuition that language is broadly divided into two levels: structure and vocabulary. Structure is the syntax or relation among phrases that govern the formation of complex structures in a language. Vocabulary is the word-level representation of individual concepts in a language. In traditional approaches to Statistical Machine Translation (SMT), the system learns both types of information simultaneously. By separating the acquisition of structural information from the acquisition of vocabulary, however, an SMT system can learn both levels more easily and more efficiently. By modifying the existing corpus to isolate structure and vocabulary, we are able to take full advantage of all of the information content of the bilingual corpus, ultimately producing higher quality machine translation with less training data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview of S 2 MT", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "We separate the two levels of translation information by \"chunking\" [1] [2] the sentences in the bilingual corpus. Chunking is the process of separating the sentences into contiguous, structurally significant groups, such as noun phrases, verbal clusters, and prepositional phrases. 1 In contrast to full syntactic parsing employed in [5] [6] , chunking is flexible enough to handle the ungrammaticalities of 1 We use \"chunks\" and \"phrases\" interchangeably, unless otherwise noted.", |
|
"cite_spans": [ |
|
{ |
|
"start": 68, |
|
"end": 71, |
|
"text": "[1]", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 72, |
|
"end": 75, |
|
"text": "[2]", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 283, |
|
"end": 284, |
|
"text": "1", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 335, |
|
"end": 338, |
|
"text": "[5]", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 339, |
|
"end": 342, |
|
"text": "[6]", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 409, |
|
"end": 410, |
|
"text": "1", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview of S 2 MT", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "conversational data and provide us with syntactic information useful in handling structural issues [3] .", |
|
"cite_spans": [ |
|
{ |
|
"start": 99, |
|
"end": 102, |
|
"text": "[3]", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview of S 2 MT", |
|
"sec_num": "2." |
|
}, |
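For illustration, here is a minimal sketch of the kind of rule-based chunking described above, using NLTK's RegexpParser rather than Sehda's in-house chunk parser; the tag-pattern grammar and the example sentence are hypothetical stand-ins, not the actual rule set.

```python
# Sketch only: a toy rule-based chunker in the spirit of the description above.
import nltk

# Hypothetical chunk grammar; the VC rule optionally fuses a subject pronoun with the
# verbal cluster, echoing the pronoun+verbal-cluster chunks discussed later (Table 2).
grammar = r"""
  VC: {<PRP>?<MD>?<VB.*>+}     # verbal cluster, optionally with a subject pronoun
  NP: {<DT>?<JJ>*<NN.*>+}      # noun phrase
"""
chunker = nltk.RegexpParser(grammar)

# POS-tagged input; in the real system tags come from Brill's English tagger.
tagged = [("i", "PRP"), ("would", "MD"), ("like", "VB"),
          ("a", "DT"), ("window", "NN"), ("seat", "NN")]
tree = chunker.parse(tagged)

# Flatten the parse into (chunk_type, words) pairs -- the phrases used for alignment.
for node in tree:
    if isinstance(node, nltk.Tree):
        print(node.label(), [w for w, t in node.leaves()])
    else:
        print("O", [node[0]])
```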
|
{ |
|
"text": "Two learning passes are then performed 2 : one at the sentence level composed of phrase sequences to handle phrase reordering, and the other at the phrase level composed of word sequences to learn phrase translation properties. The results of the two learning passes are merged in the decoding step to produce translations, as shown in Figure 1 . The system is composed of four modules:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 336, |
|
"end": 344, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Overview of S 2 MT", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "(1) linguistic analyzer (2) phrase translation module (3) phrase reordering module (4) decoding module which integrates phrases into a sentence", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview of S 2 MT", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "The linguistic analyzer consists of a parts-of-speech (POS) tagger, a morphological analyzer 3 , a chunk parser, and a head word extractor. Except for a Korean morphological analyzer and Brill's English POS tagger, we use in-house tools. The head word extractor extracts the syntactic head word from a phrase to build a language model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 93, |
|
"end": 94, |
|
"text": "3", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview of S 2 MT", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "Phrase translation is done in two ways: (i) directly using phrase-to-phrase mapping; and (ii) statistically using IBM model-4 with training on the aligned phrases instead of aligned sentences. This is explained in greater detail below.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview of S 2 MT", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "The significant word-order differences between Korean and English present a serious challenge to canonical SMT systems. In the S 2 MT system, sentences are segmented into linguistically motivated phrases, which act as the fundamental units for reordering.", |
|
"cite_spans": [ |
|
{ |
|
"start": 129, |
|
"end": 130, |
|
"text": "2", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview of S 2 MT", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "The system produces several candidate translations for each phrase, and they are decoded using linguistically augmented language models in conjunction with probabilities of different phrase orders, learned at the sentence level. Since each phrase is translated individually, without contextual information, it is vital to find a mechanism to communicate between phrases to find the best overall translation of the target sentence. We conducted a number of experiments using a variety of language modeling schemes, including the use of the lexical head of each phrase along with the syntactic type of the phrase.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview of S 2 MT", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "Word alignment based on parallel sentences plays an important role in SMT and acts as the first step in chunk alignment in Sehda's two-level approach. This alignment further generates a lexicon model necessary in subsequent processing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Alignment", |
|
"sec_num": "2.1." |
|
}, |
|
{ |
|
"text": "We propose a learning algorithm to perform joint estimation of word alignment and lexicon model. In our approach, we first use GIZA++ to generate IBM model-based word alignments in both directions: Korean-to-English and Englishto-Korean. We then construct an initial estimation of the probabilistic bi-lingual lexicon model based on the intersection or union of the GIZA++ word alignments. We use this lexicon model as the initial parameter set for our word re-alignment task. A maximum likelihood (ML) algorithm is further introduced using several different statistical sourcetarget word translation models. The new word alignment is used as the source for the re-estimation of the lexicon model in the next iteration. We perform the joint estimation of the lexicon model and word alignment in an iterative fashion until a certain threshold criterion is reached.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Alignment", |
|
"sec_num": "2.1." |
|
}, |
|
{ |
|
"text": "In contrast to IBM models [7] , our algorithm combines different lexicon model estimation approaches with the ML word alignment during each iteration of the model training. As a result, our system is more flexible in terms of the integration of the lexicon model and the word alignment during the recursive estimation, and thus can improve both predictability and precision of the estimated lexicon model and word alignment. Different probabilistic models are introduced in order to estimate the associativity between the source and target words. As a result, our approach is capable of increasing the recall ratio of word alignment and the lexicon size without decreasing the alignment precision, which is especially important for applications with limited training parallel corpus. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 26, |
|
"end": 29, |
|
"text": "[7]", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Alignment", |
|
"sec_num": "2.1." |
|
}, |
|
{ |
|
"text": ") | , ( max arg * s A t p A L A r r \u03a6 \u2208 = (1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Given a source sentence", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where L \u03a6 denotes the set of all possible alignment matrices subject to the lexical constraints.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Given a source sentence", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The conditional probability of a target sentence generated by a source sentence depends on the lexicon translation model. We explored different ways to model the lexicon translation probability using the source-target word co-occurrence frequency, context information from the parallel sentence, and the alignment constraints. During each iteration of the word alignment, the lexical translation probabilities for each sentence pair are re-estimated using the lexical model learned from previous iterations, and the specific source-target word pairs occurring in the sentence. Furthermore, we introduced two types of lexical rules into word alignment. The first rule set consists of Korean case marking words, which should be aligned to the NULL word. The second rule set contains some incorrectly aligned bi-lingual lexicons generated from initial GIZA++ word alignment. Table 1 shows the comparison of word alignment performance between GIZA++ and Sehda's re-alignment tool. Realignment improves both precision and recall when both sentence context information and knowledge-based alignment constraints based on the union of GIZA++ initial alignments are used [8] . By combining the initial alignments with the context information and constraints, we achieve higher precision at the expense of only a slight decrease in recall. For chunk-based MT, the first realignment algorithm yields better translation results in terms of BLEU and NIST scores. This might be due to the fact that chunk coverage is more critical for our S 2 MT system. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 1162, |
|
"end": 1165, |
|
"text": "[8]", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 872, |
|
"end": 879, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Given a source sentence", |
|
"sec_num": null |
|
}, |
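The following is a minimal sketch, under simplifying assumptions, of the iterative interplay between the lexicon model and the word alignment described above: the lexicon is seeded from an initial full alignment (standing in for the GIZA++ intersection or union), each sentence pair is re-aligned greedily under that lexicon and the NULL-alignment rule for case markers, and the lexicon is re-estimated from the new alignments. The toy data, the rule list, and the fixed iteration count are hypothetical.

```python
from collections import defaultdict

# Toy parallel data (romanized Korean / English); a hypothetical stand-in for the BTEC corpus.
corpus = [(["na", "neun", "menyu", "reul", "wonhanda"], ["i", "want", "a", "menu"]),
          (["menyu", "reul", "boyeo", "juseyo"], ["show", "me", "a", "menu"])]

CASE_MARKERS = {"neun", "reul"}          # rule set 1: align these to NULL
NULL = "<NULL>"

def align(src, tgt, lex):
    """Greedily pick, for each target word, the source word with the highest lexicon score."""
    return [(max(range(len(src)), key=lambda i: lex.get((src[i], t), 1e-6)), j)
            for j, t in enumerate(tgt)]

def reestimate(corpus, alignments):
    """Relative-frequency lexicon p(t|s) from the current alignments, honoring the NULL rule."""
    count, total = defaultdict(float), defaultdict(float)
    for (src, tgt), links in zip(corpus, alignments):
        for i, j in links:
            s = NULL if src[i] in CASE_MARKERS else src[i]
            count[(s, tgt[j])] += 1.0
            total[s] += 1.0
    return {(s, t): c / total[s] for (s, t), c in count.items()}

# Seed lexicon from all co-occurring word pairs (stand-in for the GIZA++ union/intersection seed).
lexicon = reestimate(corpus, [[(i, j) for i in range(len(s)) for j in range(len(t))]
                              for s, t in corpus])
for _ in range(3):                        # fixed iteration count instead of a stopping criterion
    alignments = [align(s, t, lexicon) for s, t in corpus]
    lexicon = reestimate(corpus, alignments)

print(sorted(lexicon.items(), key=lambda kv: -kv[1])[:5])
```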
|
{ |
|
"text": "To allow the two-level learning, both English and Korean sentences are segmented into syntactically meaningful phrases independently and the chunks are aligned. The resulting chunk alignment serves as the training data for statistical chunk translation as well as the direct phrase-to-phrase translation table. In the submitted system, we use a word alignment based method. If at least one word of a chunk in the source language is aligned to a word in a chunk in the target language by the improved word alignment using the two directions of GIZA++ training result, the two chunks are aligned.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Chunk Alignment", |
|
"sec_num": "2.2." |
|
}, |
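A minimal sketch of this chunk alignment rule, assuming chunks are given as token index spans and the word alignment as a set of (source index, target index) links; the spans and links below are hypothetical.

```python
# Align two chunks whenever at least one word link falls inside both spans, as described above.
# Chunks are (start, end) token spans, end exclusive.
def align_chunks(src_chunks, tgt_chunks, word_links):
    pairs = set()
    for si, (ss, se) in enumerate(src_chunks):
        for ti, (ts, te) in enumerate(tgt_chunks):
            if any(ss <= i < se and ts <= j < te for i, j in word_links):
                pairs.add((si, ti))
    return sorted(pairs)

# Hypothetical example in the spirit of Figure 2 ("go straight" / "to the end of the hall").
src_chunks = [(0, 2), (2, 5)]            # Korean chunks over 5 tokens
tgt_chunks = [(0, 2), (2, 6)]            # English chunks over 6 tokens
word_links = {(0, 2), (3, 1)}            # improved bidirectional word alignment links
print(align_chunks(src_chunks, tgt_chunks, word_links))   # -> [(0, 1), (1, 0)]
```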
|
{ |
|
"text": "A crucial difference between the S 2 MT and other phrase based statistical MT model such as the Alignment Template model [9] is that the source and target languages are independently chunk-parsed to find syntactically and semantically meaningful phrases in each language and then alignment is performed. In contrast to word alignment, chunk alignment tends to be one-to-one, and the non-monotonic alignments are still within manageable distance, as shown in Figure 2 . In Figure 2 , black represents word alignment and gray represent chunk alignment. As chunk alignment is guided by word alignment, gray areas include black squares. The chunk boundaries are defined in each language first, thus \"go straight\" is aligned to \"\uace7\ubc14\ub85c\uac00\uc2dc\" even though there is no word alignment between \"straight\" and \"\uace7\ubc14\ub85c\".", |
|
"cite_spans": [ |
|
{ |
|
"start": 121, |
|
"end": 124, |
|
"text": "[9]", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 458, |
|
"end": 466, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 472, |
|
"end": 480, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Chunk Alignment", |
|
"sec_num": "2.2." |
|
}, |
|
{ |
|
"text": "Of End The To Straight Go \ud640 \uc758 \ub05d \uae4c \uc9c0 \uace7 \ubc14 \ub85c \uac00 \uc2dc Figure 2: Example of Chunk Alignment", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hall The", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The chunking is performed using Sehda's rule-based chunk parser. The parser affords flexibility to accommodate idiosyncrasies of the language pair. For instance, Korean has a \"missing argument\" problem; pronouns are freely dropped as long as the reference can be resolved in context [10] .", |
|
"cite_spans": [ |
|
{ |
|
"start": 283, |
|
"end": 287, |
|
"text": "[10]", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hall The", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Missing pronouns must therefore be reintroduced when translating into English. By combining pronouns and verbal clusters in English into one chunk, the Korean verbal cluster with missing pronoun can be aligned to this chunk 4 as illustrated in Table 2 . Combining pronouns and verbal clusters in English into one chunk can be harmful if an explicit pronoun appears in the Korean text. However, since verbal clusters in English will not be combined with non-pronoun NP subjects, there are simple verbal clusters available as translation candidates in addition to pronoun-verbal clusters. In more formal texts such as news paper articles where there are not many cases of dropped arguments, we find it is not necessary to combine subject pronouns and verbal clusters.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 244, |
|
"end": 251, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Hall The", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In the submitted system, we use only the improved word alignment from two directions of GIZA++ training, but there are many other possibilities to improve chunk alignment using dictionary and head-word alignments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hall The", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In our two-level leaning model, phrases are translated independently first, and then the best phrase is chosen among several candidate translations within context and phrases are reordered. In this section, we discuss the methods that we developed for phrase translation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Phrase Translation", |
|
"sec_num": "2.3." |
|
}, |
|
{ |
|
"text": "We apply two methods of phrase translation:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Phrase Translation", |
|
"sec_num": "2.3." |
|
}, |
|
{ |
|
"text": "(1) direct phrase-to-phrase translation resulting from the chunk alignment,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Phrase Translation", |
|
"sec_num": "2.3." |
|
}, |
|
{ |
|
"text": "statistical translation using GIZA++ training on aligned chunks and ReWrite decoder.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Phrase Translation", |
|
"sec_num": "2.3." |
|
}, |
|
{ |
|
"text": "The direct phrase translation uses the phrase translation model with probability constructed from the chunk alignment. The phrase translation probability is estimated by the cooccurrence frequency of the source-target chunk, and the unigram frequency of the source chunk from chunk alignment table. Direct phrase translation has the advantage of handling both word order within phrases as well as translations of noncompositional expressions, which covers many translation divergences [11] . While the quality of direct phrase translation is very high, the coverage may be low, as it depends on the size and overlap of the training corpus. Several ways of chunking with different rules are tested to construct a better direct phrase translation table to balance quality and coverage. For the IWSLT development set, we achieved a coverage of 72%. The coverage is defined as the ratio of non-punctuation chunks which are translatable by direct translation to the total non-punctuation chunks in the development set.", |
|
"cite_spans": [ |
|
{ |
|
"start": 485, |
|
"end": 489, |
|
"text": "[11]", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Phrase Translation", |
|
"sec_num": "2.3." |
|
}, |
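As a rough illustration of how the direct table and the coverage statistic described above can be computed, here is a sketch assuming the chunk alignment is available as a list of (source chunk, target chunk) string pairs; the chunk pairs and the development chunks are hypothetical.

```python
from collections import Counter, defaultdict

# Hypothetical aligned chunk pairs extracted from the training data (romanized Korean sources).
chunk_pairs = [("haru bam e", "per night"), ("haru bam e", "a night"),
               ("haru bam e", "per night"), ("changga jjok jari ro", "a window seat")]

pair_count = Counter(chunk_pairs)
src_count = Counter(src for src, _ in chunk_pairs)

# Direct table: p(target chunk | source chunk) by relative frequency, as described in the text.
direct_table = defaultdict(dict)
for (src, tgt), c in pair_count.items():
    direct_table[src][tgt] = c / src_count[src]

# Coverage: fraction of non-punctuation chunks in a (hypothetical) development set
# that have at least one entry in the direct table.
dev_chunks = ["haru bam e", "changga jjok jari ro", "chuchon ha si", "."]
content_chunks = [c for c in dev_chunks if c not in {".", ",", "?", "!"}]
covered = sum(1 for c in content_chunks if c in direct_table)
print(direct_table["haru bam e"])                          # {'per night': 0.66..., 'a night': 0.33...}
print(f"coverage = {covered / len(content_chunks):.0%}")   # 2 of 3 -> 67%
```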
|
{ |
|
"text": "The second method (ReWrite) [12] makes use of the preexisting decoder ReWrite and IBM model-4. Though the model and decoding program have already been developed by IBM, our training material is different from IBM's. The system learns word translation probability from aligned chunk phrases instead of entire aligned sentences. Since chunk phrases on average consist of 2.5 words, 5 the complexity to learn word translation probabilities will be reduced significantly. This is important because the traditional translation statistics must consider the probability of every word in the input sentence mapping onto every word in the output sentence. In common training data it is not unusual to see sentences of 30 or more words in the written texts, so this is a significant number of probabilities to consider. If we can perform the word-level training on aligned chunks instead of aligned sentences, then the simplification of the translation model will be significant. Hence more accurate mapping is expected without an increase in size of the bilingual corpus. In addition, the translation is to produce a chunk rather than the entire sentence at this step, thus a better distortion model is learned when we train using aligned chunks instead of the aligned sentences.", |
|
"cite_spans": [ |
|
{ |
|
"start": 28, |
|
"end": 32, |
|
"text": "[12]", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Phrase Translation", |
|
"sec_num": "2.3." |
|
}, |
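To make the complexity argument above concrete, the back-of-the-envelope comparison below counts the word-pair hypotheses a lexical translation model must consider for a 30-word sentence pair versus the same sentence split into aligned chunks of about 2.5 words each; the numbers are illustrative only.

```python
# Illustrative only: hypothesis space of source-target word pairs per training example.
sent_len = 30            # a long written-text sentence, as mentioned in the text
chunk_len = 2.5          # average chunk length reported in the text

pairs_per_sentence = sent_len * sent_len                  # every word can map to every word
chunks = sent_len / chunk_len
pairs_per_chunked = chunks * (chunk_len * chunk_len)      # pairs only within aligned chunks

print(pairs_per_sentence)   # 900
print(pairs_per_chunked)    # 75.0 -> roughly a 12x reduction in word-pair hypotheses
```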
|
{ |
|
"text": "One consideration when using aligned chunks instead of aligned sentences is that we may lose some training data due to incorrect chunk alignment. This is particularly true for small training corpora, as is the case with the IWSLT evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Phrase Translation", |
|
"sec_num": "2.3." |
|
}, |
|
{ |
|
"text": "To overcome this problem, both the aligned chunks and aligned sentences are used, but only the English chunks are used in language modeling for phrase translation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Phrase Translation", |
|
"sec_num": "2.3." |
|
}, |
|
{ |
|
"text": "In general, the translation quality of phrases by this purely statistical model (ReWrite) is worse than the direct phrase translation, as illustrated in Table 3 , though coverage is close to 100% except for instances of unknown words. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 153, |
|
"end": 160, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Phrase Translation", |
|
"sec_num": "2.3." |
|
}, |
|
{ |
|
"text": "Word-based SMT works poorly for language pairs as Korean and English which are structurally very different, as the distortion models are capable of handling only local movement of words. The proposed model's unit of reordering is the syntactic phrase. The performance of reordering in our model is superior to word-based SMT both in quality and speed due to the reduction in search space. To evaluate the reordering per se, we first used the ideal translation of phrases that are found from reference translations. Table 4 , \"before\" indicates the English phrases in Korean word order, and \"after\" the result of reordering, which is in English word order. We model the phrase reordering problem as the combination of traveling salesman problem (TSP) and global search of the ordering of the target language phrases. The TSP problem is an optimization problem which tries to find the path to cover all the nodes in a direct graph with certain defined cost function. For phrase re-ordering of machine translation, we use the language model (LM) score between contiguous chunks as the transitional cost between two phrases. Our LM score is obtained through the log-linear interpolation of an n-gram based lexicon LM, and an n-gram based phrase chunk head LM. We use a 3-gram LM with Good-Turing discounting to train the target language LM. The LM training data is the English part of IWSLT05's training set which contains 20k sentences from the BTEC corpus. We also added POS tags to the 20k training sentences and the test sentences, in order to compensate for the lexical coercion 6 phenomenon in machine translation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1580, |
|
"end": 1581, |
|
"text": "6", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 515, |
|
"end": 522, |
|
"text": "Table 4", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Reordering of Phrases", |
|
"sec_num": "2.4." |
|
}, |
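The sketch below illustrates the reordering idea described above on a toy scale: candidate chunk orders are scored by summing a transition cost between adjacent chunks, standing in for the interpolated n-gram and chunk-head LM score; exhaustive permutation replaces the actual TSP/global search, and the bigram scores are hypothetical.

```python
import itertools
import math

# Chunks in source (Korean) order, each already translated; reordering chooses their sequence.
chunks = ["a menu", "please", "show me"]

# Hypothetical interpolated LM log-probabilities for adjacent chunk pairs
# (stand-in for the 3-gram lexical LM + chunk-head LM used in the system).
bigram_logp = {("<s>", "show me"): -0.5, ("show me", "a menu"): -0.7,
               ("a menu", "please"): -1.0, ("please", "</s>"): -0.6}

def order_score(order):
    seq = ["<s>"] + list(order) + ["</s>"]
    return sum(bigram_logp.get((a, b), math.log(1e-4)) for a, b in zip(seq, seq[1:]))

# Exhaustive search over permutations; the real system uses a TSP-style search instead.
best = max(itertools.permutations(chunks), key=order_score)
print(" | ".join(best))    # show me | a menu | please   (cf. the example in Table 4)
```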
|
{ |
|
"text": "Due to the efficiency of our combined global search and TSP algorithm, we didn't use a distortion model to guide the search for optimal phrase reordering paths. Reordering results are shown in Table 5 . The system before reordering is simply combining top-1 phrase translations of each source phrase without considering context, and the system after reordering is the result after statistical reordering is performed, without considering n-best translations of each phrase. Decoder System Diagram A surface-form language model trained on the corpus is used to predict the probability of any sequence of DT and RW chunks. A word penalty model is necessary to compensate for the fact that the LM systematically penalizes longer target chunks.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 193, |
|
"end": 200, |
|
"text": "Table 5", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Reordering of Phrases", |
|
"sec_num": "2.4." |
|
}, |
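A minimal sketch of the candidate merging step described here: per-chunk candidates from the direct table (DT) and the ReWrite (RW) model are normalized within each source separately, combined with contribution weights, and adjusted by a word penalty so the LM's preference for shorter chunks is offset. The weights, the penalty value, and the candidate lists are hypothetical.

```python
import math

# Hypothetical n-best candidates for one source chunk, with unnormalized model scores.
dt_cands = {"per night": 2.0, "a night": 1.0}                       # direct-table candidates
rw_cands = {"at night": 0.9, "in night": 0.5, "per night": 0.4}     # ReWrite candidates

DT_WEIGHT, RW_WEIGHT = 0.7, 0.3      # optimized contribution weights (assumed values)
WORD_PENALTY = 0.1                   # bonus per word, offsetting the LM's length penalty

def normalize(cands):
    z = sum(cands.values())
    return {t: s / z for t, s in cands.items()}

dt_n, rw_n = normalize(dt_cands), normalize(rw_cands)
merged = {}
for t in set(dt_n) | set(rw_n):
    p = DT_WEIGHT * dt_n.get(t, 0.0) + RW_WEIGHT * rw_n.get(t, 0.0)
    merged[t] = math.log(p) + WORD_PENALTY * len(t.split())

for t, score in sorted(merged.items(), key=lambda kv: -kv[1]):
    print(f"{score:7.3f}  {t}")
```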
|
{ |
|
"text": "In this section, we present results on IWSLT's Korean-English task with limited training corpus. The training data was provided by the organizer and consists of 20k parallel sentences from BTEC corpus. The development set consists of 506 Korean sentences with 16 references, and the test set consists of 506 Korean sentences from the same domain as the training data. We note that the Korean test set has an unconventional tokenization different from the training set. For the proper morphological analysis and chunking, we modified the tokenization consistent with the training set. Table 6 summarizes some statistics of the training/test data. To assess the contribution of linguistic processing, the performance of IBM model-4 with no text processing is compared to those with processing. The results shown in Table 7 are based on the development set. SMT-plain is the result of IBM model-4 trained on the given training set. SMT-pos is the result of the same model on the modified training set with parts-of-speech tagging on the English side and parts-of-speech tagging and stemming on the Korean side. The big improvement of performance from SMT-plain to SMT-pos is due to the fact that Korean is a morphologically rich language and morphological processing such as detaching suffixes and stemming reduces the number of lexical items and hence results in a better lexical translation model.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 584, |
|
"end": 591, |
|
"text": "Table 6", |
|
"ref_id": "TABREF6" |
|
}, |
|
{ |
|
"start": 813, |
|
"end": 820, |
|
"text": "Table 7", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "For a language pair with less complexity in morphology, we do not expect such a big performance improvement with simple addition of linguistic processing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "SMT-trans is a further modification of the training set with heuristic transformations [13] on the Korean side. Chunking and heuristic reordering are performed: the direct object phrases are moved after the verb and the auxiliary verbs are moved before the main verb. The S 2 MT includes all the linguistic processing plus statistical reordering and decoding. The contribution of statistical reordering and decoding was significant as shown in Table 8 . Before these two steps, we simply chose the best translation for each phrase without considering the context and measured the translation quality (S 2 MT top-1). S 2 MT-reordering is done by reordering the context independent best phrase translations according to a language model. After an optimal ordering is chosen,all n-best chunk translation candidates are considered in S 2 MT-decoding. To see the contribution of each module we separate reordering and decoding, however, a fully integrated system should achieve superior results.", |
|
"cite_spans": [ |
|
{ |
|
"start": 87, |
|
"end": 91, |
|
"text": "[13]", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 444, |
|
"end": 451, |
|
"text": "Table 8", |
|
"ref_id": "TABREF8" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "The best system for the development set is used for the test set evaluation and the results are shown in Table 9 , where BLEU [14] , NIST [15] , GTM [16] , METEOR [17] , WER, and PER [18] are 6 automatic evaluation metrics used in the evaluation campaign. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 126, |
|
"end": 130, |
|
"text": "[14]", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 138, |
|
"end": 142, |
|
"text": "[15]", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 149, |
|
"end": 153, |
|
"text": "[16]", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 163, |
|
"end": 167, |
|
"text": "[17]", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 183, |
|
"end": 187, |
|
"text": "[18]", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 105, |
|
"end": 112, |
|
"text": "Table 9", |
|
"ref_id": "TABREF9" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "In this paper, we present Sehda's S 2 MT system which incorporates linguistic knowledge into statistical machine translation. We show that we can perform translation with reasonable quality using very limited resources. From our experiments, linguistic processors such as a morphological analyzer and a chunk parser significantly reduce the dependence on training parallel corpus. Our new word alignment algorithm can improve both precision and recall through the incorporation of lexical knowledge and the interaction between the lexicon model and word alignment during the learning stage. The improved word alignment, together with new chunking rules, help us obtain an improved NIST score in IWSLT supplied data track evaluation. The combined optimal search and TSP solution helps us solve the phrase reordering problem in a timely fashion without significant degradation of the performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 36, |
|
"end": 37, |
|
"text": "2", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "The S 2 MT system was developed for a NSF SBIR project in news text translation domain. We made minor adaptations to the system for our participation of IWSLT evaluation. It remains a future research task to use a phrase based SMT [19] [20] instead of a word based SMT as a baseline and to find out the additional value of syntactic information.", |
|
"cite_spans": [ |
|
{ |
|
"start": 6, |
|
"end": 7, |
|
"text": "2", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 231, |
|
"end": 235, |
|
"text": "[19]", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "A similar approach is taken by[4] without much improvement on the baseline.3 We use Hangul Analysis Module (HAM) from Kukmin University for Korean morphological analysis. It separates a stem and suffixes in a word and tags them with a part of speech.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The missing argument problem is pervasive in the training and development set because they are conversational data, but there were unexpected many pronouns in the test set. We are not sure whether the pronouns in the test set are artificially added or not.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The number of words in a chunk depends on the exact definition of chunk used.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Lexical coercion is a phenomenon in natural language processing that we condition translation of a foreign word on the source word and its parts-of-speech.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This research was funded by NSF SBIR award #0441891. We also want to thank NIST Advanced Technology Program (ATP) for the support of the decoder part of S 2 MT system. We further like to thank the reviewers for their helpful comments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 155, |
|
"end": 156, |
|
"text": "2", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": "5." |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Parsing by Chunks", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Abney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1991, |
|
"venue": "Principle-Based Parsing: Computation and Psycholinguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "257--278", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abney, S. P., \"Parsing by Chunks.\" In Robert C. et al eds., Principle-Based Parsing: Computation and Psycholinguistics, pages 257--278. 1991", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Introduction to the CoNLL 2000 Shared Task: Chunking", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Sang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Buchholz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proceedings of CoNLL-2000", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "151--153", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sang, T. K. and Buchholz, S., \"Introduction to the CoNLL 2000 Shared Task: Chunking\", in Proceedings of CoNLL-2000, pp. 151 -153, Lisbon, Portugal 2000.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Chunkbased Statistical Translation", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Watanabe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Sumita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Okuno", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings. of the 41 st Annual Meeting of the Assoc. for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "303--310", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Watanabe, T., Sumita, E., and Okuno, H.G, \"Chunk- based Statistical Translation,\" in Proceedings. of the 41 st Annual Meeting of the Assoc. for Computational Linguistics, pp. 303-310. 2003.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Statistical Machine Translation Using Coercive Two-Level Syntactic Transduction", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Scafer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yarowsky", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9--16", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Scafer, C., and Yarowsky, D., \"Statistical Machine Translation Using Coercive Two-Level Syntactic Transduction,\" in Proceedings of ACL, pp. 9-16. 2003.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "A Syntax-Based Statistical Translation Model", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Yamada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "ACAL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yamada, K. and Knight, K., \"A Syntax-Based Statistical Translation Model,\" ACAL, 2001.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Syntax for Statistical Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Och", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Final Report of John Hopkins 2003 summer workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Och, F. et al. \"Syntax for Statistical Machine Translation\", Final Report of John Hopkins 2003 summer workshop\".", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "The Mathematics of Statistical Machine Translation: Parameter Estimation", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Brown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Della Pietra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Della Pietra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Mercer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "263--311", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Brown, P. F., Della Pietra, S.A., Della Pietra, V.J., and Mercer, R. L., \"The Mathematics of Statistical Machine Translation: Parameter Estimation,\" Computational Linguistics, Vol. 2, Issue 9, pp. 263 -311, 1993.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "IBM Spoken Translation System Evaluation", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lee, S. and Roukos, S. \"IBM Spoken Translation System Evaluation,\" IWSLT-2004. 2004.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Improved Alignment Models for Statistical Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Och", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Tillmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Proc. of the Joint SIGDAT Conf. On Empirical Methods in Natural Language Processing and Very Large Corpora", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "20--28", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Och, F.J., Tillmann, C., and Ney, H., \"\"Improved Alignment Models for Statistical Machine Translation,\" in Proc. of the Joint SIGDAT Conf. On Empirical Methods in Natural Language Processing and Very Large Corpora, University of Maryland, College Park, MD, pp. 20 -28, 1999.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Handling Structural Divergences and Recovering Dropped Arguments in a Korean/English Machine Translation System", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Levoie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Palmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Fambow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Kittredge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Korelskly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Han, C., Levoie, B., Palmer, M., Fambow, O., Kittredge, R., Korelskly, T., and Kim, M., \"Handling Structural Divergences and Recovering Dropped Arguments in a Korean/English Machine Translation System.\" 1999.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Handling Translation Divergences: Combining Statistical and Symbolic Techniques in Generation-Heavy Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Habash", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Dorr", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proc. Fifth Conference of the Association for Machine Translation in the Americas", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Habash, N. and Dorr, B., \"Handling Translation Divergences: Combining Statistical and Symbolic Techniques in Generation-Heavy Machine Translation,\" In Proc. Fifth Conference of the Association for Machine Translation in the Americas, Tiburton, CA 2002.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Greedy Decoding for Statistical Machine Translation in Almost Linear Time", |
|
"authors": [ |
|
{ |
|
"first": "U", |
|
"middle": [], |
|
"last": "Germann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of HLT-NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Germann, U., \"Greedy Decoding for Statistical Machine Translation in Almost Linear Time,\" in Proceedings of HLT-NAACL, Edmonton, AB, Canada, 2003.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Statistical Machine Translation of Spontaneous Speech with Scarce Resources", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Matuson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Popovic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Zens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of IWSLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matuson E., Popovic M., Zens R., and Ney H., \"Statistical Machine Translation of Spontaneous Speech with Scarce Resources,\" in Proceedings of IWSLT, Kyoto, Japan. 2004.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "BLEU: A Method for Automatic Evaluation of Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "IBM Research", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Papineni, K., Roukos, S., Ward, T., and Zhu, W., \"BLEU: A Method for Automatic Evaluation of Machine Translation,\" Technical Report RC22176 (W0109-022), IBM Research, 2001.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Automatic evaluation of machine translation quality using n-gram co-occurrence statistics", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Doddington", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Human Language Technology: Notebook, Proceedings", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Doddington, G. \"Automatic evaluation of machine translation quality using n-gram co-occurrence statistics,\" In Human Language Technology: Notebook, Proceedings, pp. 128-132. San Diego, CA2002.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Precision and Recall of Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Melamed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Green", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Turian", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of HLT-NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Melamed, D., Green, R., and Turian, J., \"Precision and Recall of Machine Translation,\" in Proceedings of HLT- NAACL, 2004.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "METEOR: An Automatic Metric for MT Evaluation with improved correlation with Human Judgments", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Banerjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Lavie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of Workshop on Intrinsic and Extrinsic Evaluation Measures for MT and/or Summarization, 43th Annual Meeting of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Banerjee, S., and Lavie, A., \"METEOR: An Automatic Metric for MT Evaluation with improved correlation with Human Judgments,\" in Proceedings of Workshop on Intrinsic and Extrinsic Evaluation Measures for MT and/or Summarization, 43th Annual Meeting of ACL, Ann Arbor, Michigan, June 2005.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Statistical Machine Translation: From Single Word Models to Alignment Templates", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Och", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Och, F., Statistical Machine Translation: From Single Word Models to Alignment Templates. Ph.D. thesis, RWTH Aachen, Germany, 2002.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "A Hierachical Phrase-Based Model for Statistical Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Chiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "263--270", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chiang, D., \"A Hierachical Phrase-Based Model for Statistical Machine Translation,\" ACL pp.263-270. 2005.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Integrated Phrase Segmentation And alignment Algorithm for Statistical Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Vogel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Waibel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of International Conference on Natural Language Processing and Knowledge Engineering", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhang, Y., Vogel, S. and Waibel, A., \"Integrated Phrase Segmentation And alignment Algorithm for Statistical Machine Translation,\" in Proceedings of International Conference on Natural Language Processing and Knowledge Engineering.2003.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Overview of S 2 MT", |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"text": "to find the target word j t which can be generated by source word i s according to certain optimal criterion. Alignment between source and target words may be represented by an J I \u00d7 alignment matrix ] [ constrained ML based word alignment can be formulated as follows:", |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF2": { |
|
"text": "n k M e r g e A l g o ri th m T a rg e t S e n te n ce S e a rc h S ys te m B e a m S e a rc h C h u n ke r D i re c t T a b l e P o s t P ro c es s o r S c o ri n g S ys te m F e a tu re S e l e c ti o n W o rd P e n a l t y M o d e l N o rm a l i za ti o n R e ra n k in g", |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF3": { |
|
"text": "Figure 3: Decoder System Diagram A surface-form language model trained on the corpus is used to predict the probability of any sequence of DT and RW chunks. A word penalty model is necessary to compensate for the fact that the LM systematically penalizes longer target chunks.", |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"text": "Improvement of Word Alignment", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>Precision</td><td>Recall</td></tr><tr><td>GIZA++</td><td>0.67</td><td>0.78</td></tr><tr><td>ReAlign</td><td>0.73</td><td>0.87</td></tr><tr><td>(union+context+constraint)</td><td/><td/></tr><tr><td>ReAlign</td><td>0.95</td><td>0.74</td></tr><tr><td>(intersection+context+constraint)</td><td/><td/></tr></table>" |
|
}, |
|
"TABREF2": { |
|
"text": "Example of Verbal Chunk Alignment", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>Korean chunk</td><td>English Chunk</td></tr><tr><td>\uc2f6/v +_/e</td><td>i/prp 'd/md like/vb</td></tr><tr><td>\ub418/v +_/e</td><td>can/md i/prp</td></tr><tr><td>\uac83/n \uac19/v +_/e</td><td>i/prp think/vbp</td></tr></table>" |
|
}, |
|
"TABREF3": { |
|
"text": "Example of Direct and ReWrite Translation", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>Direct Translation</td><td>ReWrite</td></tr><tr><td/><td/><td>Translation</td></tr><tr><td>\ud558\ub8fb\ubc24/n+\uc5d0/j</td><td>per/in night/nn</td><td>at/in night/nn</td></tr><tr><td>\ud558\ub8fb\ubc24/n +\uc5d0/j</td><td>a/dt night/nn</td><td>in/in night/nn</td></tr><tr><td>\ud558\ub8fb\ubc24/n +\uc5d0/j</td><td/><td>on/in night/nn</td></tr><tr><td>\ud558\ub8fb\ubc24/n +\uc5d0/j</td><td/><td>in/in a/dt night/nn</td></tr><tr><td>\ucc3d\uac00/n \ucabd/n</td><td>a/dt table/nn near/in</td><td>a/dt window/nn</td></tr><tr><td>\uc790\ub9ac/n +\ub85c/j</td><td>the/dt window/nn</td><td>seat/nn</td></tr><tr><td>\ucc3d\uac00/n \ucabd/n</td><td>a/dt window/nn</td><td>window/nn side/nn</td></tr><tr><td>\uc790\ub9ac/n +\ub85c/j</td><td>seat/nn</td><td>seat/nn</td></tr><tr><td>\ucd94\ucc9c/n +\ud558/t +</td><td>you/prp</td><td>thank/vb you/prp</td></tr><tr><td>\uc2dc/f +_/e</td><td>recommend/vb</td><td>recommend/vb</td></tr><tr><td/><td/><td>highly/rb thank/vb</td></tr><tr><td/><td/><td>you/prp</td></tr><tr><td/><td/><td>your/prp$ you/prp</td></tr><tr><td/><td/><td>recommend/vb</td></tr></table>" |
|
}, |
|
"TABREF4": { |
|
"text": "Example of Reordering", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td colspan=\"2\">before a menu</td><td>please</td><td>show me</td></tr><tr><td>after</td><td colspan=\"3\">show me a menu Please</td></tr></table>" |
|
}, |
|
"TABREF5": { |
|
"text": "", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td colspan=\"3\">Experimental results of chunk reordering</td></tr><tr><td/><td colspan=\"2\">NIST score BLEU score</td></tr><tr><td colspan=\"2\">before 5.7603</td><td>0.1641</td></tr><tr><td>after</td><td>6.1290</td><td>0.2147</td></tr><tr><td>2.5. Decoding</td><td/><td/></tr><tr><td colspan=\"3\">Sehda's MT decoder, as depicted in Figure 3, is a chunk-based</td></tr><tr><td colspan=\"3\">hybrid decoder. During the decoding stage, N-best chunk</td></tr><tr><td colspan=\"3\">translation candidates from both direct table (DT) and</td></tr><tr><td colspan=\"3\">ReWrite (RW) tables are produced from the phrase translation</td></tr><tr><td colspan=\"3\">module. The associated probabilities of these translated</td></tr><tr><td colspan=\"3\">chunks are first normalized to the global distributions of DT</td></tr><tr><td colspan=\"3\">and RW chunks separately and subsequently merged using</td></tr><tr><td colspan=\"2\">optimized contribution weights.</td><td/></tr></table>" |
|
}, |
|
"TABREF6": { |
|
"text": "Statistics", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td colspan=\"3\">of training/development/test sets</td></tr><tr><td>Corpus</td><td/><td colspan=\"2\">Korean English</td></tr><tr><td>Training</td><td>Vocabulary</td><td>8.9k</td><td>8.7k</td></tr><tr><td>Set</td><td>size</td><td/><td/></tr><tr><td/><td># of learned</td><td>32.6k</td><td>32.6k</td></tr><tr><td/><td>chunk pairs</td><td/><td/></tr><tr><td>Development</td><td># of chunks</td><td>2367</td><td>2367</td></tr><tr><td>Set</td><td>DT coverage</td><td>72%</td><td/></tr><tr><td>Test Set</td><td># of chunks</td><td>2422</td><td>2422</td></tr><tr><td/><td>DT coverage</td><td>71%</td><td/></tr></table>" |
|
}, |
|
"TABREF7": { |
|
"text": "Systems", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td colspan=\"4\">Comparison on the development set</td></tr><tr><td/><td>SMT-</td><td>SMT-</td><td>SMT-</td><td>S 2 MT</td></tr><tr><td/><td>plain</td><td>pos</td><td>trans</td><td/></tr><tr><td>NIST</td><td>2.049</td><td>4.906</td><td>5.294</td><td>6.3412</td></tr><tr><td>BLEU</td><td>0.134</td><td>0.173</td><td>0.190</td><td>0.2491</td></tr></table>" |
|
}, |
|
"TABREF8": { |
|
"text": "Contribution of Each Component of S 2 MT", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>S 2 MT</td><td>S 2 MT -</td><td>S 2 MT -</td></tr><tr><td/><td>top 1</td><td>reordering</td><td>Decoding</td></tr><tr><td>NIST</td><td>5.7603</td><td>6.1290</td><td>6.3412</td></tr><tr><td colspan=\"2\">BLEU 0.1641</td><td>0.2147</td><td>0.2491</td></tr></table>" |
|
}, |
|
"TABREF9": { |
|
"text": "Test", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>Set Result</td></tr><tr><td/><td>S 2 MT</td></tr><tr><td>NIST</td><td>6.511</td></tr><tr><td>BLEU</td><td>0.2064</td></tr><tr><td>WER</td><td>0.7033</td></tr><tr><td>PER</td><td>0.5470</td></tr><tr><td>METEOR</td><td>0.5111</td></tr><tr><td>GTM</td><td>0.4224</td></tr></table>" |
|
} |
|
} |
|
} |
|
} |