{
"paper_id": "2021",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T02:09:47.517784Z"
},
"title": "BSTC: A Large-Scale Chinese-English Speech Translation Dataset",
"authors": [
{
"first": "Ruiqing",
"middle": [],
"last": "Zhang",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Baidu Inc. No",
"location": {
"addrLine": "10, Shangdi 10th Street",
"postCode": "100085",
"settlement": "Beijing",
"country": "China"
}
},
"email": "zhangruiqing01@baidu.com"
},
{
"first": "Xiyang",
"middle": [],
"last": "Wang",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Baidu Inc. No",
"location": {
"addrLine": "10, Shangdi 10th Street",
"postCode": "100085",
"settlement": "Beijing",
"country": "China"
}
},
"email": ""
},
{
"first": "Chuanqiang",
"middle": [],
"last": "Zhang",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Baidu Inc. No",
"location": {
"addrLine": "10, Shangdi 10th Street",
"postCode": "100085",
"settlement": "Beijing",
"country": "China"
}
},
"email": "zhangchuanqiang@baidu.com"
},
{
"first": "Zhongjun",
"middle": [],
"last": "He",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Baidu Inc. No",
"location": {
"addrLine": "10, Shangdi 10th Street",
"postCode": "100085",
"settlement": "Beijing",
"country": "China"
}
},
"email": "hezhongjun@baidu.com"
},
{
"first": "Hua",
"middle": [],
"last": "Wu",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Baidu Inc. No",
"location": {
"addrLine": "10, Shangdi 10th Street",
"postCode": "100085",
"settlement": "Beijing",
"country": "China"
}
},
"email": "wu_hua@baidu.com"
},
{
"first": "Zhi",
"middle": [],
"last": "Li",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Baidu Inc. No",
"location": {
"addrLine": "10, Shangdi 10th Street",
"postCode": "100085",
"settlement": "Beijing",
"country": "China"
}
},
"email": ""
},
{
"first": "Haifeng",
"middle": [],
"last": "Wang",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Baidu Inc. No",
"location": {
"addrLine": "10, Shangdi 10th Street",
"postCode": "100085",
"settlement": "Beijing",
"country": "China"
}
},
"email": ""
},
{
"first": "Ying",
"middle": [],
"last": "Chen",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Baidu Inc. No",
"location": {
"addrLine": "10, Shangdi 10th Street",
"postCode": "100085",
"settlement": "Beijing",
"country": "China"
}
},
"email": ""
},
{
"first": "Qinfei",
"middle": [],
"last": "Li",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Baidu Inc. No",
"location": {
"addrLine": "10, Shangdi 10th Street",
"postCode": "100085",
"settlement": "Beijing",
"country": "China"
}
},
"email": ""
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "This paper presents BSTC (Baidu Speech Translation Corpus), a large-scale Chinese-English speech translation dataset. This dataset is constructed based on a collection of licensed videos of talks or lectures, including about 68 hours of Mandarin data, their manual transcripts and translations into English, as well as automated transcripts by an automatic speech recognition (ASR) model. We have further asked three experienced interpreters to simultaneously interpret the testing talks in a mock conference setting. This corpus is expected to promote the research of automatic simultaneous translation as well as the development of practical systems. We have organized simultaneous translation tasks and used this corpus to evaluate automatic simultaneous translation systems.",
"pdf_parse": {
"paper_id": "2021",
"_pdf_hash": "",
"abstract": [
{
"text": "This paper presents BSTC (Baidu Speech Translation Corpus), a large-scale Chinese-English speech translation dataset. This dataset is constructed based on a collection of licensed videos of talks or lectures, including about 68 hours of Mandarin data, their manual transcripts and translations into English, as well as automated transcripts by an automatic speech recognition (ASR) model. We have further asked three experienced interpreters to simultaneously interpret the testing talks in a mock conference setting. This corpus is expected to promote the research of automatic simultaneous translation as well as the development of practical systems. We have organized simultaneous translation tasks and used this corpus to evaluate automatic simultaneous translation systems.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "In recent years, automatic speech translation (AST) has attracted increasing interest for its commercial potential (e.g., Simultaneous Interpretation and Wireless Speech Translator) . A large amount of research has focused on speech translation (Weiss et al., 2017; Niehues et al., 2018; Chung et al., 2018; Sperber et al., 2019; Kahn et al., 2020; Inaguma et al., 2020) and simultaneous translation (Sridhar et al., 2013; Oda et al., 2014; Cho and Esipova, 2016; Gu et al., 2017; Ma et al., 2019; Arivazhagan et al., 2019; Zhang et al., 2020) . The former intends to convert speech signals in the source language to the target language, and the latter aims to achieve a real-time translation that delivers the speech to the audience in the target language while minimizing the delay between the speaker and the translation.",
"cite_spans": [
{
"start": 135,
"end": 181,
"text": "Interpretation and Wireless Speech Translator)",
"ref_id": null
},
{
"start": 245,
"end": 265,
"text": "(Weiss et al., 2017;",
"ref_id": "BIBREF26"
},
{
"start": 266,
"end": 287,
"text": "Niehues et al., 2018;",
"ref_id": "BIBREF14"
},
{
"start": 288,
"end": 307,
"text": "Chung et al., 2018;",
"ref_id": "BIBREF4"
},
{
"start": 308,
"end": 329,
"text": "Sperber et al., 2019;",
"ref_id": "BIBREF20"
},
{
"start": 330,
"end": 348,
"text": "Kahn et al., 2020;",
"ref_id": "BIBREF11"
},
{
"start": 349,
"end": 370,
"text": "Inaguma et al., 2020)",
"ref_id": null
},
{
"start": 400,
"end": 422,
"text": "(Sridhar et al., 2013;",
"ref_id": "BIBREF21"
},
{
"start": 423,
"end": 440,
"text": "Oda et al., 2014;",
"ref_id": "BIBREF15"
},
{
"start": 441,
"end": 463,
"text": "Cho and Esipova, 2016;",
"ref_id": "BIBREF3"
},
{
"start": 464,
"end": 480,
"text": "Gu et al., 2017;",
"ref_id": "BIBREF8"
},
{
"start": 481,
"end": 497,
"text": "Ma et al., 2019;",
"ref_id": "BIBREF13"
},
{
"start": 498,
"end": 523,
"text": "Arivazhagan et al., 2019;",
"ref_id": "BIBREF0"
},
{
"start": 524,
"end": 543,
"text": "Zhang et al., 2020)",
"ref_id": "BIBREF29"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "To train an AST model, existing corpora can be classified into two categories:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "\u2022 Speech Translation corpora consist pairs of audio segments and their corresponding translations. Table 1 : Existing speech translation corpora and ours. The duration statistics of all datasets are rounded up to an integer hour. For MuST-C, the \"8 Euro langs\" is short for \"8 European languages\". Europarl-ST contains the speech translation between 9 European languages.",
"cite_spans": [],
"ref_spans": [
{
"start": 99,
"end": 106,
"text": "Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "\u2022 Simultaneous Translation corpora are constructed by transcribing lecturers' speeches and the streaming utterance of human interpreters.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The main difference between these two kinds of corpora lies in the way that the translations are generated. The translations in Speech Translation corpora are generated based on complete audios or their transcripts, while the translations in Simultaneous Translation corpora are transcribed from real-time human interpretation. Existing research on Speech Translation mainly focused on the translation between English and Indo-European languages 1 , with little attention paid to that between Chinese (Zh) and English. One of the reasons is the scarcity of public Zh\u2194En From:00:00:00 To:00:00:03",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Streaming Transcript Figure 1 : The process of constructing the training set and development/test sets (dev/test). The difference between the two processes is that for the training set we first split audio into sentences and then get the ASR and transcript for each sentence, while for the dev/test sets we record the real-time ASR and transcript, the sentence splitting is only used to generate translations of segmented sentences.",
"cite_spans": [],
"ref_spans": [
{
"start": 21,
"end": 29,
"text": "Figure 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Translation",
"sec_num": null
},
{
"text": "speech translation corpora. Among the public corpora, only MSLT (Federmann and Lewis, 2017) and Covost (Wang et al., 2020a,b) contains Zh\u2194En speech translation, as shown in Table 1 . But the total volume of them on Zh\u2192En translation is merely about 30 hours, which is too small to train data-hungry neural models. Some studies explore Zh\u2192En Simultaneous Translation (Ma et al., 2019; Zhang et al., 2020) . However, they take text translation datasets to simulate real-time translation scenarios because of the lack of simultaneous translation corpus.",
"cite_spans": [
{
"start": 64,
"end": 91,
"text": "(Federmann and Lewis, 2017)",
"ref_id": "BIBREF7"
},
{
"start": 103,
"end": 125,
"text": "(Wang et al., 2020a,b)",
"ref_id": null
},
{
"start": 366,
"end": 383,
"text": "(Ma et al., 2019;",
"ref_id": "BIBREF13"
},
{
"start": 384,
"end": 403,
"text": "Zhang et al., 2020)",
"ref_id": "BIBREF29"
}
],
"ref_spans": [
{
"start": 173,
"end": 180,
"text": "Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Translation",
"sec_num": null
},
{
"text": "To promote the research on Chinese-English speech translation, as well as evaluating the translation quality in real simultaneous interpretation environments, we construct BSTC, a large-scale Zh\u2192En speech translation and simultaneous translation dataset including approximately 68 hours of Mandarin speech data with their automatic recognition results, manual transcripts, and translations. Our contributions are:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Translation",
"sec_num": null
},
{
"text": "\u2022 We propose the first large-scale (68 hours) Chinese-English Speech Translation corpus. Its training set is a four-way parallel dataset of Mandarin audio, transcripts, ASR lattices, and translations.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Translation",
"sec_num": null
},
{
"text": "\u2022 The proposed dev and test set constitutes the first high-quality Simultaneous Translation dataset of over 3-hour Mandarin speech, together with its streaming transcript, streaming ASR results, and high-quality translation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Translation",
"sec_num": null
},
{
"text": "\u2022 We have organized two simultaneous interpretation tasks 2 to promote research in this field and deployed a strong benchmark on this dataset.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Translation",
"sec_num": null
},
{
"text": "\u2022 The proposed dataset can also be taken as 1) a Chinese Spelling error Correction (CSC) corpus containing pairs of ASR results and corresponding manual transcripts or 2) a Zh\u2192En Document Translation dataset with contextaware translations.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Translation",
"sec_num": null
},
{
"text": "BSTC is created to fill the gap in Zh\u2192En speech translation, in terms of both size and quality. To achieve these objectives, we start by collecting approximate 68 hours of mandarin speeches from three TED-like content producers: BIT 3 , tndao.com 4 , and zaojiu.com 5 . The speeches involve a wide range of domains, including IT, economy, culture, biology, arts, etc. We randomly extract several talks from the dataset and divide them into the development and test set.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dataset Description",
"sec_num": "2"
},
{
"text": "For the training set, we manually tag timestamps to split the audio into sentences, transcribe each sentence and ask professional translators to produce the English translations. The translation is generated based on the understanding of the entire talk and is faithful and coherent as a whole. To facilitate the research on robust speech translation, we also provide the top-5 ASR results for each segmented speech produced by SMLTA 6 , a streaming multi- Table 2 : The summary of our proposed speech translation data. layer truncated attention ASR model. Figure 1 (a) shows the construction process of the training set, together with an example of a segmented sentence.",
"cite_spans": [],
"ref_spans": [
{
"start": 457,
"end": 464,
"text": "Table 2",
"ref_id": null
},
{
"start": 557,
"end": 565,
"text": "Figure 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Training set",
"sec_num": "2.1"
},
{
"text": "For the development (dev) set and test set, we consider the simultaneous translation scenario and provide the streaming transcripts and streaming ASR results, as shown in Figure 1 (b). The streaming transcripts are produced by turning each nwords (a word means a Chinese character here) sentence to n lines word by word with length 1, 2, ..., n. We use the real-time recognition results of each speech, rather than the recognition of each sentence-segmented audio. This is to simulate the simultaneous interpreting scenario, in which the input is streaming text, rather than segmented sentences. Table 3 : The WER and coverage of different subsets of the training set with the length difference \u2206 len between transcript and asr lower than or equal to d len .",
"cite_spans": [],
"ref_spans": [
{
"start": 171,
"end": 179,
"text": "Figure 1",
"ref_id": null
},
{
"start": 596,
"end": 603,
"text": "Table 3",
"ref_id": null
}
],
"eq_spans": [],
"section": "Dev/Test set",
"sec_num": "2.2"
},
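The streaming-transcript construction described above is simple enough to restate in code. The following is a minimal sketch, assuming character-level prefixes as in the paper (where a "word" is a Chinese character); the function name and the example sentence are illustrative only, not part of the released tooling.

```python
def streaming_prefixes(sentence: str):
    """Expand an n-character sentence into n growing prefixes of length
    1, 2, ..., n, mimicking how the dev/test streaming transcripts are built."""
    return [sentence[:i] for i in range(1, len(sentence) + 1)]

# Illustrative usage with a hypothetical 5-character sentence:
for prefix in streaming_prefixes("我们开始吧"):
    print(prefix)   # prints 我 / 我们 / 我们开 / 我们开始 / 我们开始吧
```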
{
"text": "We summarize the statistics of our dataset in Table 2 . The distribution of talk length and utterance length in the training set is illustrated in Figure 2 and Figure 3 , respectively. The average number of utterances per talk is 176.3 in the training set, 59.8 in the dev set, and 162.5 in the test set. And the average utterance length is 27.14 in the training set, 27.26 in the dev set, and 26.49 in the test set. We also calculate the word error rate 7 (WER) of the ASR system on the three datasets. As shown in Table 2 , the WER of the training set is 27.90%, significantly higher than that of the dev and testset. This is due to the way of audio segmentation before recognition: some audio clips lose some parts in acoustic truncation, resulting in incomplete ASR results. We count the length difference of each <transcription, asr> pair, i.e., \u2206 len = |len(transcription) \u2212 len(asr)|, and recalculate the WER of pairs whose length difference is within a certain range. The WER and coverage of these subsets are listed in Table 3 . Note that when the asr and transcript with equal length (\u2206 len \u2264 0), the WER is only 5.87%. For the length difference in a relatively regular range (e.g, \u2206 len \u2264 15), the WER is also relatively low (WER=15.23%).",
"cite_spans": [],
"ref_spans": [
{
"start": 46,
"end": 54,
"text": "Table 2",
"ref_id": null
},
{
"start": 148,
"end": 156,
"text": "Figure 2",
"ref_id": null
},
{
"start": 161,
"end": 169,
"text": "Figure 3",
"ref_id": null
},
{
"start": 517,
"end": 524,
"text": "Table 2",
"ref_id": null
},
{
"start": 1029,
"end": 1036,
"text": "Table 3",
"ref_id": null
}
],
"eq_spans": [],
"section": "Statistics and Dataset Features",
"sec_num": "2.3"
},
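The Δlen analysis in Table 3 can be reproduced along the following lines: compute the character-level length difference of every <transcription, asr> pair, keep the pairs within a chosen bound d_len, and measure WER on that subset. This is a sketch under the assumption of character-level tokens; the paper itself used the asr-evaluation tool cited in the footnotes, so exact numbers may differ.

```python
def edit_distance(ref, hyp):
    """Levenshtein distance between two token sequences."""
    d = [[i + j if i * j == 0 else 0 for j in range(len(hyp) + 1)]
         for i in range(len(ref) + 1)]
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            d[i][j] = min(d[i - 1][j] + 1, d[i][j - 1] + 1, d[i - 1][j - 1] + cost)
    return d[-1][-1]

def subset_wer(pairs, d_len):
    """WER and coverage of the subset of (transcription, asr) string pairs whose
    length difference |len(t) - len(a)| is at most d_len; tokens are characters."""
    subset = [(t, a) for t, a in pairs if abs(len(t) - len(a)) <= d_len]
    errors = sum(edit_distance(list(t), list(a)) for t, a in subset)
    ref_len = sum(len(t) for t, _ in subset)
    wer = errors / ref_len if ref_len else 0.0
    coverage = len(subset) / len(pairs) if pairs else 0.0
    return wer, coverage
```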
{
"text": "Besides, there is a difference between our dataset and the existing speech translation corpora. In our dataset, speech irregularities are kept in transcrip-BLEU AP Omissions A 24.20 83.0% 53% B 17.14 62.8% 47% C 25.18 76.5% 53% Table 4 : Comparison of the simultaneous interpretation results of three interpreters (A, B, and C) on the BSTC test set. \"AP\" is the Acceptability and the \"Omissions\" indicates the proportion of missing translation in all translation errors.",
"cite_spans": [],
"ref_spans": [
{
"start": 228,
"end": 235,
"text": "Table 4",
"ref_id": null
}
],
"eq_spans": [],
"section": "Statistics and Dataset Features",
"sec_num": "2.3"
},
{
"text": "tion while omitted in translation (eg. filler words like \"\u55ef, \u5443, \u554a\", unconscious repetitions like \"\u8fd9 \u4e2a\u8fd9\u4e2a\u5462\" and some disfluencies), which can be used to evaluate the robustness of the NMT model dealing with spoken language. Some other largescale speech translation datasets (Kocabiyikoglu et al., 2018; Di Gangi et al., 2019) , on the contrary, ignore these speech irregularities in the transcript.",
"cite_spans": [
{
"start": 272,
"end": 300,
"text": "(Kocabiyikoglu et al., 2018;",
"ref_id": "BIBREF12"
},
{
"start": 301,
"end": 323,
"text": "Di Gangi et al., 2019)",
"ref_id": "BIBREF5"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Statistics and Dataset Features",
"sec_num": "2.3"
},
{
"text": "We further ask three experienced interpreters (A, B, and C) with interpreting experience ranging from four to nine years to interpret the six talks of the testset, in a mock conference setting 8 .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Human Interpretation",
"sec_num": "2.4"
},
{
"text": "To evaluate their translation quality, we also ask human translators to evaluate the transcribed interpretation from multiple aspects: adequacy, fluency, and correctness:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Human Interpretation",
"sec_num": "2.4"
},
{
"text": "\u2022 Rank1: The translation contains no obvious errors.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Human Interpretation",
"sec_num": "2.4"
},
{
"text": "\u2022 Rank2: The translation is comprehensible and adequate, but with minor errors such as incorrect function words and less fluent phrases.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Human Interpretation",
"sec_num": "2.4"
},
{
"text": "\u2022 Rank3: The translation is incorrect and unacceptable. Table 4 shows the translation quality in BLEU and acceptability, which is calculated as the sum of the percentages of Rank1 and Rank2. It shows that their acceptability ranges from 62.8% to 83.0%, but the acceptability and BLEU are not completely positively correlated. This is because human interpreters routinely omit less important information to overcome their limitations in working memory. Acceptability focuses more on accuracy and faithfulness than adequacy, so it can tolerate information omission. Therefore, some information omitted in human interpretation that results in inferior BLEU { \"offset\": \"105.975\", \"duration\": \"3.287\", \"wav\": \"2.wav\", \"transcript\": \" \"Streaming ASR\": \"translation\": \"In fact, every one of you has multiple digital devices, \"interpreter A\": \"But actually you own several devices, mobile devices, \"interpreter B\": \"But every of you have multiple equipments with you \"interpreter C\": \"But every one of you have multi devices, we have Figure 4 : A segment of one example in our test set\uff0cincluding audio, timelines, transcription, translation, streaming ASR results, and interpretation from three human interpreters (only for testing data). The red characters in \"Streaming ASR\" indicate recognition errors.",
"cite_spans": [],
"ref_spans": [
{
"start": 56,
"end": 63,
"text": "Table 4",
"ref_id": null
},
{
"start": 1027,
"end": 1035,
"text": "Figure 4",
"ref_id": null
}
],
"eq_spans": [],
"section": "Human Interpretation",
"sec_num": "2.4"
},
{
"text": "may not lead to the decrease of acceptability. But BLEU, as a statistical auto-evaluation metric, considers adequacy with the same importance with accuracy. This leads to the discrepancy between BLEU and acceptability. Figure 4 lists a segment from one example in our dataset. Notably, we only supply human interpretations for testing data. Here the \"Streaming ASR\" is the real-time recognition results, in which the \"Type:final\" means that the audio has detected a pause or silence and thus segmented, and will start to recognize a new sentence, while \"Type:partial\" is to continue recognizing the current sentence.",
"cite_spans": [],
"ref_spans": [
{
"start": 219,
"end": 227,
"text": "Figure 4",
"ref_id": null
}
],
"eq_spans": [],
"section": "Human Interpretation",
"sec_num": "2.4"
},
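The "Type:partial"/"Type:final" convention described above suggests a simple way to consume the streaming ASR results: partial records overwrite the running hypothesis of the current sentence, and a final record closes it. A minimal sketch follows, assuming each record is a dict with "type" and "text" keys; this record layout is an assumption for illustration, not a documented schema of the release.

```python
def collect_sentences(stream):
    """Fold a stream of ASR records into finalized sentences.
    `stream` is an iterable of {"type": "partial" | "final", "text": str}
    records (assumed layout)."""
    sentences, current = [], ""
    for record in stream:
        current = record["text"]          # a partial result overwrites the running hypothesis
        if record["type"] == "final":     # a pause/silence was detected: close the sentence
            sentences.append(current)
            current = ""                  # recognition of a new sentence starts
    return sentences
```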
{
"text": "In this section, we introduce our benchmark systems based on the dataset. We conduct experiments on speech translation and simultaneous translation, respectively.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experiments",
"sec_num": "3"
},
{
"text": "To preprocess the Chinese and the English text, we use an open-source Chinese Segmenter 9 , and Moses Tokenizer 10 . After tokenization, we convert all English letters into lower case. To train the MT model, we conduct byte-pair encoding (Sennrich et al., 2016) for both Chinese and English by setting the vocabulary size to 20K and 18K for Chinese and English, respectively. And we use the \"multibleu.pl\" 11 script to evaluate the BLEU score.",
"cite_spans": [
{
"start": 238,
"end": 261,
"text": "(Sennrich et al., 2016)",
"ref_id": "BIBREF18"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Experiments",
"sec_num": "3"
},
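A rough sketch of this preprocessing pipeline is given below, using jieba as the Chinese segmenter, sacremoses as a stand-in for the Moses tokenizer scripts, and subword-nmt for the BPE of Sennrich et al. (2016). The file names are placeholders, and equating the quoted vocabulary sizes with the number of BPE merge operations is an assumption; the benchmark's exact tooling may differ.

```python
import jieba                                  # open-source Chinese segmenter (assumed stand-in)
from sacremoses import MosesTokenizer        # stand-in for the Moses tokenizer.perl script
from subword_nmt.learn_bpe import learn_bpe  # BPE of Sennrich et al. (2016)
from subword_nmt.apply_bpe import BPE

en_tok = MosesTokenizer(lang="en")

def preprocess_zh(line: str) -> str:
    """Segment Chinese into space-separated words."""
    return " ".join(jieba.lcut(line.strip()))

def preprocess_en(line: str) -> str:
    """Tokenize English with Moses and lowercase it."""
    return en_tok.tokenize(line.strip(), return_str=True).lower()

# Learn BPE codes separately for each side; 20K/18K merges mirror the quoted
# vocabulary sizes (an assumption). File names are placeholders.
with open("train.zh.tok") as fin, open("codes.zh", "w") as fout:
    learn_bpe(fin, fout, num_symbols=20000)
with open("train.en.tok") as fin, open("codes.en", "w") as fout:
    learn_bpe(fin, fout, num_symbols=18000)

with open("codes.zh") as f:
    zh_bpe = BPE(f)
print(zh_bpe.process_line(preprocess_zh("今天天气很好")))
```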
{
"text": "Our benchmark is a cascade system that includes an ASR module, a sentence segmentation module, and a machine translation (MT) module.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Benchmark System",
"sec_num": "3.1"
},
{
"text": "\u2022 We use the SMLTA model for ASR, i.e., the streaming transcript/ASR of BSTC is taken as the output of the ASR module.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Benchmark System",
"sec_num": "3.1"
},
{
"text": "\u2022 The sentence segmentation module is to decide when to translate in real-time. We train a classification model based on the Meaningful Unit (MU) method proposed in Zhang et al. (2020) that implements a 5-class classification (MU, comma, period, question mark, and none). The training data of meaningful units are generated automatically from monolingual sentences based on context-aware translation consistency. The model is pre-trained on ERNIE-base (Sun et al., 2020) and fine-tuned on the transcript of the BSTC training set.",
"cite_spans": [
{
"start": 165,
"end": 184,
"text": "Zhang et al. (2020)",
"ref_id": "BIBREF29"
},
{
"start": 452,
"end": 470,
"text": "(Sun et al., 2020)",
"ref_id": "BIBREF22"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Benchmark System",
"sec_num": "3.1"
},
{
"text": "\u2022 Once an MU or a sentence boundary (period or question mark) is detected in the sentence segmentation module, the MT module generates translation for the detected sentence. The MT model is firstly pre-trained on the large-scale WMT19 Chinese-English corpus, then fine-tuned on BSTC. The WMT19 corpus includes 9.1 million sentence pairs collected from different sources, i.e., Newswire, United Nations Parallel Corpus, Websites, etc. We use the big version of Transformer model in the following experiments.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Benchmark System",
"sec_num": "3.1"
},
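As referenced in the sentence segmentation bullet above, that module reduces to a streaming 5-way decision over the growing source prefix. The sketch below shows only that decision loop, with a trivial punctuation heuristic standing in for the fine-tuned classifier; it does not reproduce the MU training-data generation or the ERNIE fine-tuning, and the function names are illustrative.

```python
LABELS = ("MU", "comma", "period", "question", "none")

def classify(prefix: str) -> str:
    """Stand-in for the fine-tuned 5-class classifier (MU, comma, period,
    question mark, none). A trivial punctuation heuristic is used here purely
    so the loop below runs; the real module scores the prefix with the
    pre-trained encoder."""
    if prefix.endswith("。"):
        return "period"
    if prefix.endswith("？"):
        return "question"
    return "none"

def segment_stream(char_stream):
    """Consume streaming characters and emit a segment whenever the classifier
    detects a Meaningful Unit or a sentence boundary."""
    buffer = ""
    for ch in char_stream:
        buffer += ch
        label = classify(buffer)
        if label in ("MU", "period", "question"):
            yield buffer, label    # hand the segment to the MT module
            buffer = ""
    if buffer:
        yield buffer, "none"       # flush whatever remains at the end of the talk
```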
{
"text": "tokenizer/tokenizer.perl 11 https://github.com/moses-smt/ \\mosesdecoder/blob/master/scripts/ generic/multi-bleu.perl",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Benchmark System",
"sec_num": "3.1"
},
{
"text": "Speech translation aims at translating accurately without considering system delay. Therefore, we only perform translation when sentence boundaries (periods and question marks) are detected by the sentence segmentation module.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Performance of Speech Translation",
"sec_num": "3.2"
},
{
"text": "The MT model is firstly trained on WMT, then fine-tuned on 37,901 training pairs of <transcription, translation> and <asr, translation> in two settings, respectively. The purpose of fine-tuning on transcription is to adapt the model to the speech domain, and the purpose of fine-tuning on ASR is to improve the robustness of the MT model against recognition errors. Our model pre-trained on WMT19 achieves a BLEU of 25.1 on Newstest19.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Performance of Speech Translation",
"sec_num": "3.2"
},
{
"text": "We evaluate our systems on the dev/test set using streaming transcription and streaming ASR as inputs. For each talk in the dev/test set, its streaming text is firstly segmented by the sentence segmentation module, then the translation of each segmentation is concatenated into one long sentence to evaluate the BLEU score. The results are listed in Table 5 . Note that the great gap of BLEU in dev and test sets is that, the dev set has only one reference while the testset has 4 references. Contribution of fine-tuning on speech translation data: The systems pre-trained on WMT obtain an absolute improvement both on clean and noisy input by fine-tuning on <transcription, translation>. The performance of the former model increases by 4.35 BLEU score on average and the latter model obtains 1.93 BLEU score improvement on average. This indicates the transcribed training data can still bring large improvement after pre-training on large-scale training corpus. This probably because it is closer to the test set in terms of the domain (speech) and noise (disfluencies in spoken language).",
"cite_spans": [],
"ref_spans": [
{
"start": 350,
"end": 357,
"text": "Table 5",
"ref_id": "TABREF5"
}
],
"eq_spans": [],
"section": "Performance of Speech Translation",
"sec_num": "3.2"
},
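The talk-level evaluation described above can be sketched as follows: the translations of the detected segments of a talk are concatenated into one long hypothesis and scored against the talk-level reference(s). sacrebleu is used here for convenience; the paper reports scores from the multi-bleu.perl script, so absolute values may differ slightly.

```python
import sacrebleu

def talk_level_bleu(segment_translations, references):
    """Concatenate per-segment translations of one talk into a single
    hypothesis and score it against one or more reference translations.
    `segment_translations`: list of strings, one per detected segment.
    `references`: list of reference strings for the whole talk
    (1 for the dev set, 4 for the test set)."""
    hypothesis = " ".join(segment_translations)
    return sacrebleu.corpus_bleu([hypothesis], [[r] for r in references]).score
```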
{
"text": "Training on the corpus containing the ASR errors can be effective to improve the robustness of the NMT model. This can be proved by fine-tuning on the <ASR, translation> pairs. As shown in the last row of Table 5 , the pre-trained model improves 2.93 and 2.59 BLEU scores on average for testing on streaming transcript and streaming ASR, respectively. This manifests that compared with fine-tuning the clean transcription, the model finetuned on ASR is less sensitive to false recognition results of ASR.",
"cite_spans": [],
"ref_spans": [
{
"start": 205,
"end": 212,
"text": "Table 5",
"ref_id": "TABREF5"
}
],
"eq_spans": [],
"section": "Contribution of fine-tuning on noisy data:",
"sec_num": null
},
{
"text": "Different from speech translation, the simultaneous translation should balance translation quality and latency. Therefore, we fix the ASR and MT modules to evaluate our system under different sentence segmentation results. In simultaneous translation, once an MU or a sentence boundary is detected, the MU or sentence is translated immediately. In order to maintain coherent and consistent paragraph translation, we perform context-aware translation following that except for the first segment in a sentence, the subsequent segments are translated with force-decoding.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Performance of Simultaneous Translation",
"sec_num": "3.3"
},
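The force-decoding step mentioned above can be pictured as prefix-constrained translation of a growing source sentence: each new segment is translated together with everything read so far, while the target tokens already shown to the audience are forced, so earlier output is never revised. The sketch below is a minimal illustration; translate_with_prefix is a placeholder for the MT model's prefix-constrained decoder, not an actual API of the benchmark.

```python
def translate_sentence_incrementally(segments, translate_with_prefix):
    """Context-aware translation of the segments of one source sentence.
    The first segment is translated normally; every later segment is translated
    with the previously emitted target tokens forced as a prefix.
    `translate_with_prefix(source, forced_prefix)` is a placeholder returning a
    list of target tokens for the source text read so far."""
    source_so_far, target_so_far = "", []
    for segment in segments:
        source_so_far += segment
        new_target = translate_with_prefix(source_so_far, forced_prefix=target_so_far)
        yield new_target[len(target_so_far):]   # only the continuation is shown to the audience
        target_so_far = new_target
```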
{
"text": "The performance of system on the dev set and test set is listed in Figure 5 and Figure 6 , respec-tively 12 . We use BLEU to evaluate the translation quality and use average lagging (AL) (Ma et al., 2019) and Consecutive Wait (CW) (Gu et al., 2017) as latency metrics. \u03b4 is the hyperparameter defined in Zhang et al. (2020) as the thresold of sentence segmentation module. It shows that the translation quality improves consistently with the increase of latency. The AL on both dev and test sets ranges from 7 to 12 and the CW ranges from 6 to 11 for points of simultaneous translation. In addition, we also draw the full-sentence translation results, as denoted by \"ASR-Sentence\" and \"Transcript-Sentences\" in the two figures. The fullsentence translation implements a high-latency policy, in which a translation is only triggered when a sentence is received. As shown in the figures, the delay of both \"ASR-Sentence\" and \"Transcript-Sentences\" is much higher than the simultaneous translation results.",
"cite_spans": [
{
"start": 187,
"end": 204,
"text": "(Ma et al., 2019)",
"ref_id": "BIBREF13"
},
{
"start": 231,
"end": 248,
"text": "(Gu et al., 2017)",
"ref_id": "BIBREF8"
},
{
"start": 304,
"end": 323,
"text": "Zhang et al. (2020)",
"ref_id": "BIBREF29"
}
],
"ref_spans": [
{
"start": 67,
"end": 75,
"text": "Figure 5",
"ref_id": "FIGREF3"
},
{
"start": 80,
"end": 88,
"text": "Figure 6",
"ref_id": "FIGREF4"
}
],
"eq_spans": [],
"section": "Performance of Simultaneous Translation",
"sec_num": "3.3"
},
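For reference, average lagging can be computed from the read/write trace of the system. The sketch below follows the AL definition of Ma et al. (2019): with g(t) the number of source tokens read before emitting the t-th target token and r = |y|/|x|, AL averages g(t) - (t-1)/r over the target positions up to the first one emitted after the whole source has been read. The example trace is hypothetical; Consecutive Wait (CW) is not sketched here.

```python
def average_lagging(g, src_len, tgt_len):
    """Average Lagging (AL) of Ma et al. (2019).
    `g` is a list where g[t] is the number of source tokens that had been read
    when the (t+1)-th target token was emitted (0-indexed); `src_len` and
    `tgt_len` are the source and target lengths."""
    r = tgt_len / src_len
    # tau: index of the first target token emitted after the full source was read
    tau = next((t for t, gt in enumerate(g) if gt >= src_len), len(g) - 1)
    lags = [g[t] - t / r for t in range(tau + 1)]
    return sum(lags) / len(lags)

# Hypothetical wait-3-style trace on a 10-token source and 10-token target:
g = [3, 4, 5, 6, 7, 8, 9, 10, 10, 10]
print(average_lagging(g, src_len=10, tgt_len=10))   # 3.0
```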
{
"text": "In this paper, we release a challenging dataset for the research on Chinese-English speech translation and simultaneous translation. Based on this Table 6 : Specific data corresponding to Figure 5 and Figure 6 . dataset, we report a competitive benchmark based on a cascade system. In the future, we will expand this dataset, and propose an effective method to develop an End-to-End speech translation model.",
"cite_spans": [],
"ref_spans": [
{
"start": 147,
"end": 154,
"text": "Table 6",
"ref_id": null
},
{
"start": 188,
"end": 196,
"text": "Figure 5",
"ref_id": "FIGREF3"
},
{
"start": 201,
"end": 209,
"text": "Figure 6",
"ref_id": "FIGREF4"
}
],
"eq_spans": [],
"section": "Conclusion and Future Work",
"sec_num": "4"
},
{
"text": "Indo-European languages are a large language family. arXiv:2104.03575v3 [cs.CL] 19 Apr 2021",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "We organized two shared tasks on the 1st and 2nd Workshop on Automatic Simultaneous Translation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "https://bit.baidu.com 4 http://www.tndao.com/about-tndao 5 https://www.zaojiu.com/ 6 http://research.baidu.com/Blog/ index-view?id=109",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "WER tool: https://github.com/belambert/ asr-evaluation",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "We play the video of the speech, just like in a real simultaneous interpretation scene",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "https://github.com/fxsjy/jieba 10 https://github.com/moses-smt/ mosesdecoder/blob/master/scripts/",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "We list detailed values inTable 6",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Monotonic infinite lookback attention for simultaneous machine translation",
"authors": [
{
"first": "Naveen",
"middle": [],
"last": "Arivazhagan",
"suffix": ""
},
{
"first": "Colin",
"middle": [],
"last": "Cherry",
"suffix": ""
},
{
"first": "Wolfgang",
"middle": [],
"last": "Macherey",
"suffix": ""
},
{
"first": "Chung-Cheng",
"middle": [],
"last": "Chiu",
"suffix": ""
},
{
"first": "Semih",
"middle": [],
"last": "Yavuz",
"suffix": ""
},
{
"first": "Ruoming",
"middle": [],
"last": "Pang",
"suffix": ""
},
{
"first": "Wei",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Colin",
"middle": [],
"last": "Raffel",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "1313--1323",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Naveen Arivazhagan, Colin Cherry, Wolfgang Macherey, Chung-Cheng Chiu, Semih Yavuz, Ruoming Pang, Wei Li, and Colin Raffel. 2019. Monotonic infinite lookback attention for simulta- neous machine translation. In Proceedings of the 57th Annual Meeting of the Association for Com- putational Linguistics, pages 1313-1323, Florence, Italy. Association for Computational Linguistics.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Listen and translate: A proof of concept for end-to-end speech-to-text translation",
"authors": [
{
"first": "Alexandre",
"middle": [],
"last": "B\u00e9rard",
"suffix": ""
},
{
"first": "Olivier",
"middle": [],
"last": "Pietquin",
"suffix": ""
},
{
"first": "Christophe",
"middle": [],
"last": "Servan",
"suffix": ""
},
{
"first": "Laurent",
"middle": [],
"last": "Besacier",
"suffix": ""
}
],
"year": 2016,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"arXiv": [
"arXiv:1612.01744"
]
},
"num": null,
"urls": [],
"raw_text": "Alexandre B\u00e9rard, Olivier Pietquin, Christophe Servan, and Laurent Besacier. 2016. Listen and translate: A proof of concept for end-to-end speech-to-text trans- lation. arXiv preprint arXiv:1612.01744.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "A corpus of spontaneous speech in lectures: The KIT lecture corpus for spoken language processing and translation",
"authors": [
{
"first": "Eunah",
"middle": [],
"last": "Cho",
"suffix": ""
},
{
"first": "Sarah",
"middle": [],
"last": "F\u00fcnfer",
"suffix": ""
},
{
"first": "Sebastian",
"middle": [],
"last": "St\u00fcker",
"suffix": ""
},
{
"first": "Alex",
"middle": [],
"last": "Waibel",
"suffix": ""
}
],
"year": 2014,
"venue": "Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14)",
"volume": "",
"issue": "",
"pages": "1554--1559",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Eunah Cho, Sarah F\u00fcnfer, Sebastian St\u00fcker, and Alex Waibel. 2014. A corpus of spontaneous speech in lectures: The KIT lecture corpus for spoken lan- guage processing and translation. In Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14), pages 1554- 1559, Reykjavik, Iceland. European Language Re- sources Association (ELRA).",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Can neural machine translation do simultaneous translation?",
"authors": [
{
"first": "Kyunghyun",
"middle": [],
"last": "Cho",
"suffix": ""
},
{
"first": "Masha",
"middle": [],
"last": "Esipova",
"suffix": ""
}
],
"year": 2016,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"arXiv": [
"arXiv:1606.02012"
]
},
"num": null,
"urls": [],
"raw_text": "Kyunghyun Cho and Masha Esipova. 2016. Can neu- ral machine translation do simultaneous translation? arXiv preprint arXiv:1606.02012.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Unsupervised cross-modal alignment of speech and text embedding spaces",
"authors": [
{
"first": "Yu-An",
"middle": [],
"last": "Chung",
"suffix": ""
},
{
"first": "Wei-Hung",
"middle": [],
"last": "Weng",
"suffix": ""
},
{
"first": "Schrasing",
"middle": [],
"last": "Tong",
"suffix": ""
},
{
"first": "James",
"middle": [],
"last": "Glass",
"suffix": ""
}
],
"year": 2018,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"arXiv": [
"arXiv:1805.07467"
]
},
"num": null,
"urls": [],
"raw_text": "Yu-An Chung, Wei-Hung Weng, Schrasing Tong, and James Glass. 2018. Unsupervised cross-modal alignment of speech and text embedding spaces. arXiv preprint arXiv:1805.07467.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Must-c: a multilingual speech translation corpus",
"authors": [
{
"first": "Di",
"middle": [],
"last": "Mattia",
"suffix": ""
},
{
"first": "Roldano",
"middle": [],
"last": "Gangi",
"suffix": ""
},
{
"first": "Luisa",
"middle": [],
"last": "Cattoni",
"suffix": ""
},
{
"first": "Matteo",
"middle": [],
"last": "Bentivogli",
"suffix": ""
},
{
"first": "Marco",
"middle": [],
"last": "Negri",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Turchi",
"suffix": ""
}
],
"year": 2019,
"venue": "2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
"volume": "",
"issue": "",
"pages": "2012--2017",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mattia A Di Gangi, Roldano Cattoni, Luisa Bentivogli, Matteo Negri, and Marco Turchi. 2019. Must-c: a multilingual speech translation corpus. In 2019 Con- ference of the North American Chapter of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies, pages 2012-2017. Association for Computational Linguistics.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Microsoft speech language translation (mslt) corpus: The iwslt 2016 release for english, french and german",
"authors": [
{
"first": "Christian",
"middle": [],
"last": "Federmann",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "William",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Lewis",
"suffix": ""
}
],
"year": 2016,
"venue": "International Workshop on Spoken Language Translation",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Christian Federmann and William D Lewis. 2016. Mi- crosoft speech language translation (mslt) corpus: The iwslt 2016 release for english, french and ger- man. In International Workshop on Spoken Lan- guage Translation.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "The microsoft speech language translation (mslt) corpus for chinese and japanese: conversational test data for machine translation and speech recognition. Proceedings of the 16th Machine Translation Summit",
"authors": [
{
"first": "Christian",
"middle": [],
"last": "Federmann",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "William",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Lewis",
"suffix": ""
}
],
"year": 2017,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Christian Federmann and William D Lewis. 2017. The microsoft speech language translation (mslt) corpus for chinese and japanese: conversational test data for machine translation and speech recognition. Pro- ceedings of the 16th Machine Translation Summit, Nagoya, Japan.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Learning to translate in real-time with neural machine translation",
"authors": [
{
"first": "Jiatao",
"middle": [],
"last": "Gu",
"suffix": ""
},
{
"first": "Graham",
"middle": [],
"last": "Neubig",
"suffix": ""
},
{
"first": "Kyunghyun",
"middle": [],
"last": "Cho",
"suffix": ""
},
{
"first": "O",
"middle": [
"K"
],
"last": "Victor",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Li",
"suffix": ""
}
],
"year": 2017,
"venue": "Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics",
"volume": "1",
"issue": "",
"pages": "1053--1062",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jiatao Gu, Graham Neubig, Kyunghyun Cho, and Vic- tor OK Li. 2017. Learning to translate in real-time with neural machine translation. In Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics: Volume 1, Long Papers, pages 1053-1062.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Tomoki Hayashi, and Shinji Watanabe. 2020. Espnet-st: Allin-one speech translation toolkit",
"authors": [
{
"first": "Hirofumi",
"middle": [],
"last": "Inaguma",
"suffix": ""
},
{
"first": "Shun",
"middle": [],
"last": "Kiyono",
"suffix": ""
},
{
"first": "Kevin",
"middle": [],
"last": "Duh",
"suffix": ""
},
{
"first": "Shigeki",
"middle": [],
"last": "Karita",
"suffix": ""
}
],
"year": null,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"arXiv": [
"arXiv:2004.10234"
]
},
"num": null,
"urls": [],
"raw_text": "Hirofumi Inaguma, Shun Kiyono, Kevin Duh, Shigeki Karita, Nelson Enrique Yalta Soplin, Tomoki Hayashi, and Shinji Watanabe. 2020. Espnet-st: All- in-one speech translation toolkit. arXiv preprint arXiv:2004.10234.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Europarl-st: A multilingual corpus for speech translation of parliamentary debates",
"authors": [
{
"first": "Javier",
"middle": [],
"last": "Iranzo-S\u00e1nchez",
"suffix": ""
},
{
"first": "Joan",
"middle": [
"Albert"
],
"last": "Silvestre-Cerd\u00e0",
"suffix": ""
},
{
"first": "Javier",
"middle": [],
"last": "Jorge",
"suffix": ""
},
{
"first": "Nahuel",
"middle": [],
"last": "Rosell\u00f3",
"suffix": ""
},
{
"first": "Adri\u00e0",
"middle": [],
"last": "Gim\u00e9nez",
"suffix": ""
},
{
"first": "Albert",
"middle": [],
"last": "Sanchis",
"suffix": ""
},
{
"first": "Jorge",
"middle": [],
"last": "Civera",
"suffix": ""
},
{
"first": "Alfons",
"middle": [],
"last": "Juan",
"suffix": ""
}
],
"year": 2020,
"venue": "ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)",
"volume": "",
"issue": "",
"pages": "8229--8233",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Javier Iranzo-S\u00e1nchez, Joan Albert Silvestre-Cerd\u00e0, Javier Jorge, Nahuel Rosell\u00f3, Adri\u00e0 Gim\u00e9nez, Al- bert Sanchis, Jorge Civera, and Alfons Juan. 2020. Europarl-st: A multilingual corpus for speech trans- lation of parliamentary debates. In ICASSP 2020- 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 8229-8233. IEEE.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Libri-light: A benchmark for asr with limited or no supervision",
"authors": [
{
"first": "Jacob",
"middle": [],
"last": "Kahn",
"suffix": ""
},
{
"first": "Morgane",
"middle": [],
"last": "Rivi\u00e8re",
"suffix": ""
},
{
"first": "Weiyi",
"middle": [],
"last": "Zheng",
"suffix": ""
},
{
"first": "Evgeny",
"middle": [],
"last": "Kharitonov",
"suffix": ""
},
{
"first": "Qiantong",
"middle": [],
"last": "Xu",
"suffix": ""
},
{
"first": "Pierre-Emmanuel",
"middle": [],
"last": "Mazar\u00e9",
"suffix": ""
},
{
"first": "Julien",
"middle": [],
"last": "Karadayi",
"suffix": ""
},
{
"first": "Vitaliy",
"middle": [],
"last": "Liptchinsky",
"suffix": ""
},
{
"first": "Ronan",
"middle": [],
"last": "Collobert",
"suffix": ""
},
{
"first": "Christian",
"middle": [],
"last": "Fuegen",
"suffix": ""
}
],
"year": 2020,
"venue": "ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)",
"volume": "",
"issue": "",
"pages": "7669--7673",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jacob Kahn, Morgane Rivi\u00e8re, Weiyi Zheng, Evgeny Kharitonov, Qiantong Xu, Pierre-Emmanuel Mazar\u00e9, Julien Karadayi, Vitaliy Liptchinsky, Ronan Collobert, Christian Fuegen, et al. 2020. Libri-light: A benchmark for asr with limited or no supervision. In ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 7669-7673. IEEE.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Augmenting librispeech with french translations: A multimodal corpus for direct speech translation evaluation. Language Resources and Evaluation",
"authors": [
{
"first": "Laurent",
"middle": [],
"last": "Ali Can Kocabiyikoglu",
"suffix": ""
},
{
"first": "Olivier",
"middle": [],
"last": "Besacier",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Kraif",
"suffix": ""
}
],
"year": 2018,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Ali Can Kocabiyikoglu, Laurent Besacier, and Olivier Kraif. 2018. Augmenting librispeech with french translations: A multimodal corpus for direct speech translation evaluation. Language Resources and Evaluation.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "STACL: simultaneous translation with integrated anticipation and controllable latency",
"authors": [
{
"first": "Mingbo",
"middle": [],
"last": "Ma",
"suffix": ""
},
{
"first": "Liang",
"middle": [],
"last": "Huang",
"suffix": ""
},
{
"first": "Hao",
"middle": [],
"last": "Xiong",
"suffix": ""
},
{
"first": "Kaibo",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Chuanqiang",
"middle": [],
"last": "Zhang",
"suffix": ""
},
{
"first": "Zhongjun",
"middle": [],
"last": "He",
"suffix": ""
},
{
"first": "Hairong",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Xing",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Haifeng",
"middle": [],
"last": "Wang",
"suffix": ""
}
],
"year": 2019,
"venue": "ACL 2019",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mingbo Ma, Liang Huang, Hao Xiong, Kaibo Liu, Chuanqiang Zhang, Zhongjun He, Hairong Liu, Xing Li, and Haifeng Wang. 2019. STACL: si- multaneous translation with integrated anticipation and controllable latency. In ACL 2019, volume abs/1810.08398.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Low-latency neural speech translation",
"authors": [
{
"first": "Jan",
"middle": [],
"last": "Niehues",
"suffix": ""
},
{
"first": "Quan",
"middle": [],
"last": "Pham",
"suffix": ""
},
{
"first": "Thanh",
"middle": [
"Le"
],
"last": "Ha",
"suffix": ""
},
{
"first": "Matthias",
"middle": [],
"last": "Sperber",
"suffix": ""
},
{
"first": "Alex",
"middle": [],
"last": "Waibel",
"suffix": ""
}
],
"year": 2018,
"venue": "",
"volume": "",
"issue": "",
"pages": "1293--1297",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jan Niehues, Quan Pham, Thanh Le Ha, Matthias Sperber, and Alex Waibel. 2018. Low-latency neu- ral speech translation. In Interspeech 2018, pages 1293-1297.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Optimizing segmentation strategies for simultaneous speech translation",
"authors": [
{
"first": "Yusuke",
"middle": [],
"last": "Oda",
"suffix": ""
},
{
"first": "Graham",
"middle": [],
"last": "Neubig",
"suffix": ""
},
{
"first": "Sakriani",
"middle": [],
"last": "Sakti",
"suffix": ""
},
{
"first": "Tomoki",
"middle": [],
"last": "Toda",
"suffix": ""
},
{
"first": "Satoshi",
"middle": [],
"last": "Nakamura",
"suffix": ""
}
],
"year": 2014,
"venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics",
"volume": "2",
"issue": "",
"pages": "551--556",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yusuke Oda, Graham Neubig, Sakriani Sakti, Tomoki Toda, and Satoshi Nakamura. 2014. Optimizing seg- mentation strategies for simultaneous speech transla- tion. In Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics (Vol- ume 2: Short Papers), volume 2, pages 551-556.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Automatic translation from parallel speech: Simultaneous interpretation as mt training data",
"authors": [
{
"first": "Matthias",
"middle": [],
"last": "Paulik",
"suffix": ""
},
{
"first": "Alex",
"middle": [],
"last": "Waibel",
"suffix": ""
}
],
"year": 2009,
"venue": "IEEE Workshop on Automatic Speech Recognition & Understanding",
"volume": "",
"issue": "",
"pages": "496--501",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Matthias Paulik and Alex Waibel. 2009. Automatic translation from parallel speech: Simultaneous inter- pretation as mt training data. In 2009 IEEE Work- shop on Automatic Speech Recognition & Under- standing, pages 496-501. IEEE.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "Improved speech-to-text translation with the Fisher and Callhome Spanish-English speech translation corpus",
"authors": [
{
"first": "Matt",
"middle": [],
"last": "Post",
"suffix": ""
},
{
"first": "Gaurav",
"middle": [],
"last": "Kumar",
"suffix": ""
},
{
"first": "Adam",
"middle": [],
"last": "Lopez",
"suffix": ""
},
{
"first": "Damianos",
"middle": [],
"last": "Karakos",
"suffix": ""
}
],
"year": 2013,
"venue": "Proceedings of the International Workshop on Spoken Language Translation (IWSLT)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Matt Post, Gaurav Kumar, Adam Lopez, Damianos Karakos, Chris Callison-Burch, and Sanjeev Khu- danpur. 2013. Improved speech-to-text transla- tion with the Fisher and Callhome Spanish-English speech translation corpus. In Proceedings of the In- ternational Workshop on Spoken Language Transla- tion (IWSLT), Heidelberg, Germany.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "Neural machine translation of rare words with subword units",
"authors": [
{
"first": "Rico",
"middle": [],
"last": "Sennrich",
"suffix": ""
},
{
"first": "Barry",
"middle": [],
"last": "Haddow",
"suffix": ""
},
{
"first": "Alexandra",
"middle": [],
"last": "Birch",
"suffix": ""
}
],
"year": 2016,
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics",
"volume": "1",
"issue": "",
"pages": "1715--1725",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural machine translation of rare words with subword units. In Proceedings of the 54th Annual Meeting of the Association for Computational Lin- guistics (Volume 1: Long Papers), volume 1, pages 1715-1725.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "Collection of a simultaneous translation corpus for comparative analysis",
"authors": [
{
"first": "Hiroaki",
"middle": [],
"last": "Shimizu",
"suffix": ""
},
{
"first": "Graham",
"middle": [],
"last": "Neubig",
"suffix": ""
},
{
"first": "Sakriani",
"middle": [],
"last": "Sakti",
"suffix": ""
},
{
"first": "Tomoki",
"middle": [],
"last": "Toda",
"suffix": ""
},
{
"first": "Satoshi",
"middle": [],
"last": "Nakamura",
"suffix": ""
}
],
"year": 2014,
"venue": "LREC",
"volume": "",
"issue": "",
"pages": "670--673",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hiroaki Shimizu, Graham Neubig, Sakriani Sakti, Tomoki Toda, and Satoshi Nakamura. 2014. Collec- tion of a simultaneous translation corpus for compar- ative analysis. In LREC, pages 670-673. Citeseer.",
"links": null
},
"BIBREF20": {
"ref_id": "b20",
"title": "Attention-passing models for robust and data-efficient end-to-end speech translation",
"authors": [
{
"first": "Matthias",
"middle": [],
"last": "Sperber",
"suffix": ""
},
{
"first": "Graham",
"middle": [],
"last": "Neubig",
"suffix": ""
}
],
"year": 2019,
"venue": "Transactions of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Matthias Sperber, Graham Neubig, Jan Niehues, and Alex Waibel. 2019. Attention-passing models for robust and data-efficient end-to-end speech transla- tion. In Transactions of the Association for Compu- tational Linguistics.",
"links": null
},
"BIBREF21": {
"ref_id": "b21",
"title": "Segmentation strategies for streaming speech translation",
"authors": [
{
"first": "Vivek",
"middle": [],
"last": "Kumar Rangarajan",
"suffix": ""
},
{
"first": "John",
"middle": [],
"last": "Sridhar",
"suffix": ""
},
{
"first": "Srinivas",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Bangalore",
"suffix": ""
}
],
"year": 2013,
"venue": "Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
"volume": "",
"issue": "",
"pages": "230--238",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Vivek Kumar Rangarajan Sridhar, John Chen, Srinivas Bangalore, Andrej Ljolje, and Rathinavelu Chengal- varayan. 2013. Segmentation strategies for stream- ing speech translation. In Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 230-238.",
"links": null
},
"BIBREF22": {
"ref_id": "b22",
"title": "Ernie 2.0: A continual pre-training framework for language understanding",
"authors": [
{
"first": "Yu",
"middle": [],
"last": "Sun",
"suffix": ""
},
{
"first": "Shuohuan",
"middle": [],
"last": "Wang",
"suffix": ""
},
{
"first": "Yukun",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Shikun",
"middle": [],
"last": "Feng",
"suffix": ""
},
{
"first": "Hua",
"middle": [],
"last": "Hao Tian",
"suffix": ""
},
{
"first": "Haifeng",
"middle": [],
"last": "Wu",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Wang",
"suffix": ""
}
],
"year": 2020,
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence",
"volume": "34",
"issue": "",
"pages": "8968--8975",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Hao Tian, Hua Wu, and Haifeng Wang. 2020. Ernie 2.0: A continual pre-training framework for language un- derstanding. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 8968- 8975.",
"links": null
},
"BIBREF23": {
"ref_id": "b23",
"title": "Ciair simultaneous interpretation corpus",
"authors": [
{
"first": "Hitomi",
"middle": [],
"last": "Tohyama",
"suffix": ""
},
{
"first": "Shigeki",
"middle": [],
"last": "Matsubara",
"suffix": ""
},
{
"first": "Koichiro",
"middle": [],
"last": "Ryu",
"suffix": ""
},
{
"first": "Yasuyoshi",
"middle": [],
"last": "Kawaguch",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Inagaki",
"suffix": ""
}
],
"year": 2004,
"venue": "Proc. Oriental COCOSDA",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hitomi Tohyama, Shigeki Matsubara, Koichiro Ryu, N Kawaguch, and Yasuyoshi Inagaki. 2004. Ciair simultaneous interpretation corpus. In Proc. Orien- tal COCOSDA.",
"links": null
},
"BIBREF24": {
"ref_id": "b24",
"title": "Covost: A diverse multilingual speech-to-text translation corpus",
"authors": [
{
"first": "Changhan",
"middle": [],
"last": "Wang",
"suffix": ""
},
{
"first": "Juan",
"middle": [],
"last": "Pino",
"suffix": ""
},
{
"first": "Anne",
"middle": [],
"last": "Wu",
"suffix": ""
},
{
"first": "Jiatao",
"middle": [],
"last": "Gu",
"suffix": ""
}
],
"year": 2020,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"arXiv": [
"arXiv:2002.01320"
]
},
"num": null,
"urls": [],
"raw_text": "Changhan Wang, Juan Pino, Anne Wu, and Jiatao Gu. 2020a. Covost: A diverse multilingual speech-to-text translation corpus. arXiv preprint arXiv:2002.01320.",
"links": null
},
"BIBREF25": {
"ref_id": "b25",
"title": "Covost 2: A massively multilingual speechto-text translation corpus",
"authors": [
{
"first": "Changhan",
"middle": [],
"last": "Wang",
"suffix": ""
},
{
"first": "Anne",
"middle": [],
"last": "Wu",
"suffix": ""
},
{
"first": "Juan",
"middle": [],
"last": "Pino",
"suffix": ""
}
],
"year": 2020,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"arXiv": [
"arXiv:2007.10310"
]
},
"num": null,
"urls": [],
"raw_text": "Changhan Wang, Anne Wu, and Juan Pino. 2020b. Covost 2: A massively multilingual speech- to-text translation corpus. arXiv preprint arXiv:2007.10310.",
"links": null
},
"BIBREF26": {
"ref_id": "b26",
"title": "Sequence-tosequence models can directly translate foreign speech",
"authors": [
{
"first": "J",
"middle": [],
"last": "Ron",
"suffix": ""
},
{
"first": "Jan",
"middle": [],
"last": "Weiss",
"suffix": ""
},
{
"first": "Navdeep",
"middle": [],
"last": "Chorowski",
"suffix": ""
},
{
"first": "Yonghui",
"middle": [],
"last": "Jaitly",
"suffix": ""
},
{
"first": "Zhifeng",
"middle": [],
"last": "Wu",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Chen",
"suffix": ""
}
],
"year": 2017,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"arXiv": [
"arXiv:1703.08581"
]
},
"num": null,
"urls": [],
"raw_text": "Ron J Weiss, Jan Chorowski, Navdeep Jaitly, Yonghui Wu, and Zhifeng Chen. 2017. Sequence-to- sequence models can directly translate foreign speech. arXiv preprint arXiv:1703.08581.",
"links": null
},
"BIBREF27": {
"ref_id": "b27",
"title": "A corpus for amharicenglish speech translation: the case of tourism domain",
"authors": [
{
"first": "Laurent",
"middle": [],
"last": "Michael Melese Woldeyohannis",
"suffix": ""
},
{
"first": "Million",
"middle": [],
"last": "Besacier",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Meshesha",
"suffix": ""
}
],
"year": 2017,
"venue": "International Conference on Information and Communication Technology for Develoment for Africa",
"volume": "",
"issue": "",
"pages": "129--139",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Michael Melese Woldeyohannis, Laurent Besacier, and Million Meshesha. 2017. A corpus for amharic- english speech translation: the case of tourism do- main. In International Conference on Information and Communication Technology for Develoment for Africa, pages 129-139. Springer.",
"links": null
},
"BIBREF28": {
"ref_id": "b28",
"title": "Dutongchuan: Context-aware translation model for simultaneous interpreting",
"authors": [
{
"first": "Hao",
"middle": [],
"last": "Xiong",
"suffix": ""
},
{
"first": "Ruiqing",
"middle": [],
"last": "Zhang",
"suffix": ""
},
{
"first": "Chuanqiang",
"middle": [],
"last": "Zhang",
"suffix": ""
},
{
"first": "Zhongjun",
"middle": [],
"last": "He",
"suffix": ""
},
{
"first": "Hua",
"middle": [],
"last": "Wu",
"suffix": ""
},
{
"first": "Haifeng",
"middle": [],
"last": "Wang",
"suffix": ""
}
],
"year": 2019,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"arXiv": [
"arXiv:1907.12984"
]
},
"num": null,
"urls": [],
"raw_text": "Hao Xiong, Ruiqing Zhang, Chuanqiang Zhang, Zhongjun He, Hua Wu, and Haifeng Wang. 2019. Dutongchuan: Context-aware translation model for simultaneous interpreting. arXiv preprint arXiv:1907.12984.",
"links": null
},
"BIBREF29": {
"ref_id": "b29",
"title": "Learning adaptive segmentation policy for simultaneous translation",
"authors": [
{
"first": "Ruiqing",
"middle": [],
"last": "Zhang",
"suffix": ""
},
{
"first": "Chuanqiang",
"middle": [],
"last": "Zhang",
"suffix": ""
},
{
"first": "Zhongjun",
"middle": [],
"last": "He",
"suffix": ""
},
{
"first": "Hua",
"middle": [],
"last": "Wu",
"suffix": ""
},
{
"first": "Haifeng",
"middle": [],
"last": "Wang",
"suffix": ""
}
],
"year": 2020,
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
"volume": "",
"issue": "",
"pages": "2280--2289",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Ruiqing Zhang, Chuanqiang Zhang, Zhongjun He, Hua Wu, and Haifeng Wang. 2020. Learning adaptive segmentation policy for simultaneous translation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2280-2289, Online. Association for Computa- tional Linguistics.",
"links": null
}
},
"ref_entries": {
"FIGREF1": {
"text": "The distribution of talk length (number of sentences) in the training set. The distribution of utterance length (number of words) in the training set. A word means a Chinese character here.",
"type_str": "figure",
"num": null,
"uris": null
},
"FIGREF3": {
"text": "Translation quality against latency metrics on BSTC development set. \"ASR-Sentence\" and \"Transcript-Sentence\" denotes the results of full-sentence translation with ASR input and transcript input, respectively.",
"type_str": "figure",
"num": null,
"uris": null
},
"FIGREF4": {
"text": "Translation quality against latency metrics on BSTC testset.",
"type_str": "figure",
"num": null,
"uris": null
},
"TABREF4": {
"html": null,
"num": null,
"type_str": "table",
"text": "Finetune on <transcript, translation> 23.47(2.69\u2191) 41.14(6.01\u2191) 19.68(1.46\u2191) 35.71(2.39\u2191) Finetune on <ASR, translation> 22.53(1.75\u2191) 39.23(4.1\u2191) 19.82(1.6\u2191) 36.89(3.57\u2191)",
"content": "<table><tr><td>Systems</td><td colspan=\"2\">Test on Transcript Dev Test</td><td colspan=\"2\">Test on ASR Dev Test</td></tr><tr><td>pre-train on WMT</td><td>20.78</td><td>35.13</td><td>18.22</td><td>33.32</td></tr></table>"
},
"TABREF5": {
"html": null,
"num": null,
"type_str": "table",
"text": "The results of benchmark trained on different training datasets, and evaluated by streaming transcription and ASR input.",
"content": "<table/>"
}
}
}
}