|
{ |
|
"paper_id": "O06-3004", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T08:07:49.500991Z" |
|
}, |
|
"title": "A Comparative Study of Four Language Identification Systems", |
|
"authors": [ |
|
{ |
|
"first": "Bin", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "mabin@i2r.a-star.edu.sg" |
|
}, |
|
{ |
|
"first": "Haizhou", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "In this paper, we compare four typical spoken language identification (LID) systems. We introduce a novel acoustic segment modeling approach for the LID system frontend. It is assumed that the overall sound characteristics of all spoken languages can be covered by a universal collection of acoustic segment models (ASMs) without imposing strict phonetic definitions. The ASM models are used to decode spoken utterances into strings of segment units in parallel phone recognition (PPR) and universal phone recognition (UPR) frontends. We also propose a novel approach to LID system backend design, where the statistics of ASMs and their co-occurrences are used to form ASM-derived feature vectors, in a vector space modeling (VSM) approach, as opposed to the traditional language modeling (LM) approach, in order to discriminate between individual spoken languages. Four LID systems are built to evaluate the effects of two different frontends and two different backends. We evaluate the four systems based on the 1996, 2003 and 2005 NIST Language Recognition Evaluation (LRE) tasks. The results show that the proposed ASM-based VSM framework reduces the LID error rate quite significantly when compared with the widely-used parallel PRLM method. Among the four configurations, the PPR-VSM system demonstrates the best performance across all of the tasks.", |
|
"pdf_parse": { |
|
"paper_id": "O06-3004", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "In this paper, we compare four typical spoken language identification (LID) systems. We introduce a novel acoustic segment modeling approach for the LID system frontend. It is assumed that the overall sound characteristics of all spoken languages can be covered by a universal collection of acoustic segment models (ASMs) without imposing strict phonetic definitions. The ASM models are used to decode spoken utterances into strings of segment units in parallel phone recognition (PPR) and universal phone recognition (UPR) frontends. We also propose a novel approach to LID system backend design, where the statistics of ASMs and their co-occurrences are used to form ASM-derived feature vectors, in a vector space modeling (VSM) approach, as opposed to the traditional language modeling (LM) approach, in order to discriminate between individual spoken languages. Four LID systems are built to evaluate the effects of two different frontends and two different backends. We evaluate the four systems based on the 1996, 2003 and 2005 NIST Language Recognition Evaluation (LRE) tasks. The results show that the proposed ASM-based VSM framework reduces the LID error rate quite significantly when compared with the widely-used parallel PRLM method. Among the four configurations, the PPR-VSM system demonstrates the best performance across all of the tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Automatic language identification (LID) is the process of determining the language identity corresponding to a spoken query. It is an important technology in many applications, such as spoken language translation, multilingual speech recognition [Ma et al. 2002] , and spoken document retrieval [Dai et al. 2003 ]. In the past few decades, many statistical approaches to LID have been developed [Kirchhoff et al. 2002] [Matrouf et al. 1998 ] [Nagarajan and Murthy 2004] [Parandekar and Kirchhoff 2003] [Singer et al. 2003] [ Torres-Carrasquillo et al. 2002] [Yan and Barnard 1995] [Zissman 1996 ] by exploiting recent advances in the acoustic modeling [Singer et al. 2003] [Torres-Carrasquillo et al. 2002] of phone units and the language modeling of n-grams of these phones [ Parandekar and Kirchhoff 2003 ]. Acoustic phone models are used in language-dependent continuous phone recognition to convert speech utterances into sequences of phone symbols in a tokenization process. Then the scores from acoustic models and the scores from language models are combined to obtain a language-specific score for making a final LID decision [Zissman 1996 ].", |
|
"cite_spans": [ |
|
{ |
|
"start": 246, |
|
"end": 262, |
|
"text": "[Ma et al. 2002]", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 295, |
|
"end": 311, |
|
"text": "[Dai et al. 2003", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 395, |
|
"end": 418, |
|
"text": "[Kirchhoff et al. 2002]", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 419, |
|
"end": 439, |
|
"text": "[Matrouf et al. 1998", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 442, |
|
"end": 469, |
|
"text": "[Nagarajan and Murthy 2004]", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 470, |
|
"end": 501, |
|
"text": "[Parandekar and Kirchhoff 2003]", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 502, |
|
"end": 522, |
|
"text": "[Singer et al. 2003]", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 525, |
|
"end": 557, |
|
"text": "Torres-Carrasquillo et al. 2002]", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 558, |
|
"end": 580, |
|
"text": "[Yan and Barnard 1995]", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 581, |
|
"end": 594, |
|
"text": "[Zissman 1996", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 652, |
|
"end": 672, |
|
"text": "[Singer et al. 2003]", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 673, |
|
"end": 706, |
|
"text": "[Torres-Carrasquillo et al. 2002]", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 777, |
|
"end": 806, |
|
"text": "Parandekar and Kirchhoff 2003", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 1134, |
|
"end": 1147, |
|
"text": "[Zissman 1996", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "Syllable-like units have also been studied [Nagarajan and Murthy 2004] . To further improve the LID performance, other information, such as articulatory and acoustic features [Kirchhoff et al. 2002] [Sugiyama 1991 ], lexical knowledge [Adda-Decker et al. 2003] [Ma et al. 2002] and prosody [Hazen and Zue 1994] , have also been integrated into LID systems. Zissman [1996] experimentally showed that phonetic language models can sometimes be more powerful than MFCC-based Gaussian mixture models (GMMs) [Torres-Carrasquillo et al. 2002] . Therefore the fusion of high-level features and good utilization of their statistics are two important research topics for LID.", |
|
"cite_spans": [ |
|
{ |
|
"start": 43, |
|
"end": 70, |
|
"text": "[Nagarajan and Murthy 2004]", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 175, |
|
"end": 198, |
|
"text": "[Kirchhoff et al. 2002]", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 199, |
|
"end": 213, |
|
"text": "[Sugiyama 1991", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 235, |
|
"end": 260, |
|
"text": "[Adda-Decker et al. 2003]", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 261, |
|
"end": 277, |
|
"text": "[Ma et al. 2002]", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 290, |
|
"end": 310, |
|
"text": "[Hazen and Zue 1994]", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 357, |
|
"end": 371, |
|
"text": "Zissman [1996]", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 502, |
|
"end": 535, |
|
"text": "[Torres-Carrasquillo et al. 2002]", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "To make use of high-level features, the LID problem can be taken as consisting of two sub-problems, the tokenization problem and the classification problem. When the tokenization problem is addressed, a fundamental question that arises is whether phone definition is really needed to identify spoken languages. When human beings are constantly exposed to a language without being given any linguistic knowledge, they learn to determine the language's identity by perceiving some of the speech cues in the language. It is also noteworthy that in human perceptual experiments, listeners with multilingual background often perform better than monolingual listeners in identifying unfamiliar languages [Muthusamy et al. 1994] . These results motivate us to look for useful speech cues for LID along the same line of a recently proposed automatic speech attribute transcription (ASAT) paradigm for automatic speech recognition [Lee 2004 ]. When we address the classification problem, we find that the strategies such as feature representation for spoken documents and classifier design principles have direct impacts on LID performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 698, |
|
"end": 721, |
|
"text": "[Muthusamy et al. 1994]", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 922, |
|
"end": 931, |
|
"text": "[Lee 2004", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "In this paper, we adopt the acoustic segment modeling approach to address the tokenization problem. It is assumed that the sound characteristics of all spoken languages can be covered by a set of acoustic units without strict phonetic definitions, which are called acoustic segment models (ASMs) [Lee et al. 1998 ]. They can be used to decode spoken utterances into strings of such units. We also propose a vector space modeling approach (VSM) to classifier design where the statistics of the units and their co-occurrences corresponding to spoken utterances are used to construct feature vectors.", |
|
"cite_spans": [ |
|
{ |
|
"start": 296, |
|
"end": 312, |
|
"text": "[Lee et al. 1998", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 438, |
|
"end": 443, |
|
"text": "(VSM)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "Hidden Markov modeling (HMM) [Rabiner 1989 ] is the dominant approach to acoustic modeling. A collection of ASMs is established from the bottom up in an unsupervised manner using HMM, and has been used to construct an acoustic lexicon for isolated word recognition with high accuracy [Lee et al. 1998 ]. In LID research, a large body of prior work in LID has been devoted to the PR-LM framework (the phone-recognition frontend followed by the language model backend) [Zissman 1996 ] and its variations, where phonetic units are used as acoustic units. This is also referred to as the phonotactic approach. The phonotactic approach has been shown to achieve superior performance in NIST LRE tasks especially when it is fused with acoustic scores [Singer et al. 2003] . In this paper, we investigate four LID system configurations cast in a formalism of frontend feature extraction and backend classifier, namely parallel phone recognizer (PPR) and universal phone recognizer (UPR) frontends, and n-gram language model (LM) and vector space model (VSM) backends. We show that the ASM-based PPR-VSM system configuration achieves the best performance across 1996 This paper is organized as follows. In Section 2, we introduce the acoustic segment modeling approach. In Section 3, we discuss LID systems by studying their frontends and backends. In Section 4, we present the experimental results on four front-backend combinations. We draw conclusions in Section 5.", |
|
"cite_spans": [ |
|
{ |
|
"start": 29, |
|
"end": 42, |
|
"text": "[Rabiner 1989", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 284, |
|
"end": 300, |
|
"text": "[Lee et al. 1998", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 467, |
|
"end": 480, |
|
"text": "[Zissman 1996", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 745, |
|
"end": 765, |
|
"text": "[Singer et al. 2003]", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 1147, |
|
"end": 1158, |
|
"text": "across 1996", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "A tokenizer is needed to convert spoken utterances into sequences of fundamental acoustic units specified in an acoustic inventory. We believe that units that are not linked to a particular phonetic definition can be more universal, and therefore conceptually easier to adopt. Such acoustic units are thus highly desirable for universal language characterization, especially for rarely observed languages, languages without orthographies, or languages without well-documented phonetic dictionary.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic Segment Modeling", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "A number of variants have been developed along these lines, which have been referred to as language-independent acoustic phone models. Hazen and Zue [1994] reported using 87 phones from the multilingual OGI-TS corpus. Berkling and Barnard [1994a] explored the possibility of finding and using only those phones that best discriminate between language pairs. Barnard [1994b] and Corredor-Ardoy et al. [1997] used phone clustering algorithms to find common sets of phones for languages. However, these systems could only operate when a phonetically transcribed database was available. On a separate front, a general effort to circumvent the need for phonetic transcription can be traced back to [Lee et al. 1998 ] on automatic speech recognition, where ASM was constructed in an unsupervised manner.", |
|
"cite_spans": [ |
|
{ |
|
"start": 135, |
|
"end": 155, |
|
"text": "Hazen and Zue [1994]", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 218, |
|
"end": 246, |
|
"text": "Berkling and Barnard [1994a]", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 358, |
|
"end": 377, |
|
"text": "Barnard [1994b] and", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 378, |
|
"end": 406, |
|
"text": "Corredor-Ardoy et al. [1997]", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 693, |
|
"end": 709, |
|
"text": "[Lee et al. 1998", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic Segment Modeling", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "Some recent studies have applied this concept to LID [Sai Jayram et al. 2003 ]. Motivated by the above efforts, we propose here an ASM method for establishing a universal representation of acoustic units for multiple languages.", |
|
"cite_spans": [ |
|
{ |
|
"start": 53, |
|
"end": 76, |
|
"text": "[Sai Jayram et al. 2003", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic Segment Modeling", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "Attempts have been made to derive a universal collection of phones to cover all sounds described in an international phonetic inventory, e.g. International Phonetic Alphabet (IPA) or Worldbet [Hieronymus 1994 ]. In practice, this is a challenging endeavor because we need a large collection of labeled speech samples for all languages. Note that these sounds overlap considerably across languages. One possible approximation approach is to use a set of phonemes from several languages to form a superset, called an augmented phoneme inventory (API) here. This idea has been explored in previous works [Berkling and Barnard 1994a] [ Barnard 1994b] [Corredor-Ardoy et al. 1997] [Hazen and Zue 1994] . A good inventory needs to phonetically cover as many targeted languages as possible. This method can be effective when phonemes from all targeted languages form a closed set, as studied by Hazen and Zue [1994] . Human perceptual experiments have also shown a similar effect, where listeners' LID performance improved as their exposure to each language increased [Muthusamy et al. 1994 ].", |
|
"cite_spans": [ |
|
{ |
|
"start": 192, |
|
"end": 208, |
|
"text": "[Hieronymus 1994", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 601, |
|
"end": 629, |
|
"text": "[Berkling and Barnard 1994a]", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 632, |
|
"end": 675, |
|
"text": "Barnard 1994b] [Corredor-Ardoy et al. 1997]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 676, |
|
"end": 696, |
|
"text": "[Hazen and Zue 1994]", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 888, |
|
"end": 908, |
|
"text": "Hazen and Zue [1994]", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1061, |
|
"end": 1083, |
|
"text": "[Muthusamy et al. 1994", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Augmented Phoneme Inventory (API)", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "This API-based tokenization approach was recently explored [Ma et al. 2005] by using a set of all 124 phones and 4 noise units from English, Korean, and Mandarin, and by extrapolating them to nine other languages in the NIST LRE tasks. This set of 128 units is referred to as API-I in Table 1 , which is a proprietary phone set defined for the IIR-LID 1 database. Many preliminary LID experiments were conducted using the IIR-LID database and the API-I phone set. For example, we have explored an API-based approach to universal language characterization [Ma et al. 2005] and a text categorization approach to LID [Gao et al. 2005] , which formed the basis for the vector based feature extraction approach discussed in the next section. To expand the acoustic and phonetic coverage, we further used another larger set of APIs with 258 phones, from the six languages in the OGI-TS 2 multi-language telephone speech database. These six languages all appear in the NIST LRE tasks. This set will be referred to as API-II. A detailed breakdown of how the two phone sets were formed with phone counts for each language is given in Table 1 . ", |
|
"cite_spans": [ |
|
{ |
|
"start": 59, |
|
"end": 75, |
|
"text": "[Ma et al. 2005]", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 555, |
|
"end": 571, |
|
"text": "[Ma et al. 2005]", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 614, |
|
"end": 631, |
|
"text": "[Gao et al. 2005]", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 285, |
|
"end": 292, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
}, |
|
{ |
|
"start": 1125, |
|
"end": 1132, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Augmented Phoneme Inventory (API)", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "The above phone-based language characterization approach suffers from two major shortcomings. First, a combined phone set from a limited set of multiple languages cannot easily be extended to cover new and rarely used languages. Second, a large collection of transcribed speech data is needed to train the acoustic and language phone models for each language. To alleviate these difficulties, a data-driven method that does not rely on exact phonetic transcriptions is preferred. It can be obtained by constructing consistent acoustic segment models (ASMs) [Lee et al. 1998 ] intended to cover the entire sound space of all spoken languages in an unsupervised manner.", |
|
"cite_spans": [ |
|
{ |
|
"start": 557, |
|
"end": 573, |
|
"text": "[Lee et al. 1998", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic Segment Model (ASM)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "As in other types of hidden Markov modeling, the initialization of ASMs is a critical factor for success. Note that the unsupervised, data-driven procedure for obtaining ASMs may result in many unnecessary small segments because of a lack of phonetic or prosodic constraints, (e.g. the number of segments in a word and the duration of an ASM) imposed during segmentation. This problem is especially severe when segmenting a huge collection of speech utterances from a large population of speakers with different language backgrounds. The API approach uses phonetically defined units in the sound inventory. It has the advantage of adopting phonetic constraints in the segmentation process. By using API to bootstrap ASM, our approach effectively incorporates some phonetic knowledge about a few languages in the initialization step to guide the ASM training process as described below:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic Segment Model (ASM)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Step 1: Carefully select a few languages, typically with large amounts of labeled data, and train language-specific phone models. Choose a set of J models for bootstrapping. The J models had better not to overlap very much according to their acoustic characteristics, and their number should be large enough to provide a reasonable acoustic coverage for all of the target languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic Segment Model (ASM)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Step 2: Use these J models to decode all training utterances in the training corpora. Assume the recognized sequences are \"true\" labels.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic Segment Model (ASM)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Step 3: Force-align and segment all utterances in the training corpora, using the available set of labels and HMMs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic Segment Model (ASM)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Step 4: Group all segments corresponding to a specific label into a class. Use these segments to re-train an HMM.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic Segment Model (ASM)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Step 5: Repeat steps 2-4 several times until convergence is achieved.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic Segment Model (ASM)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "In this procedure, we jointly optimize the J models as well as the segmentation of all utterances. This is equivalent to the commonly adopted segmental ML and k-means HMM training algorithm [Rabiner 1989 ] which adopt iterative optimization of segmentation and maximization. We have found that API-bootstrapped ASMs are more stable than the randomly initialized ASMs. It outperformed API by a big margin in the 1996 NIST LRE task as reported in [Ma et al. 2005] . The detailed results will be given in section 4.1.", |
|
"cite_spans": [ |
|
{ |
|
"start": 190, |
|
"end": 203, |
|
"text": "[Rabiner 1989", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 445, |
|
"end": 461, |
|
"text": "[Ma et al. 2005]", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic Segment Model (ASM)", |
|
"sec_num": "2.2" |
|
}, |
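Steps 1-5 above amount to a small control loop. The sketch below is illustrative only: `decode`, `force_align`, and `train_hmm` are caller-supplied callables standing in for an HMM toolkit (the paper does not prescribe one), and the sketch only shows the flow of decoding, pooling segments per label, re-training, and checking for convergence.

```python
def train_asm(utterances, api_models, decode, force_align, train_hmm, max_iters=10):
    """Iteratively refine ASMs bootstrapped from API phone models (Steps 2-5 above).

    utterances : list of feature-vector sequences, one per training utterance.
    api_models : dict mapping the J bootstrap unit labels to HMMs (Step 1).
    decode / force_align / train_hmm : hypothetical wrappers around a real HMM
    toolkit with the signatures used below; they are not defined in the paper.
    """
    models = dict(api_models)
    prev_labels = None
    for _ in range(max_iters):
        # Step 2: decode every utterance and treat the recognized sequences as "true" labels.
        labels = [decode(models, utt) for utt in utterances]
        # Step 3: force-align each utterance against its labels to get segment boundaries.
        segments = {name: [] for name in models}
        for utt, lab in zip(utterances, labels):
            for name, frames in force_align(models, utt, lab):
                segments[name].append(frames)   # Step 4: pool segments per label ...
        # ... and re-train one HMM per label from its pooled segments.
        models = {name: train_hmm(segs) for name, segs in segments.items() if segs}
        # Step 5: stop once the decoded labels no longer change.
        if labels == prev_labels:
            break
        prev_labels = labels
    return models
```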
|
{ |
|
"text": "With an established acoustic inventory obtained using the ASM method, we can tokenize any given speech utterance to obtain a token sequence T , in a form similar to a text-like document. Note that ASMs are trained in a self-organized manner. We may not be able to establish a phonetic lexicon using ASMs and translate an ASM sequence into words. However, as far as LID is concerned, we are more interested in consistent tokenization than in the underlying lexical characterization of a spoken utterance. The self-organizing ASM modeling approach offers the key property that it does not require the training speech data to be directly or indirectly phonetically transcribed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic Segment Model (ASM)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Comparing the API and ASM methods, we find that the API method has better linguistic/phonetic grounding, while the ASM method is more acoustically oriented. Instead of using a bottom-up approach to derive purely acoustically oriented ASM units in an unsupervised manner, we use API to bootstrap the units.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic Segment Model (ASM)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "The main difference between API and ASM lies in the relaxation of phone transcription for segmentation. In API, phone models are trained according to manually transcribed phone labels, while in ASM, segmentation is done in iterations using automatic recognition results. In this way, ASM gains two advantages: (i) it allows us to adjust a set of API phones from a small number of selected languages towards a larger set of targeted languages; (ii) ASMs can be trained on acoustic data similar to that used for the LID task, thus potentially minimizing the mismatch between the test data and the APIs that were trained on a prior set of phonetically transcribed speech data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic Segment Model (ASM)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "In this section, we will first briefly discuss prior works cast in the formalism of phone recognition (PR) and phone-based language modeling (LM). Then, we will propose our phone recognition frontend based on ASM acoustic modeling and our backend of vector space modeling for language classification. Note that the ASMs are no longer the phonemes defined in Table 1 . For easy reference, we will continue to refer to the ASM tokenization process as phone recognition (PR).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 358, |
|
"end": 365, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Frontend and Backend Formulations", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "A typical LID system is illustrated in Figure 1 , which shows a collection of parallel phone recognizers (PPR frontend) that serve as voice tokenizers, referred to as the frontend. A frontend converts spoken utterances into sequences of token symbols, or spoken documents. It is followed by a set of n-gram phone language models (LM) that impose constraints on phone decoding and provide language scores. The LM pool converts an input spoken utterance into a vector of interpolated LM scores. The language models and the classifier are referred to as the backend. The backend classifier models a spoken language using a collection of training samples, in the form of LM score vectors.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 39, |
|
"end": 47, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "PPR-LM Configuration", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Generally speaking, a probabilistic language classifier can be formulated as follows.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Figure 1. Block diagram of a PPR-LM LID system", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "O of length \u03c4 , 1 2 { , ..., } O o o o \u03c4 =", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Given a sequence of feature vectors", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": ", we can express the a posteriori probability of language l using Bayes Theorem as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Given a sequence of feature vectors", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "( ) ( ) , ( | ) ( | ) ( ) / ( ) | , | ( ) / ( ) AM LM f fl T P l O P O l P l P O P O T P T P l P O \u03bb \u03bb \u2200 = = \u2211 ,", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Given a sequence of feature vectors", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where T is a candidate token sequence, and AM f \u03bb is the acoustic model for the f-th phone recognizer, while", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Given a sequence of feature vectors", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": ", LM f l \u03bb", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Given a sequence of feature vectors", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "is the l-th language model for the f-th phone recognizer. Now we can apply the maximum a posteriori decision rule as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Given a sequence of feature vectors", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "PR-1: Chinese PR-2: English PR-F: French LM-L: French LM-1 \u2026 LM-L LM-L: French LM-1 \u2026 LM-L LM-L: French LM-1 \u2026 LM-L Spoken utterance PPR-Frontend LM-Backend Lang-1 Lang-L Lang-2 ( ) ( ) , , arg max | , | ( ) / ( ) AM LM f fl T f l l P O T P T P l P O \u03bb \u03bb \u2200 = \u2211 ,", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Given a sequence of feature vectors", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where the first term on the right hand side of (2) is the probability of O given T and its acoustic model AM f \u03bb , the second term is the language probability of T given the language model , LM f l \u03bb , and the last term is the prior probability P(l), which is often assumed to be equal for all languages. The observation probability, P(O), is not a function of the language and can be removed from the optimization function.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Given a sequence of feature vectors", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The exact computation in (2) involves summing over all possible token sequences. In practice, it can be approximated by finding the most likely phone sequence \u02c6f T , for each phone recognizer f, using the Viterbi algorithm:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Given a sequence of feature vectors", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "( ) arg max | , f AM f f T B T POT\u03bb \u2208 = ,", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Given a sequence of feature vectors", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where f B is the set of all possible token sequences from the f-th phone recognizer. As such, a solution to (2) can be approximated as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Given a sequence of feature vectors", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "( ) ( ) , ,\u02c6\u00e2 rg max log | , log | AM LM f f f f l f l l POT PT \u03bb \u03bb \u23a1 \u23a4 \u2248 + \u23a2 \u23a5 \u23a3 \u23a6 .", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Given a sequence of feature vectors", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We assume that the F parallel language-dependent acoustic phone models can be used to approximate the acoustic space of L languages. After a spoken utterance is decoded by the F recognizers, it needs to be evaluated by a set of F L \u00d7 language models to establish comparability. The system formulated by (3) and (4) is known as parallel PRLM, or P-PRLM [Zissman 1996] . In this paper, it will be referred to as PPR-LM to identify its PPR frontend and LM backend.", |
|
"cite_spans": [ |
|
{ |
|
"start": 352, |
|
"end": 366, |
|
"text": "[Zissman 1996]", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Given a sequence of feature vectors", |
|
"sec_num": null |
|
}, |
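To make the frontend/backend score combination concrete, the sketch below assembles a language score from the best token sequence of each of the F recognizers, in the spirit of (3) and (4). Summing the F per-recognizer scores is one common fusion choice and is an assumption here; the function and variable names are illustrative, not taken from the paper.

```python
def pprlm_identify(acoustic_logp, lm_logp, languages):
    """Pick the language with the largest combined log-score (cf. Eqs. (3)-(4)).

    acoustic_logp[f] : log P(O | T_f, lambda_f^AM) for recognizer f's best sequence T_f
    lm_logp[(f, l)]  : log P(T_f | lambda_{f,l}^LM), one LM per (recognizer, language) pair
    """
    def combined_score(l):
        # Sum acoustic + phonotactic log-scores over the F parallel recognizers.
        return sum(acoustic_logp[f] + lm_logp[(f, l)] for f in acoustic_logp)
    return max(languages, key=combined_score)

# Toy usage with F = 2 recognizers and 3 candidate languages (all numbers are made up).
acoustic = {"PR-en": -120.5, "PR-zh": -118.2}
lms = {("PR-en", "english"): -45.0, ("PR-en", "mandarin"): -60.0, ("PR-en", "french"): -58.0,
       ("PR-zh", "english"): -62.0, ("PR-zh", "mandarin"): -44.0, ("PR-zh", "french"): -59.0}
print(pprlm_identify(acoustic, lms, ["english", "mandarin", "french"]))  # -> "mandarin"
```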
|
{ |
|
"text": "In prior works, researchers also looked into a language-independent phone recognizer with a set of universal acoustic units, or phones that are common to all languages. The formulations of (3) and (4) can be simplified as a two-step optimization:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "UPR-LM Configuration", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "( ) arg max log | , AM T B T POT\u03bb \u2208 \u23a1 \u23a4 = \u23a2 \u23a5 \u23a3 \u23a6 ,", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "UPR-LM Configuration", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "( )\u00e2 rg max log | LM l l A l PT\u03bb \u2208 \u23a1 \u23a4 = \u23a2 \u23a5 \u23a3 \u23a6 ,", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "UPR-LM Configuration", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where B is the set of all possible token sequences for all languages. The acoustic probability on the right hand side of (5) is now the same for all competing languages. Only a language-specific score on the right hand side of (6) is used for score comparison to select the identified language. As such, the PPR-LM system can be simplified as the UPR-LM system with a universal phone recognition (UPR) frontend as shown in Figure 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 423, |
|
"end": 431, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "UPR-LM Configuration", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "A number of UPR-LM systems have been proposed along these lines, such as the ALI system [Hazen and Zue 1994] , the single-language PRLM system [Zissman 1996] , and the language-independent phone recognition approach [Corredor-Ardoy et al. 1997] . However, the training of phone sets in these systems requires phonetic transcription of all training utterances.", |
|
"cite_spans": [ |
|
{ |
|
"start": 88, |
|
"end": 108, |
|
"text": "[Hazen and Zue 1994]", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 143, |
|
"end": 157, |
|
"text": "[Zissman 1996]", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 216, |
|
"end": 244, |
|
"text": "[Corredor-Ardoy et al. 1997]", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Figure 2. Block diagram of a UPR-LM LID system", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this paper, we propose a new way of training the set of universal acoustic units using the ASM approach described in Section 2.2, where acoustic models are trained in a self-organized and unsupervised manner. This provides two obvious advantages: (1) the unsupervised strategy allows the frontend to adapt easily to new languages without the need for phonetic transcription; (2) the universal acoustic units can be flexibly partitioned into subsets to work for the parallel phone recognition (PPR) frontend as shown in Figure 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 522, |
|
"end": 530, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Figure 2. Block diagram of a UPR-LM LID system", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Vector space modeling (VSM) has become a standard tool in Information Retrieval (IR) systems since its introduction decades ago [Salton 1971 ]. It uses a vector to represent a text document. One of the advantages of the method is that it allows the discriminative training of classifiers over the document vectors. We can derive the distance between documents easily as long as the vector attributes are well defined characteristics of the documents. Each coordinate in the vector reflects the presence of the corresponding attribute.", |
|
"cite_spans": [ |
|
{ |
|
"start": 128, |
|
"end": 140, |
|
"text": "[Salton 1971", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Vector Space Modeling for Language Classification", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Inspired by the idea of document vectors in text categorization research, we would like to investigate a new concept of the LID classifier, using vector space modeling. A spoken language will always contain a set of high frequency function words, prefixes, and suffixes, ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Vector Space Modeling for Language Classification", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "which are realized as acoustic unit substrings in spoken documents. Individually, these substrings may be shared across languages. Collectively, the pattern of their co-occurrences discriminates one language from another.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "UPR-Frontend", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Suppose that the sequence of feature vectors O is decoded into a sequence of \u2126 acoustic units", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "UPR-Frontend", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "1 { ,..., ,..., } T t t t \u03c0 \u2126 =", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "UPR-Frontend", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": ", where each unit is drawn from the universal ASM inventory of J models in a UPR frontend,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "UPR-Frontend", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "1 2 { , ,... } J t w w w \u03c0 \u2208", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "UPR-Frontend", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": ". One is able to establish a high-dimensional salient feature vector which is language independent, where all of its elements are expressed as the n-gram probability attributes 1 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "UPR-Frontend", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "( | ,... ) n n p w w w \u2212 = 1 1 1 1 ( | ,..., ) n n n p t w t w t w \u03c0 \u03c0 \u03c0 \u2212 \u2212 + \u2212 = = = .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "UPR-Frontend", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Its dimension is equal to the total number of n-gram patterns needed to highlight the overall behavior of an utterance:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "UPR-Frontend", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "( ) 1 2 1 3 1 2 ( ),..., ( | ),..., ( | , ),... p w p w w p w w w \u03bb = .", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "UPR-Frontend", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The vector \u03bb is also called a bag-of-sounds (BOS) vector , which represents a spoken utterance in a document vector in a same way as in text-based document vector representation [Gao et al. 2005 ] [Salton 1971 ]. The vector space modeling approach evaluates the goodness of fit, or score function, using a vector-based distance, such as an inner product:", |
|
"cite_spans": [ |
|
{ |
|
"start": 178, |
|
"end": 194, |
|
"text": "[Gao et al. 2005", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 197, |
|
"end": 209, |
|
"text": "[Salton 1971", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "UPR-Frontend", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "( ) L M T l l P T \u03bb \u03bb \u03c9 \u221d \u22c5 ,", |
|
"eq_num": "(8)" |
|
} |
|
], |
|
"section": "UPR-Frontend", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where l \u03c9 is a language-dependent weight vector with dimension equal to \u03bb , with each component representing the contribution of its individual n-gram probability to the overall language score. The spoken document vector in (7) is high dimensional in nature as high order n-gram patterns are included. This makes it suitable for discriminative feature extraction and selection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "UPR-Frontend", |
|
"sec_num": null |
|
}, |
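As a concrete illustration of (7) and (8), the sketch below builds a unigram-plus-bigram bag-of-sounds vector over a fixed ASM inventory and scores it against a language weight vector with an inner product. The inventory, token sequence, and weights are toy values, not from the paper; for the PPR frontend, F such vectors would simply be concatenated into the grand BOS vector described in the next paragraph.

```python
from collections import Counter
from itertools import product

def bos_vector(tokens, inventory):
    """Unigram + bigram bag-of-sounds vector over a fixed unit inventory (cf. Eq. (7)).

    Each coordinate holds the relative frequency of one unit or one unit pair in the
    decoded sequence; patterns that never occur stay at zero.
    """
    axes = list(inventory) + [f"{a} {b}" for a, b in product(inventory, repeat=2)]
    uni = Counter(tokens)
    bi = Counter(f"{a} {b}" for a, b in zip(tokens, tokens[1:]))
    n_uni, n_bi = max(len(tokens), 1), max(len(tokens) - 1, 1)
    return [uni[x] / n_uni if " " not in x else bi[x] / n_bi for x in axes]

def language_score(doc_vec, weight_vec):
    """Inner-product score between a document vector and a language weight vector (cf. Eq. (8))."""
    return sum(v * w for v, w in zip(doc_vec, weight_vec))

# Toy usage: a 3-unit inventory gives a 3 + 3*3 = 12-dimensional BOS vector.
inventory = ["w1", "w2", "w3"]
doc = bos_vector("w1 w2 w1 w3 w2 w1".split(), inventory)
weights = [0.1] * len(doc)              # stand-in language-dependent weight vector
print(len(doc), round(language_score(doc, weights), 3))
```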
|
{ |
|
"text": "For the PPR frontend, the sequence of feature vectors O is decoded into F independent sequences of acoustic units. A BOS vector f \u03bb can be derived from each sequence in the same way as in (7) for each phone recognizer. A grand BOS vector is, therefore, constructed by concatenating the F vectors f \u03bb to represent the input spoken utterance. With multiple tokenizers, we hope that the grand BOS vector will describe the input spoken utterance in a greater detail.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "UPR-Frontend", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Term weighting [Bellegarda 2000 ] is widely used to render the value of the attribute in a document vector by taking into account the frequency of occurrence of each attribute. It is interesting to note that attribute patterns which often occur in a few documents but not as often in others provide high indexing power for these documents. On the other hand, patterns which occur very often in all documents possess little indexing power. This desirable property has led to the development of a number of term weighting schemes, such as tf-idf, that are commonly used in information retrieval [Salton 1971 ], natural language call routing [Kuo and Lee 2003] , and text categorization [Gao et al. 2004] . We adopt the standard tf-idf term weighting scheme in this paper.", |
|
"cite_spans": [ |
|
{ |
|
"start": 15, |
|
"end": 31, |
|
"text": "[Bellegarda 2000", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 593, |
|
"end": 605, |
|
"text": "[Salton 1971", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 639, |
|
"end": 657, |
|
"text": "[Kuo and Lee 2003]", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 684, |
|
"end": 701, |
|
"text": "[Gao et al. 2004]", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "UPR-Frontend", |
|
"sec_num": null |
|
}, |
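The standard tf-idf weighting mentioned here can be written down in a few lines; this is the textbook formula applied to bag-of-sounds counts, with no claim about the exact variant or smoothing used in the paper.

```python
import math
from collections import Counter

def tfidf_weight(docs):
    """Standard tf-idf over bag-of-sounds count dictionaries.

    Attributes that occur in only a few documents keep a high idf weight;
    attributes that occur in every document are weighted down to zero.
    """
    n_docs = len(docs)
    df = Counter(attr for doc in docs for attr in doc)   # document frequency per attribute
    weighted = []
    for doc in docs:
        total = sum(doc.values())
        weighted.append({attr: (cnt / total) * math.log(n_docs / df[attr])
                         for attr, cnt in doc.items()})
    return weighted

# Toy usage: "w1" appears in every document, so its weight becomes zero.
docs = [Counter({"w1": 4, "w2": 1}), Counter({"w1": 3, "w3": 2}), Counter({"w1": 5})]
print(tfidf_weight(docs)[0])
```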
|
{ |
|
"text": "Note that the variations Barnard 1994a] [Corredor-Ardoy et al. 1997] [ Hazen and Zue 1994] [Zissman 1996 ] of LM backend systems proposed in prior works used cross-entropy or perplexity based language model scores, which are based on similarity matching, for language classification decision-making. The VSM can be seen as an attempt to enhance the discrimination power offered by n-gram phonotactic information.", |
|
"cite_spans": [ |
|
{ |
|
"start": 25, |
|
"end": 68, |
|
"text": "Barnard 1994a] [Corredor-Ardoy et al. 1997]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 71, |
|
"end": 90, |
|
"text": "Hazen and Zue 1994]", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 91, |
|
"end": 104, |
|
"text": "[Zissman 1996", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "UPR-Frontend", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "With the universal ASM acoustic units in place, any spoken utterance can now be tokenized with a set of \"key terms\" so that their patterns and statistics can be used to discriminate between individual spoken documents. The given collection of spoken documents in the training set from a particular language forms the same language category. LID can be considered the process of classifying a spoken document into some pre-defined language categories. An unknown testing utterance to be identified can be represented as a query vector, and LID can then be performed as in text document classification [Joachims 2002] . We can then utilize any classifier learning technique, such as support vector machine [Sebastiani 2002] or artificial neural network [Haykin 1994 ], developed by the text categorization community to design language classifiers. An LID system with the VSM-backend is shown in Figure 3 for the PPR frontend and in Figure 4 for the UPR frontend. The VSM-backend takes as inputs n-gram statistics in the form of document vectors. The backend structure remains the same for both the UPR and PPR frontends, so long as we can represent the voice tokenizations from the PPR/UPR frontend in document vectors. With the document vectors from the training database, the backend groups training document vectors into language classes.", |
|
"cite_spans": [ |
|
{ |
|
"start": 600, |
|
"end": 615, |
|
"text": "[Joachims 2002]", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 704, |
|
"end": 721, |
|
"text": "[Sebastiani 2002]", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 751, |
|
"end": 763, |
|
"text": "[Haykin 1994", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 893, |
|
"end": 901, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 930, |
|
"end": 938, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "VSM-Backend", |
|
"sec_num": "3.4" |
|
}, |
|
{

"text": "[Figure 3. Block diagram of a PPR-VSM LID system: a PPR frontend (PR-1 ... PR-F) followed by a VSM backend producing scores for Lang-1 ... Lang-L.]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "PPR-Frontend VSM-Backend",

"sec_num": null

},
|
{ |
|
"text": "There are many ways to reduce the dimension of the document vectors and to enhance the discriminative ability, such as by applying latent semantic indexing (LSI). In this paper, we propose to use a set of output scores from an array of support vector machines (SVMs) as the dimension-reduced vector for the final classifier. For each of L target languages, we have a number of high dimensional training vectors as shown in (7). An SVM is a 2-way classifier used to partition the high dimensional vector space. We construct an SVM between each of the language pairs. As a result, we obtain ( 1)/2 L L \u00d7 \u2212 pair-wise SVM classifiers for the L target languages. For each input utterance, an output score is generated from each of the pair-wise SVM classifiers, resulting in a vector of", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Classifiers in VSM-Backend", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "( 1)/2 L L \u00d7 \u2212 dimensions that represent ( 1)/2 L L \u00d7 \u2212", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Classifiers in VSM-Backend", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "pair-wise language discriminative scores, called a discriminative vector. The linear kernel is adopted for the SVMs in the SVMlight V6.01 tool 3 implementation. In this way, each language category can be represented by a Gaussian mixture model (GMM) which is trained on the discriminative vectors of the training utterances. The GMM classifiers are built as part of the VSM-backend for decision-making. At run-time, the VSM-backend identifies the language of a spoken document in language recognition/detection trials and verifies the language identity of a spoken document in language verification trials.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Classifiers in VSM-Backend", |
|
"sec_num": "3.5" |
|
}, |
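A compact sketch of this backend pipeline, written with scikit-learn purely for illustration (the paper uses the SVMlight tool and its own GMM training, with 512 mixtures rather than the small count used here): pairwise linear SVMs map each BOS vector to an L(L-1)/2-dimensional discriminative vector, and one GMM per language is then fitted on those vectors.

```python
import numpy as np
from itertools import combinations
from sklearn.svm import LinearSVC
from sklearn.mixture import GaussianMixture

def train_vsm_backend(X, y, languages, n_mix=2):
    """Pairwise linear SVMs + one GMM per language over the SVM score vectors.

    X : (n_utterances, d) array of BOS document vectors; y : array of language labels.
    n_mix is kept tiny here; the paper reports 512-mixture GMMs.
    """
    pairs = list(combinations(languages, 2))
    svms = []
    for a, b in pairs:                           # one 2-way classifier per language pair
        mask = np.isin(y, [a, b])
        svms.append(LinearSVC().fit(X[mask], y[mask] == a))
    # Map every training vector to its L(L-1)/2-dim discriminative score vector.
    scores = np.column_stack([svm.decision_function(X) for svm in svms])
    gmms = {l: GaussianMixture(n_mix).fit(scores[y == l]) for l in languages}
    return svms, gmms

def identify(x, svms, gmms):
    """Score one utterance's BOS vector and pick the language whose GMM fits it best."""
    s = np.column_stack([svm.decision_function(x.reshape(1, -1)) for svm in svms])
    return max(gmms, key=lambda l: gmms[l].score_samples(s)[0])
```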
|
{ |
|
"text": "To summarize, we have discussed an LID paradigm of two frontend options for voice tokenization, PPR or UPR, and two backend options, LM or VSM. The PPR-LM and UPR-LM configurations were well studied in the previous works. However, a systematic comparison among the PPR-LM, UPR-LM, PPR-VSM and UPR-VSM configurations has not 3 http://svmlight.joachims.org/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Classifiers in VSM-Backend", |
|
"sec_num": "3.5" |
|
}, |
|
{

"text": "[Figure 4. Block diagram of a UPR-VSM LID system: a single UPR frontend followed by a VSM backend producing scores for Lang-1 ... Lang-L.]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "UPR-Frontend",

"sec_num": null

},
|
{ |
|
"text": "Lang-L been made. Thus, we conducted a comparative study over the four combinations of frontends and backends based on ASM acoustic units.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "UPR-Frontend", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We followed the experiment setup in the NIST Language Recognition Evaluation (LRE) tasks 4 . The tasks were intended to establish a baseline of performance capability for language recognition of conversational telephone speech. The evaluation was carried out on recorded telephony speech in 12 languages, Arabic, English, Farsi, French, German, Hindi, Japanese, Korean, Mandarin, Spanish, Tamil, and Vietnamese, for the 1996, 2003 NIST LRE tasks, and in 7 languages, English, Hindi, Japanese, Korean, Mandarin, Spanish, and Tamil for the 2005 NIST LRE task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 305, |
|
"end": 399, |
|
"text": "Arabic, English, Farsi, French, German, Hindi, Japanese, Korean, Mandarin, Spanish, Tamil, and", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 400, |
|
"end": 430, |
|
"text": "Vietnamese, for the 1996, 2003", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "In this paper, training sets for building models came from two corpora, namely: (i) the 6-language OGI-TS database with English, German, Hindi, Japanese, Mandarin, and Spanish; and (ii) the 12-language LDC CallFriend 5 database. The OGI-TS database was only used to bootstrap the acoustic models of an initial set of phones. It consists of telephone speech with phonetic transcriptions. In addition, the CallFriend database was used for full fledged ASM acoustic modeling, backend language modeling and classifier design. It contains telephone conversations in the same 12 languages that are in the 1996 and 2003 NIST LRE tasks, but without phonetic transcriptions. The two databases are independent of each other.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "In the OGI-TS database, there is less than 1 hour of speech in each language. In the CallFriend database, each of the 12 language databases consists of 40 telephone conversations with each lasting approximately 30 minutes, giving a total of about 20 hours per language. In language modeling, each conversation in the training set is segmented into overlapping sessions, resulting in about 12,000 sessions for each of three durations per language. These three durations are 3 seconds, 10 seconds, and 30 seconds. The 1996 NIST LRE evaluation data consists of 1,503, 1,501, and 1,492 sessions for 3 seconds, 10 seconds, and 30 seconds respectively. The 2003 NIST LRE evaluation data consist of 1,200 sessions per duration. The 2005 NIST LRE evaluation data consist of 3,662 sessions per duration.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "Our early research on API and ASM [Ma et al. 2005] showed the following:", |
|
"cite_spans": [ |
|
{ |
|
"start": 34, |
|
"end": 50, |
|
"text": "[Ma et al. 2005]", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Frontend Acoustic Modeling", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "(1) The ASM frontend outperformed the API frontend when followed by the VSM backend;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Frontend Acoustic Modeling", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In the language identification task on the 12 languages in the 1996 NIST LRE evaluation data (30 seconds only), 128 API units were trained with the API-I phone set by using the IIR-LID database, and 128 ASM units were further obtained based on the bootstrapping of APIs using the CallFriend database. With the UPR-VSM setup using the BOS vectors containing both unigram and bi-gram, an error rate of 13.9% was achieved with ASMs, while the error rate with APIs was 19.2%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Frontend Acoustic Modeling", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "(2) Higher ASM coverage, with a larger ASM inventory and higher order n-gram (trigram), improved the LID performance;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Frontend Acoustic Modeling", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Under the same experiment setups as in (1), we investigated the effects of the acoustic coverage by clustering the 128 ASM units into 64 and 32 ASMs according to acoustic similarity. Table 2 compares the acoustic and linguistic coverage achieved using 32, 64, and 128 AMS units, and by using unigram, bi-gram, and trigram. It shows that these reduced-sized ASM units greatly impaired the discrimination power of the ASM systems. We needed a reasonable number of ASM units that was large enough in order to cover the sound variation in all of the languages. (3) Note that the initialization of acoustic model has a strong impact on the resulting models in HMM training. Apparently, API phone models provide good initialization for ASM models.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 183, |
|
"end": 190, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Frontend Acoustic Modeling", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In the following experiments, we used phonetically labeled OGI-TS corpus to train API-II phones, as shown in Table 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 109, |
|
"end": 116, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Frontend Acoustic Modeling", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "For each utterance, 39-dimensional features consisting of 12 MFCCs and normalized energy, plus their first and second order time derivatives were extracted for each frame. Utterance based cepstral mean subtraction was applied to the features to remove channel distortion. A two-step modeling approach was adopted. First, the language dependent phonemes in API-II were trained language by language based on the phonetic training database. Each phoneme was modeled with an HMM of 3 states. The resulting 258 API-II phonemes were then used to bootstrap 258 ASM models. The 258 ASM models were further trained based on the 12 language CallFriend database in an unsupervised manner as described in Section 2.2. The average segment lengths of the 258 ASM models based on the CallFriend database ranged from 33 ms to 150 ms.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Frontend Acoustic Modeling", |
|
"sec_num": "4.1" |
|
}, |
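The 39-dimensional frame feature described above (12 MFCCs plus an energy term, with first- and second-order derivatives and utterance-based cepstral mean subtraction) can be approximated as follows. The analysis parameters and the use of librosa's zeroth cepstral coefficient as the energy-like term are assumptions; the paper does not specify them.

```python
import numpy as np
import librosa

def lid_frame_features(wav_path, sr=8000):
    """Approximate 39-dim features: 13 static cepstra (c0 used as the energy term)
    + deltas + delta-deltas, with utterance-level cepstral mean subtraction (CMS)."""
    signal, sr = librosa.load(wav_path, sr=sr)                  # telephone speech -> 8 kHz
    static = librosa.feature.mfcc(y=signal, sr=sr, n_mfcc=13)   # shape (13, n_frames)
    static = static - static.mean(axis=1, keepdims=True)        # CMS over the utterance
    delta = librosa.feature.delta(static)                       # first-order derivatives
    delta2 = librosa.feature.delta(static, order=2)              # second-order derivatives
    return np.vstack([static, delta, delta2]).T                  # shape (n_frames, 39)
```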
|
{ |
|
"text": "First, the 15-language/dialect 6 training data in the CallFriend database was tokenized to obtain a collection of text-like phone sequences from each of the 6 tokenizers. We computed PPR-LM scores based on the resulting phone sequences. We trained up to 3-gram phone LMs for each PPR-LM tokenizer-target language pair, resulting in 15 6 90 \u00d7 = LMs. For each input utterance, 90 interpolated scores were derived to form a vector. In this way, the training utterances could be represented by a collection of 90-dimension score vectors. Similarly, for UPR-LM, we trained up to 3-gram phone LMs for each of the target languages, resulting in 15 LMs. The training utterances were then represented by a collection of 15-dimension score vectors. Both PPR-LM and UPR-LM shared the same LM backend design, which adopted the framework of PR-LM. The low dimension score vectors could be modeled by the Gaussian Mixture Model (GMM) [Torres-Carrasquillo et al. 2002] .", |
|
"cite_spans": [ |
|
{ |
|
"start": 920, |
|
"end": 953, |
|
"text": "[Torres-Carrasquillo et al. 2002]", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Backend Classifier", |
|
"sec_num": "4.2" |
|
}, |
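
{

"text": "As a minimal, illustrative sketch only (not the authors' implementation, which uses interpolated models of up to trigram order), the following toy add-alpha bigram phone LM shows the kind of scoring behind the LM backend; a PPR-LM score vector is the concatenation of such scores over all tokenizer-language pairs, giving 15 \u00d7 6 = 90 entries here:\n\nfrom collections import Counter\nimport math\n\nclass BigramPhoneLM:\n    def __init__(self, training_sequences, alpha=1.0):\n        # add-alpha smoothed bigram model over phone (or ASM) labels\n        self.alpha = alpha\n        self.bigrams = Counter()\n        self.unigrams = Counter()\n        self.vocab = set()\n        for seq in training_sequences:\n            self.vocab.update(seq)\n            for a, b in zip(seq[:-1], seq[1:]):\n                self.bigrams[(a, b)] += 1\n                self.unigrams[a] += 1\n\n    def logprob(self, seq):\n        # average smoothed bigram log-probability per transition\n        v = max(len(self.vocab), 1)\n        lp = 0.0\n        for a, b in zip(seq[:-1], seq[1:]):\n            lp += math.log((self.bigrams[(a, b)] + self.alpha) / (self.unigrams[a] + self.alpha * v))\n        return lp / max(len(seq) - 1, 1)\n\ndef ppr_lm_score_vector(phone_seqs, lms):\n    # phone_seqs: tokenizer -> decoded phone sequence; lms: tokenizer -> list of 15 target-language LMs\n    return [lm.logprob(phone_seqs[tok]) for tok in sorted(lms) for lm in lms[tok]]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Backend Classifier",

"sec_num": "4.2"

},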
|
{ |
|
"text": "Next, we will discuss the VSM backend classifier . The VSM backend first converted the text-like tokenization sequences into BOS vectors as discussed in Section 3.3. Then the BOS vectors were further processed by the support vector machines to derive", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Backend Classifier", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "( 1)/2 L L \u00d7 \u2212 dimensional discriminative vectors. For a frontend of 6 languages, English, Mandarin, Japanese, Hindi, Spanish and German, there were 258 phonemes in total. In the case of UPR, we derived a BOS vector containing both mono-phones and bi-phones with 66,822 (= 258 2 + 258) elements. In the case of PPR, we derived a BOS vector with 11,708 (= 48 2 +39 2 +52 2 +51 2 +32 2 +36 2 +48 +39 +52 +51 +32 +36) elements. The BOS vectors were then reduced to a discriminative vector of 105 15 14 / 2 = \u00d7 dimensions for an evaluation task involving 15 target languages. In this study, both LM score vectors and BOS discriminative vectors were modeled by the GMM classifier.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Backend Classifier", |
|
"sec_num": "4.2" |
|
}, |
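
{

"text": "For illustration only (not the paper's code), a bag-of-sounds vector over unigram and bigram counts can be built as below; with n acoustic units the vector has n\u00b2 + n entries (258\u00b2 + 258 = 66,822 for the UPR frontend), and such vectors are the inputs to the pairwise support vector machines whose outputs form the L \u00d7 (L \u2212 1)/2 dimensional discriminative vectors:\n\nimport numpy as np\n\ndef bos_vector(token_ids, n_units):\n    # token_ids: sequence of integer ASM indices in [0, n_units)\n    # layout: n_units*n_units bigram counts first, then n_units unigram counts\n    vec = np.zeros(n_units * n_units + n_units)\n    for t in token_ids:\n        vec[n_units * n_units + t] += 1.0\n    for a, b in zip(token_ids[:-1], token_ids[1:]):\n        vec[a * n_units + b] += 1.0\n    total = vec.sum()\n    return vec / total if total > 0 else vec  # one simple count normalization; other term weightings are possible",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Backend Classifier",

"sec_num": "4.2"

},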
|
{ |
|
"text": "The main difference between the LM and the VSM backend classifier lies in the representation of the document vector. In LM backend, the document vector is characterized by interpolated LM scores, while in VSM backend, the document vector is derived from outputs of support vector machines, which introduce discriminative ability between language pairs. If we see the LM backend as a likelihood-based classifier, then the VSM backend is a discrimination-motivated classifier.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Backend Classifier", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We have discussed two different frontends, PPR and UPR, and two different backends, LM and VSM. To gain insight into the behavior of each of the frontends and backends, it is desirable to investigate the performance of each of the four combined systems as shown in Figure 5 , namely, PPR-LM, PPR-VSM, UPR-LM, and UPR-VSM, where the PPR/UPR frontends are built on a set of universal ASMs.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 265, |
|
"end": 273, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Four LID Systems", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Without loss of generality, we deployed the same 258-ASM with two different settings. First, the 258 ASMs were arranged in a 6-language PPR frontend. They were redistributed according to their API-II definitions into 6 languages. Second, they were lumped together in a single UPR frontend. The training of the 258-ASM was discussed in Section 2.2. We used the GMM classifier in the LM backend and VSM backend, in which we trained 512-mixture GMMs to model the desired language and to model all its competing languages, and reported the equal error rates (EER%) between false-alarm and miss-detect.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Four LID Systems", |
|
"sec_num": "4.3" |
|
}, |
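
{

"text": "For reference only (a minimal sketch, not the evaluation scripts actually used), the equal error rate reported below can be computed from per-trial detection scores as follows, where target trials are those in which the hypothesized language is actually spoken:\n\nimport numpy as np\n\ndef equal_error_rate(target_scores, nontarget_scores):\n    # sweep a decision threshold and return the operating point where miss rate equals false-alarm rate\n    target_scores = np.asarray(target_scores, dtype=float)\n    nontarget_scores = np.asarray(nontarget_scores, dtype=float)\n    thresholds = np.sort(np.concatenate([target_scores, nontarget_scores]))\n    best_gap, eer = np.inf, 0.0\n    for th in thresholds:\n        miss = np.mean(target_scores < th)\n        fa = np.mean(nontarget_scores >= th)\n        if abs(miss - fa) < best_gap:\n            best_gap, eer = abs(miss - fa), (miss + fa) / 2.0\n    return eer",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Four LID Systems",

"sec_num": "4.3"

},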
|
{ |
|
"text": "The UPR-VSM system follows the block diagram of the language-independent acoustic phone recognition approach [Ma et al. 2005] . PPR-LM was implemented as in [Zissman 1996 ]. The LM backend uses trigrams to derive phonotactic scores. The results for the 1996, 2003 and 2005 NIST LRE tasks are shown in Tables 3, 4, and 5, respectively. In Table 6 , we also report the execution times for the 2003 NIST LRE task obtained in terms of the real-time-factor (xRT) with an Intel Xeon 2.80 GHz CPU.", |
|
"cite_spans": [ |
|
{ |
|
"start": 109, |
|
"end": 125, |
|
"text": "[Ma et al. 2005]", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 157, |
|
"end": 170, |
|
"text": "[Zissman 1996", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 253, |
|
"end": 272, |
|
"text": "1996, 2003 and 2005", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 338, |
|
"end": 345, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Figure 5. Block diagram of four combinations of frontends and backends", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Before discussing results, we will examine the effects of the combined frontends and backends. In the combined systems, there are two unique frontend settings, PPR and UPR. PPR converts an input spoken utterance into 6 spoken documents using the parallel frontend, while UPR converts an input into a single document. However, there are four unique LM and VSM backend settings. The LM in PPR-LM and that in the UPR-LM are different; the former has 15 6 \u00d7 n-gram language models, while the latter only has 15 language models. In other words, the former LM classifier is more complex, with a larger number of parameters, than the latter. The VSM in PPR-VSM and the VSM in UPR-VSM have different levels of complexity as well. The former VSM processes vectors with 11,708 dimensions, while the latter processes those with 66,822 dimensions, as discussed in Section 4.2. The vectors in PPR-VSM and UPR-VSM are shown in Figure 6 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 913, |
|
"end": 921, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Figure 5. Block diagram of four combinations of frontends and backends", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Although the dimensionality of V-PPR is lower than that of V-UPR, V-PPR is 6 times as dense as V-UPR, resulting in more complex support vector machine partitions (SVM) [Vapnik 1995] . In other words, the VSM classifier in the PPR-VSM is more complex than that in UPR-VSM. In terms of the overall classifier backend complexity, we rank the four systems from high to low as follows: PPR-VSM, PPR-LM or UPR-VSM, and UPR-LM. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 168, |
|
"end": 181, |
|
"text": "[Vapnik 1995]", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Figure 5. Block diagram of four combinations of frontends and backends", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Summarizing the results obtained in the three NIST LRE tasks, we have the following findings:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Figure 6. Two different spoken document vectors in PPR-VSM and UPR-VSM", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(i) The VSM backend demonstrates a clear advantage over the LM backend for the 30-second and 10-second trials. This can be easily explained by the fact that VSM models are designed to capture phonotactics over the context of the whole spoken document. As a result, VSM favors longer utterances which provide richer long span phonotactic information.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Figure 6. Two different spoken document vectors in PPR-VSM and UPR-VSM", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(ii) The system performance highly correlates with the complexity of the system architectures. This can be seen in Tables 3, 4 , and 5, which show that PPR-VSM achieved the best result with an EER of 2.75%, 3.62%, and 5.78% in the 30-second 1996, 2003 and 2005 NIST LRE tasks, respectively, followed by PPR-LM, UPR-VSM, and UPR-LM. Note that we can increase the system complexity by using more PPRs. We expect that more PPRs will improve the PPR-VSM system performance further.", |
|
"cite_spans": [ |
|
{ |
|
"start": 231, |
|
"end": 260, |
|
"text": "30-second 1996, 2003 and 2005", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 115, |
|
"end": 126, |
|
"text": "Tables 3, 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Figure 6. Two different spoken document vectors in PPR-VSM and UPR-VSM", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(iii) Although PPR-LM outperformed UPR-VSM in general, the UPR frontend was superior in computational efficiency during run-time operation over the PPR frontend. In Table 6 , we find that the systems with the UPR frontend ran almost 60% faster than those with the PPR frontend.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 165, |
|
"end": 172, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Figure 6. Two different spoken document vectors in PPR-VSM and UPR-VSM", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "As a general remark, ASM-based acoustic modeling not only offers an effective unsupervised training procedure and hence, low development cost, but also efficient run-time operation as in the case of the UPR frontend. More importantly, it delivers outstanding system performance. VSM is the choice for the backend when longer utterances are available, while PPR-VSM delivers the best result in the comprehensive benchmarking for 30-second test condition.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Figure 6. Two different spoken document vectors in PPR-VSM and UPR-VSM", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "LID technology has gone through many years of evolution. Many results have been published in the literature for the 1996 and 2003 NIST LRE tasks. They provide good benchmarks for new technology development. Here, we summarize some recently reported results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overall Performance Comparison", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "For the sake of brevity, we only compare results obtained in the 30-second tests, which represent the primary condition of interest in the NIST LRE tasks. Systems 1, 2, and 3 in Table 7 were trained and tested on the same databases. Therefore, the results can be directly compared. They are extracted from Tables 3 and 4. We also cite two results from recent reports [Gauvain et al. 2004] [Singer et al. 2003] as references. Table 7 shows that the performance of PPR-VSM system is among the best in the 1996 and 2003 NIST LRE tasks. Ma et al. [2005] reported that the API-bootstrapped ASM outperformed API phone models in the LID task. This paper extends our previous work through comprehensive benchmarking, which produced further findings and validated the effectiveness of the proposed VSM solution. The systems reported in this paper contributed to the ensemble classifier that participated in the 2005 NIST LRE representing IIR site.", |
|
"cite_spans": [ |
|
{ |
|
"start": 367, |
|
"end": 388, |
|
"text": "[Gauvain et al. 2004]", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 389, |
|
"end": 409, |
|
"text": "[Singer et al. 2003]", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 533, |
|
"end": 549, |
|
"text": "Ma et al. [2005]", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 178, |
|
"end": 185, |
|
"text": "Table 7", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 425, |
|
"end": 432, |
|
"text": "Table 7", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Overall Performance Comparison", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "The proposed VSM-based language classifier compares phonotactic statistics from spoken documents. We have not explored the use of acoustic scores resulting from the tokenization process. It was reported that combining information of acoustic scores along with phonotactic statistics produced good results [Corredor-Ardoy et al. 1997] [Singer et al. 2003 ] [Torres-Carrasquillo et al. 2002 . Furthermore, fusion of phonotactic statistics at different levels of resolutions also improved overall performance [Lim et al. 2005] . We have good reason to expect that fusion among our 4 combinative systems, or between our systems and other existing methods, including GMM tokenizer [Torres-Carrasquillo et al. 2002] , will lead to further improvements. [Gauvain et al. 2004] 3.20 4.00 5 Parallel PRLM [Singer et al. 2003 ] 5.60 6.60", |
|
"cite_spans": [ |
|
{ |
|
"start": 305, |
|
"end": 333, |
|
"text": "[Corredor-Ardoy et al. 1997]", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 334, |
|
"end": 353, |
|
"text": "[Singer et al. 2003", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 354, |
|
"end": 388, |
|
"text": "] [Torres-Carrasquillo et al. 2002", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 506, |
|
"end": 523, |
|
"text": "[Lim et al. 2005]", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 676, |
|
"end": 709, |
|
"text": "[Torres-Carrasquillo et al. 2002]", |
|
"ref_id": "BIBREF31" |
|
}
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overall Performance Comparison", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "We have studied the effects of frontends and backends in the LID system. In the following, we summarize our findings. (1) A vector space modeling (VSM) backend consistently outperformed the LM backend in the combination tests;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "(2) The PPR-VSM system configuration demonstrated superior performance across all of the primary tasks (30-second tests);", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "(3) The UPR frontend was effective in run-time operation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "In this study, we formulated both LM backend and VSM backend classifiers as a vector classification problem. The traditional LM backend applies similarity based approach to the vector representation of spoken documents. The VSM backend represents spoken documents using discriminative vectors derived from the outputs of support vector machines. We achieved EERs of 2.75% and 3.62% in the 30-second 1996 and 2003 NIST LRE tasks respectively with the PPR-VSM system. These are some of the best reported results for a single LID classifier. The VSM backend was also successfully implemented in IIR's submission to 2005 NIST LRE. The good results can be credited to the enhanced discriminatory ability of the VSM backend.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "Exploring the bag-of-sounds spoken document vectors using the bigram statistics of ASM acoustic units, we found that one of the advantages of the VSM method is that it can represent a document with heterogeneous attributes (a mix of unigram, bigram, etc). Inspired by the feature reduction results, we believe that the bag-of-sounds vector can be extended to accommodate trigram statistics and acoustic features as well.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "Language Identification Corpus of the Institute for Infocomm Research 2 http://cslu.cse.ogi.edu/corpora/corpCurrent.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://www.nist.gov/speech/tests/index.htm 5 See http://www.ldc.upenn.edu/. The overlapping between the CallFriend database and the 1996 LRE data was removed from the training data as suggested in http://www.nist.gov/speech/tests/index.htm for the 2003 evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In the 12-language CallFriend database, English, Mandarin, and Spanish have two dialects, respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We have successfully treated LID as a text categorization application with the topic category being the language identity itself. The VSM method can be extended to other spoken document classification tasks as well, for example, multilingual spoken document categorization by topic. We are also interested in exploring other language-specific features, such as syllabic and tonal properties. It is quite straightforward to incorporate specific salient features and examine their benefits. Furthermore, some high-frequency, language-specific words can also be converted into acoustic words and included in an acoustic word vocabulary, in order to increase the indexing power of these words for their corresponding languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "acknowledgement", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Phonetic Knowledge, Phonotactics and Perceptual Validation for Automatic Language Identification", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Adda-Decker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Antoine", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Mareuil", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Vasilescu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Lamel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Vaissiere", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Geoffrois", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J.-S", |
|
"middle": [], |
|
"last": "Lienard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the 15th International Congress of Phonetic Sciences", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "747--750", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adda-Decker, M., F. Antoine, P.B. Mareuil, I. Vasilescu, L. Lamel, J. Vaissiere, E. Geoffrois, and J.-S. Lienard, \"Phonetic Knowledge, Phonotactics and Perceptual Validation for Automatic Language Identification,\" In Proceedings of the 15th International Congress of Phonetic Sciences, 2003, pp. 747-750.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Exploiting Latent Semantic Information in Statistical Language Modeling", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Bellegarda", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proceedings of IEEE", |
|
"volume": "88", |
|
"issue": "", |
|
"pages": "1279--1296", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bellegarda, J.R., \"Exploiting Latent Semantic Information in Statistical Language Modeling,\" In Proceedings of IEEE, 88(8), 2000, pp. 1279-1296.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Analysis of phoneme-based features for language identification", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Berkling", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Barnard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "International Conference on Acoustics, Speech & Signal Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "289--292", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Berkling, K.M., and E. Barnard, \"Analysis of phoneme-based features for language identification,\" International Conference on Acoustics, Speech & Signal Processing, 1994a, vol. 1, pp. 289-292.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Language identification of six languages based on a common set of broad phonemes", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Berkling", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Barnard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "International Conference on Spoken Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1891--1894", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Berkling, K.M., and E. Barnard, \"Language identification of six languages based on a common set of broad phonemes,\" International Conference on Spoken Language Processing, 1994b, pp. 1891-1894.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Language identification with language-independent acoustic models", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Corredor-Ardoy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Gauvain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Adda-Decker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Lamel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Speech Communication and Technology", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "55--58", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Corredor-Ardoy, C., J.L. Gauvain, M. Adda-Decker, and L. Lamel, \"Language identification with language-independent acoustic models,\" 5 th European Conference on Speech Communication and Technology, 1997, vol. 1, pp. 55-58.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "A novel feature combination approach for spoken document classification with support vector machines", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "U", |
|
"middle": [], |
|
"last": "Iurgel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Rigoll", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Multimedia Information Retrieval Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--5", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dai, P., U. Iurgel, and G. Rigoll, \"A novel feature combination approach for spoken document classification with support vector machines,\" Multimedia Information Retrieval Workshop, 2003, pp.1-5.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "A text-categorization approach to spoken language identification", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C.-H", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Speech Communication and Technology (Interspeech)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2837--2840", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gao, S., B. Ma, H. Li, and C.-H. Lee, \"A text-categorization approach to spoken language identification,\" 9 th European Conference on Speech Communication and Technology (Interspeech), 2005, pp. 2837-2840.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "A MFoM learning approach to robust multiclass multi-label text categorization", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C.-H", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T.-S", |
|
"middle": [], |
|
"last": "Chua", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "329--336", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gao, S., W. Wu, C.-H. Lee, and T.-S. Chua, \"A MFoM learning approach to robust multiclass multi-label text categorization,\" International Conference on Machine Learning, 2004, pp. 329-336.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Language recognition using phone lattices", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Gauvain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Messaoudi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Schwenk", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "International Conference on Spoken Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gauvain, J.L., A. Messaoudi, and H. Schwenk, \"Language recognition using phone lattices,\" International Conference on Spoken Language Processing, 2004.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Neural Networks: A comprehensive foundation", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Haykin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haykin, S., Neural Networks: A comprehensive foundation, McMillan, 1994.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Recent Improvements in An Approach to Segment-Based Automatic Language Identification", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Hazen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Zue", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "International Conference on Spoken Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1883--1886", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hazen, T.J., and V. W. Zue, \"Recent Improvements in An Approach to Segment-Based Automatic Language Identification,\" International Conference on Spoken Language Processing, 1994, pp. 1883 -1886.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "ASCII phonetic symbols for the world's languages: Worldbet", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Hieronymus", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hieronymus, J.L. \"ASCII phonetic symbols for the world's languages: Worldbet,\" Technical Report AT&T Bell Labs, 1994.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Learning to classify text using support vector machines", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Joachims", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joachims, T., Learning to classify text using support vector machines, Kluwer Academic Publishers, 2002.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Mixed Memory Markov Models for Automatic Language Identification", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Kirchhoff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Parandekar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Bilmes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "International Conference on Acoustics, Speech & Signal Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "761--764", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kirchhoff, K., S. Parandekar, and J. Bilmes, \"Mixed Memory Markov Models for Automatic Language Identification,\" International Conference on Acoustics, Speech & Signal Processing, 2002, vol. 1, pp. 761-764.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Discriminative training of natural language call routers", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"K J" |
|
], |
|
"last": "Kuo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C.-H", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "IEEE Trans. on Speech and Audio Processing", |
|
"volume": "11", |
|
"issue": "1", |
|
"pages": "24--35", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kuo, H.K.J., and C.-H. Lee, \"Discriminative training of natural language call routers,\" IEEE Trans. on Speech and Audio Processing, 11(1), 2003, pp. 24-35.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "From Knowledge-Ignorant to Knowledge-Rich Modeling: A New Speech Research Paradigm for Next Generation Automatic Speech Recognition", |
|
"authors": [ |
|
{ |
|
"first": "C.-H", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "International Conference on Spoken Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "109--112", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lee, C.-H., \"From Knowledge-Ignorant to Knowledge-Rich Modeling: A New Speech Research Paradigm for Next Generation Automatic Speech Recognition,\" International Conference on Spoken Language Processing, 2004, pp.109-112.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "A Segment Model Based Approach to Speech Recognition", |
|
"authors": [ |
|
{ |
|
"first": "C.-H", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Soong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B.-H", |
|
"middle": [], |
|
"last": "Juang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "International Conference on Acoustics, Speech & Signal Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "501--504", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lee, C.-H., F. K. Soong, and B.-H. Juang, \"A Segment Model Based Approach to Speech Recognition,\" International Conference on Acoustics, Speech & Signal Processing, 1998, pp. 501-504.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "A Phonotactic Language Model for Spoken Language Identification", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "43 rd Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "515--522", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Li, H., and B. Ma, \"A Phonotactic Language Model for Spoken Language Identification,\" 43 rd Meeting of the Association for Computational Linguistics, 2005, pp. 515-522.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Using local and global phonotactic features in Chinese dialect identification", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Lim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "International Conference on Acoustics, Speech & Signal Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "577--580", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lim, B.P., H. Li, and B. Ma, \"Using local and global phonotactic features in Chinese dialect identification,\" International Conference on Acoustics, Speech & Signal Processing, 2005, vol. 1, pp. 577-580.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Multilingual Speech Recognition with Language Identification", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Guan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C.-H", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "International Conference on Spoken Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "505--508", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ma, B., C. Guan, H. Li, and C.-H. Lee, \"Multilingual Speech Recognition with Language Identification,\" International Conference on Spoken Language Processing, 2002, pp. 505-508.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "An Acoustic Segment Modeling Approach to Automatic Language Identification", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C.-H", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Speech Communication and Technology (Interspeech)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2829--2832", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ma, B., H. Li, and C.-H. Lee, \"An Acoustic Segment Modeling Approach to Automatic Language Identification,\" 9 th European Conference on Speech Communication and Technology (Interspeech), 2005, pp. 2829-2832.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Language Identification Incorporating Lexical Information", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Matrouf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Adda-Decker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Lamel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J.-L", |
|
"middle": [], |
|
"last": "Gauvain", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "International Conference on Spoken Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matrouf, D., M. Adda-Decker, L.F. Lamel, and J.-L. Gauvain, \"Language Identification Incorporating Lexical Information,\" International Conference on Spoken Language Processing, 1998.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Perceptual Benchmarks for Automatic Language Identification", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Muthusamy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Jain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Cole", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "International Conference on Acoustics, Speech & Signal Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "333--336", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Muthusamy, Y.K., N. Jain, and R. A. Cole, \"Perceptual Benchmarks for Automatic Language Identification,\" International Conference on Acoustics, Speech & Signal Processing, 1994, vol. 1, pp. 333-336.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Language Identification Using Parallel Syllable-Like Unit Recognition", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Nagarajan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Murthy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "International Conference on Acoustics, Speech & Signal Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "401--404", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nagarajan, T., and H.A. Murthy, \"Language Identification Using Parallel Syllable-Like Unit Recognition,\" International Conference on Acoustics, Speech & Signal Processing, 2004, vol. 1, pp. 401-404.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Multi-Stream Language Identification Using Data-Driven Dependency Selection", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Parandekar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Kirchhoff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "International Conference on Acoustics, Speech & Signal Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "28--31", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Parandekar, S., and K. Kirchhoff, \"Multi-Stream Language Identification Using Data-Driven Dependency Selection,\" International Conference on Acoustics, Speech & Signal Processing, 2003, vol. 1, pp. 28-31.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "A Tutorial on Hidden Markov Models and Selected Applications in Speech Recognition", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Rabiner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1989, |
|
"venue": "Proc. IEEE", |
|
"volume": "77", |
|
"issue": "", |
|
"pages": "257--286", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rabiner, L.R., \"A Tutorial on Hidden Markov Models and Selected Applications in Speech Recognition,\" Proc. IEEE, 77(2), 1989, pp. 257-286.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Language identification using parallel sub-word recognition", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"K V" |
|
], |
|
"last": "Sai Jayram", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Ramasubramanian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Sreenivas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "International Conference on Acoustics, Speech & Signal Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "32--35", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sai Jayram, A.K.V., V. Ramasubramanian, and T. V. Sreenivas, \"Language identification using parallel sub-word recognition,\" International Conference on Acoustics, Speech & Signal Processing, 2003, vol. 1, pp. 32-35.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "The SMART retrievl system", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Salton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1971, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Salton, G., The SMART retrievl system. Prentice-Hall, Englewood Cliffs, NJ, 1971.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Machine learning in automated text categorization", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Sebastiani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "ACM Computing Surveys", |
|
"volume": "34", |
|
"issue": "1", |
|
"pages": "1--47", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastiani, F., \"Machine learning in automated text categorization,\" ACM Computing Surveys, 34(1), 2002, pp. 1-47.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Acoustic, Phonetic and Discriminative Approaches to Automatic Language Recognition", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Singer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Torres-Carrasquillo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Gleason", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Campbell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Reynolds", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Speech Communication and Technology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1345--1348", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Singer, E., P.A. Torres-Carrasquillo, T.P. Gleason, W.M. Campbell, and D.A. Reynolds, \"Acoustic, Phonetic and Discriminative Approaches to Automatic Language Recognition,\" 8 th European Conference on Speech Communication and Technology, 2003, pp. 1345-1348.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Automatic Language Recognition Using Acoustic Features", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Sugiyama", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1991, |
|
"venue": "International Conference on Acoustics, Speech & Signal Processing", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "813--816", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sugiyama, M., \"Automatic Language Recognition Using Acoustic Features,\" International Conference on Acoustics, Speech & Signal Processing, 1991, vol. 2, pp. 813-816.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Language Identification Using Gaussian Mixture Model Tokenization", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Torres-Carrasquillo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Reynolds", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Deller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jr", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "International Conference on Acoustics, Speech & Signal Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "757--760", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Torres-Carrasquillo, P.A., D.A. Reynolds and J. R. Deller, Jr, \"Language Identification Using Gaussian Mixture Model Tokenization,\" International Conference on Acoustics, Speech & Signal Processing, 2002, vol. 1, pp. 757-760.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "The Nature of Statistical Learning Theory", |
|
"authors": [ |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Vapnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vapnik, V., The Nature of Statistical Learning Theory, Springer-Verlag, 1995.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "An Approach to Automatic Language Identification Based on Language Dependent Phone Recognition", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Barnard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "International Conference on Acoustics, Speech & Signal Processing", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "3511--3514", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yan, Y., and E. Barnard, \"An Approach to Automatic Language Identification Based on Language Dependent Phone Recognition,\" International Conference on Acoustics, Speech & Signal Processing, 1995, vol. 5, pp. 3511-3514.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Comparison of Four Approaches to Automatic Language Identification of Telephone Speech", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Zissman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "IEEE Trans. Speech and Audio Proc", |
|
"volume": "4", |
|
"issue": "1", |
|
"pages": "31--44", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zissman, M.A., \"Comparison of Four Approaches to Automatic Language Identification of Telephone Speech,\" IEEE Trans. Speech and Audio Proc., 4(1), 1996, pp. 31-44.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF1": { |
|
"uris": null, |
|
"text": "66,822 dimensional vector from the UPR (V-UPR)", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF0": { |
|
"text": "", |
|
"num": null, |
|
"content": "<table><tr><td>API-I</td><td>Count</td><td>API-II</td><td>Count</td></tr><tr><td>English</td><td>44</td><td>English</td><td>48</td></tr><tr><td>Mandarin</td><td>43</td><td>Mandarin</td><td>39</td></tr><tr><td>Korean</td><td>37</td><td>German</td><td>52</td></tr><tr><td>General</td><td>4</td><td>Hindi</td><td>51</td></tr><tr><td/><td/><td>Japanese</td><td>32</td></tr><tr><td/><td/><td>Spanish</td><td>36</td></tr><tr><td>Total</td><td>128</td><td>Total</td><td>258</td></tr></table>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF1": { |
|
"text": "", |
|
"num": null, |
|
"content": "<table><tr><td>Error Rate (%)</td><td>32-ASM</td><td>64-ASM</td><td>128-ASM</td></tr><tr><td>Unigrams</td><td>40.1</td><td>26.7</td><td>22.3</td></tr><tr><td>Bigrams</td><td>32.6</td><td>18.6</td><td>13.9</td></tr><tr><td>Trigrams</td><td>27.9</td><td>NA</td><td>NA</td></tr></table>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"text": "", |
|
"num": null, |
|
"content": "<table><tr><td/><td>System</td><td>1996 LRE</td><td>2003 LRE</td></tr><tr><td>1</td><td>PPR-VSM</td><td>2.75</td><td>3.62</td></tr><tr><td>2</td><td>PPR-LM</td><td>2.92</td><td>4.54</td></tr><tr><td>3</td><td>UPR-VSM</td><td>4.87</td><td>6.33</td></tr><tr><td>4</td><td>Phone Lattice</td><td/><td/></tr></table>", |
|
"html": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |