|
{ |
|
"paper_id": "2019", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:30:14.810510Z" |
|
}, |
|
"title": "Non-native Accent Partitioning for Speakers of Indian Regional Languages", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Radha Krishna", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "VNRVJIET Hyderabad", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Krishnan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amritha University Coimbattore", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Mittal", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "K L University", |
|
"location": { |
|
"settlement": "Vijayawada", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Acoustic features extracted from the speech signal can help in identifying speaker related multiple information such as geographical origin, regional accent and nativity. In this paper, classification of native speakers of South Indian languages is carried out based upon the accent of their non-native language, i.e., English. Four South Indian languages: Kannada, Malayalam, Tamil, and Telugu are examined. A database of English speech from the native speakers of these languages, along with the native language speech data was collected, from a non-overlapping set of speakers. Segment level acoustic features Mel-frequency cepstral coefficients (MFCCs) and F 0 are used. Accent partitioning of non-native English speech data is carried out using multiple classifiers: k-nearest neighbour (KNN), linear discriminant analysis (LDA) and support vector machine (SVM), for validation and comparison of results. Classification accuracies of 86.6% are observed using KNN, and 89.2% or more than 90% using SVM classifier. A study of acoustic feature F 0 contour, related to L 2 intonation, showed that native speakers of Kannada language are quite distinct as compared to those of Tamil or Telugu languages. It is also observed that identification of Malayalam and Kannada speakers from their English speech accent is relatively easier than Telugu or Tamil speakers.", |
|
"pdf_parse": { |
|
"paper_id": "2019", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Acoustic features extracted from the speech signal can help in identifying speaker related multiple information such as geographical origin, regional accent and nativity. In this paper, classification of native speakers of South Indian languages is carried out based upon the accent of their non-native language, i.e., English. Four South Indian languages: Kannada, Malayalam, Tamil, and Telugu are examined. A database of English speech from the native speakers of these languages, along with the native language speech data was collected, from a non-overlapping set of speakers. Segment level acoustic features Mel-frequency cepstral coefficients (MFCCs) and F 0 are used. Accent partitioning of non-native English speech data is carried out using multiple classifiers: k-nearest neighbour (KNN), linear discriminant analysis (LDA) and support vector machine (SVM), for validation and comparison of results. Classification accuracies of 86.6% are observed using KNN, and 89.2% or more than 90% using SVM classifier. A study of acoustic feature F 0 contour, related to L 2 intonation, showed that native speakers of Kannada language are quite distinct as compared to those of Tamil or Telugu languages. It is also observed that identification of Malayalam and Kannada speakers from their English speech accent is relatively easier than Telugu or Tamil speakers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Identification of speakers, classification of their dialectal zones is important in a multilingual country like India (Bhattacharjee and Sarmah, 2012) . Speaker uniqueness is manifested in both anatomical and learned traits. When the context is constrained, speaker characteristics can be used reliably to identify individuals (Arslan and Hansen, 1996) . The accent is one of the glaring indications of linguistic and social background of a speaker. Studying the characteristics of dialect on a phonetic or phonemic level belongs to accent recognition . Earlier studies have concluded that native language (L 1 ) affects the speaker's traits of their second language (L 2 ) (Ghorbani et al., 2018; Graham and Post, 2018) . Analysis and classification of utterances that belong to specific groups of learners is the main objective of Native Language Identification (NLI) (Nisioi, 2015) . However, there is very little research on the question of accuracy with which accent features can be used to identify a speaker's regional or ethnic origin (Harper and Maxwell, 2008) . A solution to the problem of regional accent classification across English speaking South Indians is attempted in the present research, using a specifically developed corpus.", |
|
"cite_spans": [ |
|
{ |
|
"start": 118, |
|
"end": 150, |
|
"text": "(Bhattacharjee and Sarmah, 2012)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 327, |
|
"end": 352, |
|
"text": "(Arslan and Hansen, 1996)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 674, |
|
"end": 697, |
|
"text": "(Ghorbani et al., 2018;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 698, |
|
"end": 720, |
|
"text": "Graham and Post, 2018)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 870, |
|
"end": 884, |
|
"text": "(Nisioi, 2015)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 1043, |
|
"end": 1069, |
|
"text": "(Harper and Maxwell, 2008)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Discriminative classifiers based on characterizing acoustic differences across foreign accents can be employed to direct an accent dependent recognition system (Omar and Pelecanos, 2010; Ikeno and Hansen, 2006) . Systems with an automatic evaluation of non-native speech, which includes characteristics of the mother tongue will have better performance over similar algorithms that depend upon target languages (Qian et al., 2017) . This is particularly true when the text uttered is unknown. Native listeners are mostly aware of the speaker's regional accent and also the social or geographical subgroup within the region (Hanani et al., 2013) . Automatic speaker characterization is vital in real-world applications and the advantages are widely open (Zampieri et al., 2017; Krishna and Krishnan, 2014) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 160, |
|
"end": 186, |
|
"text": "(Omar and Pelecanos, 2010;", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 187, |
|
"end": 210, |
|
"text": "Ikeno and Hansen, 2006)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 411, |
|
"end": 430, |
|
"text": "(Qian et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 623, |
|
"end": 644, |
|
"text": "(Hanani et al., 2013)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 753, |
|
"end": 776, |
|
"text": "(Zampieri et al., 2017;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 777, |
|
"end": 804, |
|
"text": "Krishna and Krishnan, 2014)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Pattern recognition approach of collecting data, extracting suitable features, and training classification module using machine learning is a powerful tool in applications like Computer-Assisted-Pronunciation-Training (CAPT) programs. Acoustic descriptors are critical in tasks such as sound Classification (Day and Nandi, 2007) . State-of-the-art Accent Identification (AID) systems widely rely on spectral acoustic distribution for modeling the pronunciation. In applications like accent recognition, features distinguishing different phonemes of a language will be useful . Languagespecific differences in phonological development might be related to differences in phoneme and phoneme sequence frequency across languages (Ikeno and Hansen, 2006) . Such variations are also represented by the intonation patterns of individuals (Mary and Yegnanarayana, 2008; Li et al., 2017) . Apart from cepstral features that capture underlying acoustic characteristics, information from higher-level prosodic traits (Doddington, 2001; MALMASI and DRAS, 2017) were examined in the present study.", |
|
"cite_spans": [ |
|
{ |
|
"start": 307, |
|
"end": 328, |
|
"text": "(Day and Nandi, 2007)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 725, |
|
"end": 749, |
|
"text": "(Ikeno and Hansen, 2006)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 831, |
|
"end": 861, |
|
"text": "(Mary and Yegnanarayana, 2008;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 862, |
|
"end": 878, |
|
"text": "Li et al., 2017)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 1006, |
|
"end": 1024, |
|
"text": "(Doddington, 2001;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 1025, |
|
"end": 1048, |
|
"text": "MALMASI and DRAS, 2017)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "English is the most widely spoken second language in India and elsewhere in the world (Saha and Mandal, 2015; Guntur et al., 2018) . Indian English has several varieties with their specific accents and phonological features and often a distinct lexicon. Research on spoken English of Indian speakers is urgently needed from a multidisciplinary perspective (Cheng et al., 2013; Krishna et al., 2019) . Present work is aimed at comparing the acoustic properties that are likely to differ between English accents of different groups of South Indian language speakers. The nonnative prosodic traits are a hindrance to proficiency in a second language (L 2 ), and also to the mutual understanding. Present work also examines the local prosodic changes in the non-native English speech, without incorporating any phonology of the specific languages. The ability to compensate against prosodic deviation during English production can be improved by identifying the articulatory gestures that emphasize the non-native speaker accent. The paper is organized as follows: Section 2 presents the details of the database, including the recording methodology. Section 3 describes acoustic and prosodic features used in foreign accent recognition. Section 4 describes the classification procedures employed in the NLI experiments. Section 5 gives the details of the experiments and results. Analysis of results of regional accent classification is given in section 6. Section 7 describes the key outcome and contributions. Conclusions drawn are given in Section 8.",
|
"cite_spans": [ |
|
{ |
|
"start": 86, |
|
"end": 109, |
|
"text": "(Saha and Mandal, 2015;", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 110, |
|
"end": 130, |
|
"text": "Guntur et al., 2018)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 356, |
|
"end": 376, |
|
"text": "(Cheng et al., 2013;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 377, |
|
"end": 398, |
|
"text": "Krishna et al., 2019)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The main focus of current research work is on differentiating the regional non-native English accents of speakers, and also describing foreign accent in terms of a common set of fundamental speech attributes. A database has been specifically developed (G.Radha with native and non-native speech samples containing utterance by the speakers belonging to language groups Kannada (KAN), Malayalam (MAL), Tamil (TAM), and Telugu (TEL). Table 2 shows the template of file naming process.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 432, |
|
"end": 439, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Sets of 4 Indian Regional Languages", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Among more than six thousand languages in the world, less than 10% of the languages are spoken by more than 90% of the people. Speakers and learners of the English language constitute a large proportion in countries like India, South Africa, and much of the developing world. India has distinct linguistic communities, each of which shares a common language and culture. English, Hindi and dominant local languages are spoken nonnatively by a large number of Indians. In South Indian cities, many people speak at least two second languages. It would be beneficial if speech based systems can store models of all known languages and carry out the task of NLI automatically. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Selection of Regional Languages", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "The details of speech corpus developed for each of the languages is shown in Table 1 . Native speech utterances of 20 speakers from each of the native language groups KAN, TAM, and TEL, each with a duration of 300 seconds formed the training set. English test samples for a duration of 60 seconds were collected from 25 speakers belonging to each of the four groups KAN, MAL, TAM, and TEL. As the sufficient number of native speakers of MAL are not readily available, it is included in the testing set only. The test utterances were recorded under identical conditions as training speech samples and there is no overlap between training and testing sets with respect to speakers and sentences. Each of the test samples is recorded for a duration of 60 seconds. The nonnative English speech samples are collected from a set of speakers with nearly uniform geographical distribution within a region with an educational background of at least graduation, but who do not use English routinely.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 77, |
|
"end": 84, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Speech Corpus Recording Methodology", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Recordings of speakers were made in quiet office room conditions using Logitech h110 microphone and waveforms are sampled at a rate of 16 kHz. The recordings were made in a laboratory environment with written text, with negligible reverberation. The participants were asked to read aloud passages of a text from general topics. For applications like screening of non-native speech, read data can be used for both training and testing (Schuller et al., 2013) . It is ensured that Gender weightages are equally distributed in training as well as testing data sets. The speakers in the training set are considered representative of the regional languages KAN, TAM and TEL. However, for testing set speakers of Malayalam were also included. These speakers are so chosen from language heartlands. The speakers in the test set are considered potential users of future systems augmented with automatic Accent Identification (AID) capability.",

"cite_spans": [

{

"start": 434,

"end": 457,
|
"text": "(Schuller et al., 2013)", |
|
"ref_id": "BIBREF44" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Speech Corpus Recording Methodology", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Understanding similar variations in foreign accents is a crucial factor for the development of an NLI system. The dominant articulatory traits of different languages are different (Koreman, 2018) . In applications like accent recognition, features distinguishing different phonemes of a language will be useful (Li et al., 2013) . The acoustic signature or the voice individuality of the speech signal are available as differences in transformations occurring at semantic, linguistic articulatory, and acoustic levels. Out of all the factors affecting speech, accent is a weak factor in the sense that speech variation is not as evident as that due to speaker/gender. Language-specific differences in phonological development might be related to differences in phoneme and phoneme sequence frequency across languages (Graham and Post, 2018) . Speakers of the second language (SL) are expected to import certain patterns from their native language (NL) Figure 1 : Front end signal processing for feature extraction which are audible in SL. The influence of the surrounding speech prosody on new-born cry melody has been shown (Monnin and Loevenbruck, 2010) . The non-native speech detection is thus very challenging .",
|
"cite_spans": [ |
|
{ |
|
"start": 180, |
|
"end": 195, |
|
"text": "(Koreman, 2018)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 311, |
|
"end": 328, |
|
"text": "(Li et al., 2013)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 817, |
|
"end": 840, |
|
"text": "(Graham and Post, 2018)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 1125, |
|
"end": 1155, |
|
"text": "(Monnin and Loevenbruck, 2010)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 952, |
|
"end": 960, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Features for Non-native Accent Partitioning", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Characterization of a foreign accent is mostly based on either auditory analysis or manual transcriptions of deviations. The auditory spectrum is consistent with several phenomena observed in speech perception and is useful in automatic speaker independent speech recognition. Features used for nonnativeness detection include cepstral vectors, phone strings and a variety of prosodic features, but when used alone, systems based on acoustic features perform better (Shriberg et al., 2005) . We can consider acoustic features, which are proxy of phonetic reproduction as acousticphonetic features (Li et al., 2013) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 466, |
|
"end": 489, |
|
"text": "(Shriberg et al., 2005)", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 597, |
|
"end": 614, |
|
"text": "(Li et al., 2013)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features for Non-native Accent Partitioning", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Earlier investigations on text-independent nonnative speech tied to underlying native language structure are based on (i) Global acoustic distribution of phonemes (which requires no language knowledge) (ii) Different intonations corresponding to uniqueness in the manner in which articulators are manipulated. The shape of the vocal tract is manifested in the envelope of the shorttime power spectrum (Reynolds and Rose, 1995) . The attributes that contain speaker identifiability for machine as well as for humans are of interest (Zheng et al., 2007; .", |
|
"cite_spans": [ |
|
{ |
|
"start": 401, |
|
"end": 426, |
|
"text": "(Reynolds and Rose, 1995)", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 531, |
|
"end": 551, |
|
"text": "(Zheng et al., 2007;", |
|
"ref_id": "BIBREF50" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic Features", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "In this study, acoustic features used for phonetic modeling of the accent differences consists of the cepstral features: Perceptive Linear Prediction Coefficients (PLPs), Linear Predictive Cepstral Coefficients (LPCCs), and MFCCs (Hermansky, 1990; Luengo et al., 2008; Mittal and Yegnanarayana, 2013) . The steps followed are shown in Figure Figure 2 : Waveform and Pitch contour of non-native English speech by female Kannada speaker 1. Given all the alternative spectral features based on LPC -cepstrum and FFT cepstrum for speaker recognition, MFCCs, give a highly compact representation of the spectral envelope of a sound (L\u00f3pez, 2014) . The LPCCs are known to capture extra information from a speech that discriminates different languages. The PLPs which take advantage of psychoacoustic principles are robust against noise. A hierarchy of speech characteristics, related speaker traits, and possible speech features are listed in Table 3 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 230, |
|
"end": 247, |
|
"text": "(Hermansky, 1990;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 248, |
|
"end": 268, |
|
"text": "Luengo et al., 2008;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 269, |
|
"end": 300, |
|
"text": "Mittal and Yegnanarayana, 2013)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 629, |
|
"end": 642, |
|
"text": "(L\u00f3pez, 2014)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 335, |
|
"end": 352, |
|
"text": "Figure Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 939, |
|
"end": 946, |
|
"text": "Table 3", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Acoustic Features", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The prosodic structure is a critical aspect of language contact and gives important information related to the speaking habit of a person (Kinnunen and Li, 2010; Farr\u00fas et al., 2010) . The goal is to capture prosodic idiosyncrasies of speakers belonging to different native languages. Prosodic cues Stress, Rhythm, and Intonation are each complex entities expressed using (i) Pitch (ii) Energy (iii) Duration. Major text-independent features used in prosodic analysis are given in Table 4 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 138, |
|
"end": 161, |
|
"text": "(Kinnunen and Li, 2010;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 162, |
|
"end": 182, |
|
"text": "Farr\u00fas et al., 2010)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 481, |
|
"end": 488, |
|
"text": "Table 4", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Prosodic Features", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In this study Prosodic statistics were obtained by performing different measurements of pitch, which are derived supra segmentally. The power of accent in voice identification is investigated as explained below. A Generative model of pronunciation describes what is acceptable, and Discriminative model both acceptable and unacceptable pronunciation, and the pronunciation score is the direct output of the classification module.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Prosodic Features", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Non-native prosodic traits limit proficiency in a second language (L 2 ). Prosodic phenomena located on word level and above, help listeners to structure the speech signal and to process the linguistic content successfully. Table 4 shows some of the features useful for detecting non-native speech without annotation of prosodic events. The Figure 3 : Distribution of MFCC Coefficients as a Scatter plot of C 0 versus C 1 for native ENGLISH speakers Figure 4 : Distribution of MFCC Coefficients C 0 versus C 1 for English speech by KANNADA speakers experiment by Rosenberg to foil a Speaker Verification system says that even an identical twin was unable to imitate the enrolled sibling well enough to get accepted by the system, tells the need to look at learned speaking behaviour.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 224, |
|
"end": 231, |
|
"text": "Table 4", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 341, |
|
"end": 349, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 450, |
|
"end": 458, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Prosodic Features", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Speaker Classification can be conveniently defined as a grouping of speakers speaking in a similar manner, on the basis of acoustic characteristics (Chen et al., 2014) . Classification of foreign accents directly from the acoustic features is at- Figure 5 : Distribution of MFCC Coefficients C 0 versus C 1 for English speech by MALAYALAM speakers Figure 6 : Distribution of MFCC Coefficients C 0 versus C 1 for English speech by TAMIL speakers Figure 7 : Distribution of MFCC Coefficients C 0 versus C 1 for English speech by TELUGU speakers tempted by using a test data set described in Table 1. The role of accent in voice identification is investigated as explained below. There exists a significant overlap between NLI approaches and computational methods for dialect and language identification (LID), and Support Vector Machine (SVM) classifiers are a very good fit for NLI (Zampieri et al., 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 148, |
|
"end": 167, |
|
"text": "(Chen et al., 2014)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 881, |
|
"end": 904, |
|
"text": "(Zampieri et al., 2017)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 247, |
|
"end": 255, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 348, |
|
"end": 356, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 445, |
|
"end": 453, |
|
"text": "Figure 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Classification for Non-native Accent Partitioning", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "SVM is one of the most popular supervised classifiers on a wide range of data sets, which looks for a maximum-margin hyper plane for data separation (Wu et al., 2010; Bahari et al., 2013; Campbell et al., 2006) . Accuracies of non-native accent classification were studied for the present problem by using the SVM classifier. The speech signal is first processed to extract attributes relevant to the foreign accent (Moustroufas and Digalakis, 2007) . The most representative acoustic features, the LPCC, the PLP (Li et al., 2013) have been tested but were found to be less efficient. The input to the system is a 13 dimensional MFCC vector consisting of 12 cepstral coefficients and one energy coefficient. Thus the front end for the proposed classification system consisted of only 13 dimensional MFCC vector including C 0 . ", |
|
"cite_spans": [ |
|
{ |
|
"start": 149, |
|
"end": 166, |
|
"text": "(Wu et al., 2010;", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 167, |
|
"end": 187, |
|
"text": "Bahari et al., 2013;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 188, |
|
"end": 210, |
|
"text": "Campbell et al., 2006)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 416, |
|
"end": 449, |
|
"text": "(Moustroufas and Digalakis, 2007)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 513, |
|
"end": 530, |
|
"text": "(Li et al., 2013)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Accent Partitioning using SVM Classifier", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Native traits located at a word and sentence levels help listeners structure the speech signal. In many approaches that apply prosody to either Language Identification (LID) or Speaker Recognition, extracted features are based on statistics of pitch / energy contour segments or piecewise linear stylization of pitch / energy contours. Intonation is a key expressive factor which can covey the intent of a speaker, contains a lot more information than words and utterance (Ward et al., 2017) . Intonation is more used than energy and duration features in the context of prosody. Listeners can discern a speaker's regional accent from intonation alone (Eady and Cooper, 1986; Tepperman and Narayanan, 2008) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 472, |
|
"end": 491, |
|
"text": "(Ward et al., 2017)", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 651, |
|
"end": 674, |
|
"text": "(Eady and Cooper, 1986;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 675, |
|
"end": 705, |
|
"text": "Tepperman and Narayanan, 2008)", |
|
"ref_id": "BIBREF46" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Intonation Analysis", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Dynamics of F 0 contour corresponding to a sound is influenced by several factors such as the identity of the sound unit, its context, the speaking style of the speaker, intonation rules of the language, type of the sentence, etc. (Arias et al., 2010) . The focus was mainly on the pitch since it is one of the most important characteristics of prosody and helps in predicting human intonation rating. These suprasegmental parameters can be used to model non-native English prosody (H\u00f6nig et al., 2012) . In the present study, the main aim is to ascertain the influence of linguistic background on F 0 across regional varieties of English, future studies are planned to include the aperiodic components of excitation of expressive voices like Noh voice (Mittal and Yegnanarayana, 2015) ", |
|
"cite_spans": [ |
|
{ |
|
"start": 231, |
|
"end": 251, |
|
"text": "(Arias et al., 2010)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 482, |
|
"end": 502, |
|
"text": "(H\u00f6nig et al., 2012)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 753, |
|
"end": 785, |
|
"text": "(Mittal and Yegnanarayana, 2015)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Intonation Analysis", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "To validate the hypothesis that the accent of the mother tongue is separable, experiments were performed to understand and to calibrate idiolectal differences in the non-native speech samples of the language groups KAN, MAL, TAM and TEL. The corpus is sampled at 16000 samples per second and the bit rate was 32 bits per sample. Silence removal has been implemented using a VAD algorithm (Kinnunen and Li, 2010) . The feature vectors are computed over 20 msec windowed frames every 10 msec. Fourier spectra were computed for sequential frames 160 points apart by using a 320 point Hamming window. Finally Cepstral Mean Normalization (CMN) is applied by subtracting the mean value of each feature over the entire utterance. MFCCs are generated by windowing the signal, application of DFT, taking the log of the magnitude and warping the frequencies on Mel scale and finally application of DCT.", |
|
"cite_spans": [ |
|
{ |
|
"start": 388, |
|
"end": 411, |
|
"text": "(Kinnunen and Li, 2010)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments and Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Experiments were performed to establish the differences in the distribution of acoustic features in the non-native speech samples of four language groups KAN, MAL,TAM, and TEL. Graphical il- lustration of accent partitioning on test data is shown in Figures 3,4 ,5,6,7, and 8. It indicates that the high classification accuracies are possible in the present task. Classification of foreign accents directly from the acoustic features is attempted, by using data set described in Table 1 . Figure 9 shows the confusion matrix for best performing SVM classifier for the five class classification. Figure 11 shows the confusion matrix for the three class classification.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 250, |
|
"end": 261, |
|
"text": "Figures 3,4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 479, |
|
"end": 486, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
}, |
|
{ |
|
"start": 489, |
|
"end": 497, |
|
"text": "Figure 9", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 595, |
|
"end": 604, |
|
"text": "Figure 11", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Non-native Accent Classification based upon Acoustic Features", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "The confusion matrix indicates that the identification rates for Kannada and Tamil language speakers from their non-native English speech can be high compared to that of Telugu native speakers . The Receiver Operating Point Curve (ROC) shown in Figure 10 is a plot of true positive rate as a function of false positive rate, which is very close to the upper left hand corner, indicates that the classifiers can achieve good overall accuracies.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 245, |
|
"end": 254, |
|
"text": "Figure 10", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Non-native Accent Classification based upon Acoustic Features", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Verification of accent partitioning of non-native speech using a series of classification techniques: k-nearest neighbourhood, and Linear Discriminant Analysis was also implemented. English speech samples of the native speakers of KAN, MAL, TAM, and TEL are tested against standard English speech corpus using TIMIT corpus. The resulting accuracies are 86.6% when a KNN clssifier is used, 82.5% when Discrimination classifier is used, and 89.2% using SVM classifier is used. These results are consolidated in Table 5 . Figure 4 , and 6 shows the corresponding confusion matrices, obtained during SVM classification. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 509, |
|
"end": 516, |
|
"text": "Table 5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 519, |
|
"end": 528, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Non-native Accent Classification based upon Acoustic Features", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Experiments were conducted on native and nonnative speech samples of bilingual and multilingual speakers. The pitch frequency was extracted using the \"pitch contour\" function of the Wave Surfer software, and F 0 data was extracted. Typical waveform showing the non-native speech by a female Kannada speaker and the pitch contour were shown in Figure 2 . The speakers in this study were asked to speak in their mother tongue or in English, and 20 exemplars were analysed from each group KAN, TAM, and TEL. In few cases the same speakers have spoken in other Indian language of the neighbouring state.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 343, |
|
"end": 351, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Foreign Accent Discrimination based upon Prosodic Features", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "The difference in F 0 contour between native and non-native speech for speakers from each group has been tested. These results shown in Table 6 clearly indicate that the mean value of nonnative pitch is markedly high in the case nonnative speakers in all the three groups. The percentage deviation from native language to English speech for a group of 20 speakers in each of the three languages has been estimated and is presented in Table 7 . It is evident from the scores presented in Table 7 that the dynamic variation of pitch is the least at 3.7% for the regional variant of KAN speakers, which is significantly less when compared to 9.5%,and 27% corresponding to native TAM and TEL speakers respectively. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 136, |
|
"end": 144, |
|
"text": "Table 6", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 435, |
|
"end": 442, |
|
"text": "Table 7", |
|
"ref_id": "TABREF5" |
|
}, |
|
{ |
|
"start": 488, |
|
"end": 495, |
|
"text": "Table 7", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Foreign Accent Discrimination based upon Prosodic Features", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "\u2022 Figures 3,4 ,5, and 8 reveal that the English spoken by native Kannada and Malayalam speakers is distinct than native Tamil or Telugu speakers, when compared to standard English.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 2, |
|
"end": 13, |
|
"text": "Figures 3,4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Analysis of Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "\u2022 Accent partitioning experiments from a short utterance of 60 seconds of test data, indicates the suitability of the SVM classifier, as can be seen from accuracies shown in Table 5 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 174, |
|
"end": 181, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Analysis of Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "\u2022 Figure 1 reveals that the English spoken by Telugu native speakers are marginally closer to standard English, compared to that of Kannada and Malayalam language speakers.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 2, |
|
"end": 10, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Analysis of Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "\u2022 Higher mean values of the non-native pitch shown in Table 6 indicates the accommodation of speakers of all native languages to suit different social groups.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 54, |
|
"end": 61, |
|
"text": "Table 6", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Analysis of Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "\u2022 Table 7 shows that English speakers of Tamil and Telugu would produce statistically significant higher pitch contour deviations than KAN speakers.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 2, |
|
"end": 9, |
|
"text": "Table 7", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Analysis of Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "\u2022 A framework to handle the deviations of L 2 influenced by closely related L 1 s and to achieve better performance for a given NLI task, even with fewer features is proposed", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Key Outcome and Contributions", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "\u2022 Current study is significant when the target languages are linguistically close, and large resources of spoken English are not available", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Key Outcome and Contributions", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "\u2022 Prosodic differences across the South Indian English accents has been experimentally illustrated, which is useful in automatic intonation classification for L 2 speech acquisition. Language group Male Female Average Kannada 0.9 6.5 3.7 Tamil 9 10 9.5 Telugu 33 21 27", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Key Outcome and Contributions", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "\u2022 Present work helps in accurate recognition of regional accent, that can improve the speech and speaker recognition system performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Key Outcome and Contributions", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "\u2022 Distinct pitch pattern variations in non-native English speech by Malayalam, and Kannada speakers compared to that of Tamil and Telugu varieties can help in distinguishing them.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Key Outcome and Contributions", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "It can be concluded that the regional native language classification has been achieved with an accuracy of nearly 90%, by using the acoustic distribution of cepstral features on the four types of non-native South Indian English speech. It is known that systems make more mistakes among regionally close languages. Accent differences among the non-native speakers are reflected as the deviation of L 2 influenced by L 1 on prosodic level. Studies carried out based on intonation distribution indicates that English speaking South Indian groups corresponding to Kannada, Malayalam, Tamil, and Telugu are clearly divided as per their native languages. Prosodic differences in the native and English speech by South Indian speakers were detected without annotation. Present method can potentially be applied to other languages like Hindi, and in addressing the important question of finding a universal feature set for identifying the non-native speech. Present research is useful in applications such as voice based wireless services like mobile health care, agriculture. Automatic accent characterization can also be applied to fields such as sociolinguistics and speech pathology. Future work can employ different speech styles, and characteristics of speaker population to be carefully scrutinized, and also by including multi-disciplinary information. Further, the results can be extended to separating language families and also for rating L 2 proficiency.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Automatic intonation assessment for computer aided language learning", |
|
"authors": [ |
|
{ |
|
"first": "Juan", |
|
"middle": [ |
|
"Pablo" |
|
], |
|
"last": "Arias", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nestor", |
|
"middle": [], |
|
"last": "Becerra Yoma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hiram", |
|
"middle": [], |
|
"last": "Vivanco", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Speech Communication", |
|
"volume": "52", |
|
"issue": "3", |
|
"pages": "254--267", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.specom.2009.11.001" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Juan Pablo Arias, Nestor Becerra Yoma, and Hiram Vi- vanco. 2010. Automatic intonation assessment for computer aided language learning. Speech Commu- nication, 52(3):254-267.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Language accent classification in American English", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Levent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Arslan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "John", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Hansen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "Speech Communication", |
|
"volume": "18", |
|
"issue": "4", |
|
"pages": "353--367", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/0167-6393(96)00024-6" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Levent M. Arslan and John H.L. Hansen. 1996. Lan- guage accent classification in American English. Speech Communication, 18(4):353-367.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Accent Recognition Using I-vector , Gaussian Mean Supervector and Gaussian Posterior probability Supervector for Spontaneous Telephone Speech", |
|
"authors": [ |
|
{ |
|
"first": "Mohamad", |
|
"middle": [], |
|
"last": "Hasan Bahari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rahim", |
|
"middle": [], |
|
"last": "Saeidi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hugo", |
|
"middle": [], |
|
"last": "Van Hamme", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Van Leeuwen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "IEEE International Conference on Acoustics, Speech and Signal Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7344--7348", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohamad Hasan Bahari, Rahim Saeidi, Hugo Van Hamme, and David Van Leeuwen. 2013. Ac- cent Recognition Using I-vector , Gaussian Mean Supervector and Gaussian Posterior probability Supervector for Spontaneous Telephone Speech. ICASSP, IEEE International Conference on Acous- tics, Speech and Signal Processing, pages 7344- 7348.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Gmmubm based speaker verification in multilingual environments", |
|
"authors": [ |
|
{ |
|
"first": "Utpal", |
|
"middle": [], |
|
"last": "Bhattacharjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kshirod", |
|
"middle": [], |
|
"last": "Sarmah", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "International Journal of Computer Science Issues (IJCSI)", |
|
"volume": "9", |
|
"issue": "6", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Utpal Bhattacharjee and Kshirod Sarmah. 2012. Gmm- ubm based speaker verification in multilingual envi- ronments. International Journal of Computer Sci- ence Issues (IJCSI), 9(6):373.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Svm based speaker verification using a gmm supervector kernel and nap variability compensation", |
|
"authors": [ |
|
{ |
|
"first": "W", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Campbell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Sturim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Reynolds", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Solomonoff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "IEEE International Conference on Acoustics Speech and Signal Processing Proceedings", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "I--I", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ICASSP.2006.1659966" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "W. M. Campbell, D. E. Sturim, D. A. Reynolds, and A. Solomonoff. 2006. Svm based speaker verifica- tion using a gmm supervector kernel and nap vari- ability compensation. In 2006 IEEE International Conference on Acoustics Speech and Signal Pro- cessing Proceedings, volume 1, pages I-I.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Characterizing phonetic transformations and acoustic differences across English dialects", |
|
"authors": [ |
|
{ |
|
"first": "Nancy", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sharon", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Tam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wade", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joseph", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Campbell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "IEEE Transactions on Audio, Speech and Language Processing", |
|
"volume": "22", |
|
"issue": "1", |
|
"pages": "110--124", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/TASLP.2013.2285482" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nancy F. Chen, Sharon W. Tam, Wade Shen, and Joseph P. Campbell. 2014. Characterizing phonetic transformations and acoustic differences across En- glish dialects. IEEE Transactions on Audio, Speech and Language Processing, 22(1):110-124.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Automatic accent quantification of indian speakers of english", |
|
"authors": [ |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikhil", |
|
"middle": [], |
|
"last": "Bojja", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jian Cheng, Nikhil Bojja, and Xin Chen. 2013. Au- tomatic accent quantification of indian speakers of english. In INTERSPEECH.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Robust textindependent speaker verification using genetic programming", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Day", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Nandi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "IEEE Transactions on Audio, Speech, and Language Processing", |
|
"volume": "15", |
|
"issue": "1", |
|
"pages": "285--295", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/TASL.2006.876765" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P. Day and A. K. Nandi. 2007. Robust text- independent speaker verification using genetic pro- gramming. IEEE Transactions on Audio, Speech, and Language Processing, 15(1):285-295.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Speaker recognition based on idiolectal differences between speakers", |
|
"authors": [ |
|
{ |
|
"first": "George", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Doddington", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "INTERSPEECH", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "George R. Doddington. 2001. Speaker recognition based on idiolectal differences between speakers. In INTERSPEECH.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Speech intonation and focus location in matched statements and questions", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Stephen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Eady", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Cooper", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1986, |
|
"venue": "The Journal of the Acoustical Society of America", |
|
"volume": "80", |
|
"issue": "2", |
|
"pages": "402--415", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1121/1.394091" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephen J. Eady and William E. Cooper. 1986. Speech intonation and focus location in matched statements and questions. The Journal of the Acoustical Society of America, 80(2):402-415.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Automatic speaker recognition as a measurement of voice imitation and conversion", |
|
"authors": [ |
|
{ |
|
"first": "Mireia", |
|
"middle": [], |
|
"last": "Farr\u00fas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Wagner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Erro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Javier", |
|
"middle": [ |
|
"Hernando" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "International Journal of Speech, Language and the Law", |
|
"volume": "17", |
|
"issue": "1", |
|
"pages": "119--142", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1558/ijsll.v17i1.119" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mireia Farr\u00fas, Michael Wagner, Daniel Erro, and Javier Hernando. 2010. Automatic speaker recognition as a measurement of voice imitation and conversion. International Journal of Speech, Language and the Law, 17(1):119-142.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Combination of machine scores for automatic grading of pronunciation quality", |
|
"authors": [ |
|
{ |
|
"first": "Horacio", |
|
"middle": [], |
|
"last": "Franco", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leonardo", |
|
"middle": [], |
|
"last": "Neumeyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Speech Communication", |
|
"volume": "30", |
|
"issue": "2", |
|
"pages": "121--130", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/S0167-6393(99)00045-X" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Horacio Franco, Leonardo Neumeyer, Vassilios Di- galakis, and Orith Ronen. 2000. Combination of machine scores for automatic grading of pronunci- ation quality. Speech Communication, 30(2):121- 130.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Leveraging native language information for improved accented speech recognition", |
|
"authors": [ |
|
{ |
|
"first": "Shahram", |
|
"middle": [], |
|
"last": "Ghorbani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H L", |
|
"middle": [], |
|
"last": "John", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Hansen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2449--2453", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.21437/Interspeech.2018-1378" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shahram Ghorbani, John H L Hansen, Robust Speech, and Systems Crss. 2018. Leveraging native lan- guage information for improved accented speech recognition. (September):2449-2453.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Native Language Identification from South Indian English Speech", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Krishnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Radha", |
|
"middle": [], |
|
"last": "Krishna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vinay", |
|
"middle": [], |
|
"last": "Kumar Mittal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Workshop on Machine Learning in Speech and Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. Krishnan G.Radha Krishna and Vinay Kumar Mit- tal. 2018. Native Language Identification from South Indian English Speech. In Workshop on Ma- chine Learning in Speech and Language Processing, September 7th, 2018.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Second language acquisition of intonation: Peak alignment in American English", |
|
"authors": [ |
|
{ |
|
"first": "Calbert", |
|
"middle": [], |
|
"last": "Graham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brechtje", |
|
"middle": [], |
|
"last": "Post", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Journal of Phonetics", |
|
"volume": "66", |
|
"issue": "", |
|
"pages": "1--14", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.wocn.2017.08.002" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Calbert Graham and Brechtje Post. 2018. Second lan- guage acquisition of intonation: Peak alignment in American English. Journal of Phonetics, 66:1-14.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Prosodic Analysis of Non-Native South Indian English Speech", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Radha Krishna Guntur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Krishnan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proc. The 6th Intl. Workshop on Spoken Language Technologies for Under-Resourced Languages", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "71--75", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.21437/SLTU.2018-15" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Radha Krishna Guntur, R Krishnan, and V.K. Mittal. 2018. Prosodic Analysis of Non-Native South In- dian English Speech. In Proc. The 6th Intl. Work- shop on Spoken Language Technologies for Under- Resourced Languages, pages 71-75.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Human and computer recognition of regional accents and ethnic groups from British English speech", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Hanani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Russell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Carey", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Computer Speech and Language", |
|
"volume": "27", |
|
"issue": "1", |
|
"pages": "59--74", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.csl.2012.01.003" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Hanani, M. J. Russell, and M. J. Carey. 2013. Human and computer recognition of regional ac- cents and ethnic groups from British English speech. Computer Speech and Language, 27(1):59-74.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Spoken Language Characterization", |
|
"authors": [ |
|
{ |
|
"first": "Mary", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Harper", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Maxwell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "797--810", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-3-540-49127-9_40" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mary P. Harper and Michael Maxwell. 2008. Spo- ken Language Characterization, pages 797-810.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Perceptual linear predictive (plp) analysis of speech", |
|
"authors": [ |
|
{ |
|
"first": "Hynek", |
|
"middle": [], |
|
"last": "Hermansky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "The Journal of the Acoustical Society of America", |
|
"volume": "87", |
|
"issue": "4", |
|
"pages": "1738--1752", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1121/1.399423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hynek Hermansky. 1990. Perceptual linear predictive (plp) analysis of speech. The Journal of the Acous- tical Society of America, 87(4):1738-1752.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Automatic Assessment of Non-Native Prosody Annotation, Modelling and Evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Florian", |
|
"middle": [], |
|
"last": "H\u00f6nig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anton", |
|
"middle": [], |
|
"last": "Batliner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elmar", |
|
"middle": [], |
|
"last": "N\u00f6th", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the International Symposium on Automatic Detection of Errors in Pronunciation Training (IS ADEPT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "21--30", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Florian H\u00f6nig, Anton Batliner, and Elmar N\u00f6th. 2012. Automatic Assessment of Non-Native Prosody An- notation, Modelling and Evaluation. Proceedings of the International Symposium on Automatic De- tection of Errors in Pronunciation Training (IS ADEPT), pages 21-30.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Perceptual Recognition Cues in Native English Accent Variation", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Ikeno", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"H L" |
|
], |
|
"last": "Hansen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "2006 IEEE International Conference on Acoustics Speed and Signal Processing Proceedings", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "401--404", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ICASSP.2006.1660042" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Ikeno and J.H.L. Hansen. 2006. Perceptual Recog- nition Cues in Native English Accent Variation: \"Listener Accent, Perceived Accent, and Compre- hension\". 2006 IEEE International Conference on Acoustics Speed and Signal Processing Proceed- ings, 1:I-401-I-404.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "An overview of text-independent speaker recognition: From features to supervectors", |
|
"authors": [ |
|
{ |
|
"first": "Tomi", |
|
"middle": [], |
|
"last": "Kinnunen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haizhou", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Speech Communication", |
|
"volume": "52", |
|
"issue": "1", |
|
"pages": "12--40", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.specom.2009.08.009" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomi Kinnunen and Haizhou Li. 2010. An overview of text-independent speaker recognition: From features to supervectors. Speech Communication, 52(1):12 - 40.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Category similarity in multilingual pronunciation training", |
|
"authors": [ |
|
{ |
|
"first": "Jacques", |
|
"middle": [], |
|
"last": "Koreman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proc", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2578--2582", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.21437/Interspeech.2018-1938" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacques Koreman. 2018. Category similarity in multi- lingual pronunciation training. In Proc. Interspeech 2018, pages 2578-2582.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Influence of mother tongue on english accent", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Krishna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Krishnan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 11th International Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "63--67", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G. Radha Krishna and R. Krishnan. 2014. Influence of mother tongue on english accent. In Proceedings of the 11th International Conference on Natural Lan- guage Processing, pages 63-67, Goa, India. NLP Association of India.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "An automated system for regional nativity identification of indian speakers from english speech", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Krishna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Krishnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "16th IEEE India Council International Conference INDICON 2019", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G.Radha Krishna, R.Krishnan, and V.K.Mittal. 2019. An automated system for regional nativity identifi- cation of indian speakers from english speech. In 16th IEEE India Council International Conference INDICON 2019 (Accepted).", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Spoken language recognition: From fundamentals to practice", |
|
"authors": [ |
|
{ |
|
"first": "Haizhou", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bin", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kong Aik", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the IEEE", |
|
"volume": "101", |
|
"issue": "5", |
|
"pages": "1136--1159", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/JPROC.2012.2237151" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haizhou Li, Bin Ma, and Kong Aik Lee. 2013. Spoken language recognition: From fundamentals to prac- tice. Proceedings of the IEEE, 101(5):1136-1159.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Intonation classification for L2 English speech using multi-distribution deep neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Kun", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xixin", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Helen", |
|
"middle": [], |
|
"last": "Meng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Computer Speech and Language", |
|
"volume": "43", |
|
"issue": "", |
|
"pages": "18--33", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.csl.2016.11.006" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kun Li, Xixin Wu, and Helen Meng. 2017. Into- nation classification for L2 English speech using multi-distribution deep neural networks. Computer Speech and Language, 43:18-33.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Advances on Speaker Recognition in non Collaborative Environments", |
|
"authors": [ |
|
{ |
|
"first": "Jes\u00fas Antonio Villalba", |
|
"middle": [], |
|
"last": "L\u00f3pez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jes\u00fas Antonio Villalba L\u00f3pez. 2014. Advances on Speaker Recognition in non Collaborative Environ- ments. page 311.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Text independent speaker identification in multilingual environments", |
|
"authors": [ |
|
{ |
|
"first": "Iker", |
|
"middle": [], |
|
"last": "Luengo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eva", |
|
"middle": [], |
|
"last": "Navas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I\u00f1aki", |
|
"middle": [], |
|
"last": "Sainz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ibon", |
|
"middle": [], |
|
"last": "Saratxaga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jon", |
|
"middle": [], |
|
"last": "Sanchez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Iker Luengo, Eva Navas, I\u00f1aki Sainz, Ibon Saratxaga, Jon Sanchez, Igor Odriozola, and Inma Hernaez. 2008. Text independent speaker identification in multilingual environments. In LREC 2008.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Multilingual native language identification", |
|
"authors": [ |
|
{ |
|
"first": "Shervin", |
|
"middle": [], |
|
"last": "Malmasi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Dras", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Natural Language Engineering", |
|
"volume": "23", |
|
"issue": "2", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1017/S1351324915000406" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "SHERVIN MALMASI and MARK DRAS. 2017. Mul- tilingual native language identification. Natural Language Engineering, 23(2):163215.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Extraction and representation of prosodic features for language and speaker recognition", |
|
"authors": [ |
|
{ |
|
"first": "Leena", |
|
"middle": [], |
|
"last": "Mary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Yegnanarayana", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Speech Communication", |
|
"volume": "50", |
|
"issue": "10", |
|
"pages": "782--796", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.specom.2008.04.010" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leena Mary and B. Yegnanarayana. 2008. Extraction and representation of prosodic features for language and speaker recognition. Speech Communication, 50(10):782-796.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Effect of glottal dynamics in the production of shouted speech", |
|
"authors": [ |
|
{ |
|
"first": "V", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Mittal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Yegnanarayana", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "The Journal of the Acoustical Society of America", |
|
"volume": "133", |
|
"issue": "5", |
|
"pages": "3050--3061", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1121/1.4796110" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "V. K. Mittal and B. Yegnanarayana. 2013. Effect of glottal dynamics in the production of shouted speech. The Journal of the Acoustical Society of America, 133(5):3050-3061.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Study of characteristics of aperiodicity in noh voices", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Vinay Kumar Mittal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Yegnanarayana", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "The Journal of the Acoustical Society of America", |
|
"volume": "137", |
|
"issue": "6", |
|
"pages": "3411--3421", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vinay Kumar Mittal and B Yegnanarayana. 2015. Study of characteristics of aperiodicity in noh voices. The Journal of the Acoustical Society of America, 137(6):3411-3421.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Study of the effects of vocal tract constriction on glottal vibration", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Vinay Kumar Mittal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peri", |
|
"middle": [], |
|
"last": "Yegnanarayana", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bhaskararao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "The Journal of the Acoustical Society of America", |
|
"volume": "136", |
|
"issue": "4", |
|
"pages": "1932--1941", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vinay Kumar Mittal, B Yegnanarayana, and Peri Bhaskararao. 2014. Study of the effects of vocal tract constriction on glottal vibration. The Journal of the Acoustical Society of America, 136(4):1932- 1941.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Significance of aperiodicity in the pitch perception of expressive voices", |
|
"authors": [ |
|
{ |
|
"first": "Bayya", |
|
"middle": [], |
|
"last": "Vinay Kumar Mittal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Yegnanarayana", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "INTERSPEECH", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vinay Kumar Mittal and Bayya Yegnanarayana. 2014. Significance of aperiodicity in the pitch perception of expressive voices. In INTERSPEECH.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Language-specific influence on phoneme development: French and drehu data", |
|
"authors": [ |
|
{ |
|
"first": "Julia", |
|
"middle": [], |
|
"last": "Monnin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H\u00e9l\u00e8ne", |
|
"middle": [], |
|
"last": "Loevenbruck", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "INTERSPEECH", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julia Monnin and H\u00e9l\u00e8ne Loevenbruck. 2010. Language-specific influence on phoneme develop- ment: French and drehu data. In INTERSPEECH.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Automatic pronunciation evaluation of foreign speakers using unknown text", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Moustroufas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Digalakis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Computer Speech and Language", |
|
"volume": "21", |
|
"issue": "1", |
|
"pages": "219--230", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.csl.2006.04.001" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "N. Moustroufas and V. Digalakis. 2007. Automatic pronunciation evaluation of foreign speakers using unknown text. Computer Speech and Language, 21(1):219-230.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Vassilios Digalakis, and Mitchel Weintraub", |
|
"authors": [ |
|
{ |
|
"first": "Leonardo", |
|
"middle": [], |
|
"last": "Neumeyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Horacio", |
|
"middle": [], |
|
"last": "Franco", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Speech Communication", |
|
"volume": "30", |
|
"issue": "2", |
|
"pages": "83--93", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/S0167-6393(99)00046-1" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leonardo Neumeyer, Horacio Franco, Vassilios Di- galakis, and Mitchel Weintraub. 2000. Automatic scoring of pronunciation quality. Speech Communi- cation, 30(2):83-93.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Feature analysis for native language identification", |
|
"authors": [ |
|
{ |
|
"first": "Sergiu", |
|
"middle": [], |
|
"last": "Nisioi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)", |
|
"volume": "9041", |
|
"issue": "", |
|
"pages": "644--657", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-3-319-18111-0_49" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sergiu Nisioi. 2015. Feature analysis for native lan- guage identification. Lecture Notes in Computer Science (including subseries Lecture Notes in Arti- ficial Intelligence and Lecture Notes in Bioinformat- ics), 9041:644-657.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "A novel approach to detecting non-native speakers and their native language", |
|
"authors": [ |
|
{ |
|
"first": "Mohamed", |
|
"middle": [], |
|
"last": "Kamal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omar", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Pelecanos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "IEEE International Conference on Acoustics, Speech and Signal Processing -Proceedings", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4398--4401", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ICASSP.2010.5495628" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohamed Kamal Omar and Jason Pelecanos. 2010. A novel approach to detecting non-native speakers and their native language. ICASSP, IEEE International Conference on Acoustics, Speech and Signal Pro- cessing -Proceedings, pages 4398-4401.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Improving sub-phone modeling for better native language identification with non-native english speech", |
|
"authors": [ |
|
{ |
|
"first": "Hillary", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Lange", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frank", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Molloy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Soong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "INTERSPEECH", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lange, Hillary R. Molloy, and Frank K. Soong. 2017. Improving sub-phone modeling for better na- tive language identification with non-native english speech. In INTERSPEECH.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Robust textindependent speaker identification using gaussian mixture speaker models", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Reynolds", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Rose", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "IEEE Transactions on Speech and Audio Processing", |
|
"volume": "3", |
|
"issue": "1", |
|
"pages": "72--83", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/89.365379" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D. A. Reynolds and R. C. Rose. 1995. Robust text- independent speaker identification using gaussian mixture speaker models. IEEE Transactions on Speech and Audio Processing, 3(1):72-83.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Study of acoustic correlates of english lexical stress produced by native (l1) bengali speakers compared to native (l1) english speakers", |
|
"authors": [ |
|
{ |
|
"first": "Nath", |
|
"middle": [], |
|
"last": "Shambhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shyamal", |
|
"middle": [], |
|
"last": "Saha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Das Mandal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "INTER-SPEECH", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shambhu Nath Saha and Shyamal Kr. Das Mandal. 2015. Study of acoustic correlates of english lexi- cal stress produced by native (l1) bengali speakers compared to native (l1) english speakers. In INTER- SPEECH.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Paralinguistics in speech and language -State-of-the-art and the challenge", |
|
"authors": [ |
|
{ |
|
"first": "Bj\u00f6rn", |
|
"middle": [], |
|
"last": "Schuller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Steidl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anton", |
|
"middle": [], |
|
"last": "Batliner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Burkhardt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurence", |
|
"middle": [], |
|
"last": "Devillers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shrikanth", |
|
"middle": [], |
|
"last": "Narayanan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Computer Speech and Language", |
|
"volume": "27", |
|
"issue": "1", |
|
"pages": "4--39", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.csl.2012.02.005" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bj\u00f6rn Schuller, Stefan Steidl, Anton Batliner, Felix Burkhardt, Laurence Devillers, Christian M\u00fcller, and Shrikanth Narayanan. 2013. Paralinguistics in speech and language -State-of-the-art and the chal- lenge. Computer Speech and Language, 27(1):4-39.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Modeling prosodic feature sequences for speaker recognition", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Shriberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Ferrer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Kajarekar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Venkataraman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Stolcke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Speech Communication", |
|
"volume": "46", |
|
"issue": "3-4", |
|
"pages": "455--472", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.specom.2005.02.018" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "E. Shriberg, L. Ferrer, S. Kajarekar, A. Venkataraman, and A. Stolcke. 2005. Modeling prosodic feature sequences for speaker recognition. Speech Commu- nication, 46(3-4):455-472.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Better nonnative intonation scores through prosodic theory", |
|
"authors": [ |
|
{ |
|
"first": "Joseph", |
|
"middle": [], |
|
"last": "Tepperman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shrikanth", |
|
"middle": [], |
|
"last": "Narayanan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "INTERSPEECH", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joseph Tepperman and Shrikanth Narayanan. 2008. Better nonnative intonation scores through prosodic theory. In INTERSPEECH.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "Non-Native Differences in Prosodic-Construction Use", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Nigel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paola", |
|
"middle": [], |
|
"last": "Org", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanda", |
|
"middle": [], |
|
"last": "Gallardo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Stent", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Dialogue & Discourse", |
|
"volume": "8", |
|
"issue": "1", |
|
"pages": "1--30", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.5087/dad.2017.101" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nigel G Ward, Nigelward@acm Org, Paola Gallardo, and Amanda Stent. 2017. Non-Native Differences in Prosodic-Construction Use. Dialogue & Discourse, 8(1):1-30.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "Feature subset selection for improved native accent identification", |
|
"authors": [ |
|
{ |
|
"first": "Tingyao", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacques", |
|
"middle": [], |
|
"last": "Duchateau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean", |
|
"middle": [ |
|
"Pierre" |
|
], |
|
"last": "Martens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Van Compernolle", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Speech Communication", |
|
"volume": "52", |
|
"issue": "2", |
|
"pages": "83--98", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.specom.2009.08.010" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tingyao Wu, Jacques Duchateau, Jean Pierre Martens, and Dirk Van Compernolle. 2010. Feature subset selection for improved native accent identification. Speech Communication, 52(2):83-98.", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "Integration of complementary acoustic features for speaker recognition", |
|
"authors": [ |
|
{ |
|
"first": "Nengheng", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tan", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Ching", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "IEEE Signal Processing Letters", |
|
"volume": "14", |
|
"issue": "3", |
|
"pages": "181--184", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/LSP.2006.884031" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nengheng Zheng, Tan Lee, and P. C. Ching. 2007. Integration of complementary acoustic features for speaker recognition. IEEE Signal Processing Let- ters, 14(3):181-184.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"text": "Distribution of MFCC Coefficients C 0 versus C 1 for non-native English speech by four South Indian language speakers against native English speech.", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"text": "Confusion Matrix for SVM classification of South-Indian English including native English. Note: TPR is True Positive Rate, FNR is False Negative Rate.Table5: Non-native Regional English Accent Classification accuracies using (a) k-nearest neighbourhood (KNN), (b) Linear Discriminant (LDA), and (c) SVM Classifier (a) KNN (b) LDA (c) SVM Accuracy 86", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"text": ": ROC curve for SVM classification of Nonnative English speech by Kannada speakers.", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF3": { |
|
"uris": null, |
|
"text": "Confusion Matrix for SVM classification of English by speakers of KAN, TAM, and TEL. Note: TPR is True Positive Rate, FNR is False Negative Rate.", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF0": { |
|
"type_str": "table", |
|
"html": null, |
|
"text": "Summary of data used for training and testing: (a) attributes (b) values for training set and (c) values for testing set", |
|
"num": null, |
|
"content": "<table><tr><td colspan=\"2\">(a) Attributes</td><td colspan=\"2\">(b) Training set (c) Test set</td></tr><tr><td colspan=\"2\">Total number</td><td>60</td><td>75</td></tr><tr><td>of speakers</td><td/><td/><td/></tr><tr><td colspan=\"2\">Speakers per</td><td>20</td><td>25</td></tr><tr><td colspan=\"2\">language group</td><td/><td/></tr><tr><td colspan=\"2\">(KAN, MAL*,</td><td/><td/></tr><tr><td colspan=\"2\">TAM, TEL)</td><td/><td/></tr><tr><td>Speech</td><td>Du-</td><td>300 sec</td><td>60 sec</td></tr><tr><td>ration</td><td>per</td><td/><td/></tr><tr><td>speaker</td><td/><td/><td/></tr><tr><td colspan=\"4\">Note: *MAL-Malayalam data set is used only in</td></tr><tr><td colspan=\"4\">tests related to cepstral features.</td></tr></table>" |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"html": null, |
|
"text": "Template of file naming for data recording", |
|
"num": null, |
|
"content": "<table><tr><td>Native</td></tr><tr><td>language Name Age / Sex File Name</td></tr></table>" |
|
}, |
|
"TABREF2": { |
|
"type_str": "table", |
|
"html": null, |
|
"text": "Summary of speaker traits and related speech features(Day and Nandi, 2007).", |
|
"num": null, |
|
"content": "<table><tr><td colspan=\"2\">Speech char-</td><td colspan=\"3\">Speaker trait Speech feature</td></tr><tr><td colspan=\"2\">acteristic</td><td/><td/></tr><tr><td colspan=\"2\">Lexical, Syn-</td><td colspan=\"2\">Socio eco-</td><td>Vocabulary,</td></tr><tr><td>tactic</td><td/><td>nomic</td><td/><td>Word</td></tr><tr><td colspan=\"2\">(Idiolect,</td><td colspan=\"2\">Educational</td><td>arrangement</td></tr><tr><td colspan=\"2\">Semantics,</td><td colspan=\"2\">status (Lan-</td><td>& grammatical</td></tr><tr><td>Pronun-</td><td/><td>guage</td><td>use</td><td>cues.</td></tr><tr><td>ciations,</td><td/><td colspan=\"2\">and sentence</td></tr><tr><td colspan=\"2\">dictions, Id-</td><td colspan=\"2\">construction)</td></tr><tr><td colspan=\"2\">iosyncrasies)</td><td/><td/></tr><tr><td>Prosodic</td><td/><td colspan=\"2\">Personality</td><td>Durational fea-</td></tr><tr><td colspan=\"2\">(Rhythm,</td><td colspan=\"2\">type, Parental</td><td>tures.</td><td>Pitch</td></tr><tr><td colspan=\"2\">Intonation,</td><td>influences</td><td/><td>dynamics, En-</td></tr><tr><td colspan=\"2\">Articulation</td><td/><td/><td>ergy (likely to</td></tr><tr><td>rate etc.)</td><td/><td/><td/><td>be Text / time</td></tr><tr><td/><td/><td/><td/><td>dependent).</td></tr><tr><td>Low</td><td>level</td><td colspan=\"2\">Anatomical</td><td>Short-time spec-</td></tr><tr><td>acoustic</td><td/><td colspan=\"2\">structure of</td><td>trum, Predictor</td></tr><tr><td>features</td><td/><td colspan=\"2\">speaker's vo-</td><td>coefficients, In-</td></tr><tr><td/><td/><td colspan=\"2\">cal apparatus</td><td>tensity, Pitch.</td></tr></table>" |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"html": null, |
|
"text": "Major text-independent features used in prosodic analysis.", |
|
"num": null, |
|
"content": "<table><tr><td colspan=\"2\">Prosodic</td><td>Factors that influences speech</td></tr><tr><td colspan=\"2\">features</td><td/></tr><tr><td colspan=\"2\">Dynamics</td><td>Identity of sound unit, its position</td></tr><tr><td>of</td><td>F 0</td><td>from phrase, word; Speaking style;</td></tr><tr><td colspan=\"2\">contour</td><td>Intonation rules; Type of sentence</td></tr><tr><td/><td/><td>(Interrogative, Declarative)</td></tr><tr><td colspan=\"2\">Intonation,</td><td/></tr><tr><td colspan=\"2\">Rhythm,</td><td/></tr><tr><td>Stress</td><td/><td/></tr></table>" |
|
}, |
|
"TABREF4": { |
|
"type_str": "table", |
|
"html": null, |
|
"text": "Mean (\u00b5) and SD (\u03c3) of Pitch variation of single speaker from three groups of native speakers when speaking (a) Native Language (NL) (b) English (c) Other South Indian language (OSIL)", |
|
"num": null, |
|
"content": "<table><tr><td/><td/><td colspan=\"2\">LANGUAGE SPOKEN</td></tr><tr><td>L1</td><td>\u00b5</td><td>(a) NL \u03c3</td><td>(b) English (c) OSIL \u00b5 \u03c3 \u00b5 \u03c3</td></tr><tr><td colspan=\"4\">Kan 214 32.2 254 32.3 235 32.4</td></tr><tr><td colspan=\"4\">Tam 227 21.7 248 28.9 230 30.6</td></tr><tr><td>Tel</td><td colspan=\"3\">133 21.5 157 22.9 150 26.3</td></tr></table>" |
|
}, |
|
"TABREF5": { |
|
"type_str": "table", |
|
"html": null, |
|
"text": "Percentage increase in Standard Deviation of pitch contour from native language speech to English speech (using two non-overlapping sets of 20 speakers from each native language group Kannada, Tamil, and Telugu).", |
|
"num": null, |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |