|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T06:07:43.677327Z" |
|
}, |
|
"title": "Me, myself, and ire: Effects of automatic transcription quality on emotion, sarcasm, and personality detection", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Culnan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Arizona", |
|
"location": { |
|
"settlement": "Tucson", |
|
"region": "Arizona", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "jmculnan@email.arizona.edu" |
|
}, |
|
{ |
|
"first": "Seongjin", |
|
"middle": [], |
|
"last": "Park", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Arizona", |
|
"location": { |
|
"settlement": "Tucson", |
|
"region": "Arizona", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "seongjinpark@email.arizona.edu" |
|
}, |
|
{ |
|
"first": "Meghavarshini", |
|
"middle": [], |
|
"last": "Krishnaswamy", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Arizona", |
|
"location": { |
|
"settlement": "Tucson", |
|
"region": "Arizona", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "mkrishnaswamy@email.arizona.edu" |
|
}, |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Sharp", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Arizona", |
|
"location": { |
|
"settlement": "Tucson", |
|
"region": "Arizona", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "bsharp@email.arizona.edu" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "In deployment, systems that use speech as input must make use of automated transcriptions. Yet, typically when these systems are evaluated, gold transcriptions are assumed. We explicitly examine the impact of transcription errors on the downstream performance of a multi-modal system on three related tasks from three datasets: emotion, sarcasm, and personality detection. We include three separate transcription tools and show that while all automated transcriptions propagate errors that substantially impact downstream performance, the open-source tools fair worse than the paid tool, though not always straightforwardly, and word error rates do not correlate well with downstream performance. We further find that the inclusion of audio features partially mitigates transcription errors, but that a naive usage of a multi-task setup does not. We make available all code and data splits needed to reproduce all of our experiments. 1", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "In deployment, systems that use speech as input must make use of automated transcriptions. Yet, typically when these systems are evaluated, gold transcriptions are assumed. We explicitly examine the impact of transcription errors on the downstream performance of a multi-modal system on three related tasks from three datasets: emotion, sarcasm, and personality detection. We include three separate transcription tools and show that while all automated transcriptions propagate errors that substantially impact downstream performance, the open-source tools fair worse than the paid tool, though not always straightforwardly, and word error rates do not correlate well with downstream performance. We further find that the inclusion of audio features partially mitigates transcription errors, but that a naive usage of a multi-task setup does not. We make available all code and data splits needed to reproduce all of our experiments. 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "With the large amount of available speech data, multimodal approaches to classic natural language processing tasks are becoming increasingly prevalent. Many of these proposed systems, however, demonstrate their gains under the assumption of gold transcriptions (e.g., Ghosal et al., 2020; Castro et al., 2019) , a condition which is highly unrealistic in real-world scenarios. In practice, deployed systems will need to utilize an automatic speech recognition (ASR) tool to obtain transcriptions. However, when selecting the best tool given the constraints of the use case, the results of an intrinsic evaluation such as the word error rate (WER) are not necessarily correlated with extrinsic performance on the downstream task of interest (Faruqui et al., 2016) . This issue is exacerbated when considering that ASR tools may perform quite differently across different domains (Georgila et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 268, |
|
"end": 288, |
|
"text": "Ghosal et al., 2020;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 289, |
|
"end": 309, |
|
"text": "Castro et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 740, |
|
"end": 762, |
|
"text": "(Faruqui et al., 2016)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 878, |
|
"end": 901, |
|
"text": "(Georgila et al., 2020)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this initial work, we explore how much transcription errors affect performance of a multimodal system on several related but distinct downstream tasks and domains. We compare two open-source and one paid transcription tools and evaluate on three multimodal English-language tasks: emotion, sarcasm, and personality detection. We explicitly compare intrinsic and extrinsic evaluations, and discuss the utility of WER as an indicator of taskperformance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our specific contributions are:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "1. An exploration of the relationship between transcription WER and performance on downstream multimodal tasks. We show that overall, the transcriptions from the paid tool may be more useful than those of the open-source tools, but that they perform worse than gold. Further, we show that differences in WER do not describe well the differences in the downstream tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "2. We explore using multitask (MT) training for mitigating issues with transcription quality. We find that, in this setting, MT does not help model performance, suggesting that problems with transcription quality need a more thoughtful approach to overcome. On the other hand, we show that inclusion of the audio modality does improve performance for all datasets when using automatic transcriptions, indicating that additional modalities can be helpful to mitigate transcription errors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Multimodal language work frequently makes use of video, audio, and text modalities to examine emo-tion (Zadeh et al., 2018) , sentiment (Soleymani et al., 2017) , and personality (Rissola et al., 2019) . Previous work in emotion recognition has used speech features alone (Latif et al., 2020) , speech and text (Atmaja and Akagi, 2020), and speech, text, and video (Tsai et al., 2018) to make predictions. Ghosal et al. (2020) use transformer-based models to establish a new state of the art on multiple multimodal datasets. Recent work in personality detection has often examined the OCEAN traits (openness, conscientiousness, extraversion, agreeableness, and neuroticism; Ponce-L\u00f3pez et al., 2016) , using either apparent (as perceived by others) (Yan et al., 2020) or self-reported traits (Celli et al., 2014) . ASR systems have also been emphasized in recent decades. Some can be custom trained or used with pretrained models (Povey et al., 2011; Lamere et al., 2003) , while others are extensively trained but limited in their customization (Bano et al., 2020) . Georgila et al. (2020) examine the performance of different ASR transcription tools on multiple ASR datasets, providing a strong reference for WERs; however, to the best of our knowledge, we are the first to compare this form of intrinsic performance to performance on the downstream tasks of emotion, personality, and sarcasm detection.", |
|
"cite_spans": [ |
|
{ |
|
"start": 103, |
|
"end": 123, |
|
"text": "(Zadeh et al., 2018)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 136, |
|
"end": 160, |
|
"text": "(Soleymani et al., 2017)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 179, |
|
"end": 201, |
|
"text": "(Rissola et al., 2019)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 272, |
|
"end": 292, |
|
"text": "(Latif et al., 2020)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 311, |
|
"end": 322, |
|
"text": "(Atmaja and", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 323, |
|
"end": 340, |
|
"text": "Akagi, 2020), and", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 341, |
|
"end": 358, |
|
"text": "speech, text, and", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 359, |
|
"end": 384, |
|
"text": "video (Tsai et al., 2018)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 406, |
|
"end": 426, |
|
"text": "Ghosal et al. (2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 598, |
|
"end": 673, |
|
"text": "(openness, conscientiousness, extraversion, agreeableness, and neuroticism;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 674, |
|
"end": 699, |
|
"text": "Ponce-L\u00f3pez et al., 2016)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 749, |
|
"end": 767, |
|
"text": "(Yan et al., 2020)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 792, |
|
"end": 812, |
|
"text": "(Celli et al., 2014)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 930, |
|
"end": 950, |
|
"text": "(Povey et al., 2011;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 951, |
|
"end": 971, |
|
"text": "Lamere et al., 2003)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1046, |
|
"end": 1065, |
|
"text": "(Bano et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1068, |
|
"end": 1090, |
|
"text": "Georgila et al. (2020)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "When multimodal data including speech is used in a deployed system, the related text features generally come from automatic transcriptions of the speech itself. Here, we compare how errors in these transcriptions affect the performance of a neural network model on three distinct tasks. Further, we compare results from both a single-task and a multitask (MT) network composed of data from two datasets, a common strategy for mitigating issues with limited or flawed data (Schulz et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 472, |
|
"end": 493, |
|
"text": "(Schulz et al., 2018)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "As we are not trying to define a new state of the art, we use a simple model for our experiments. This model takes as input text and audio features from MELD and FirstImpr (Section 4), and feeds them through a late-fusion network (i.e., one which processes the modalities separately, then concatenates them before predicting). In the MT setting, the final layers are task specific, and the base layers share parameters.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We generate transcriptions for each dataset using three separate ASR systems (Section 4.3) and find the intrinsic performance (WER 2 ; Section 6.1) as well as the extrinsic performance for each (i.e., performance on the downstream tasks of interest; Section 6).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approach", |
|
"sec_num": "3" |
|
}, |
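
{

"text": "As an illustrative sketch added in editing (not from the original paper), the intrinsic WER evaluation described above can be reproduced with the JiWER library that the authors mention using in a footnote; the two utterance pairs below are invented:\n\nimport jiwer\n\n# dataset-provided gold transcripts and the corresponding ASR hypotheses\ngold = ['he is not coming to the party', 'we will meet again tomorrow']\nhypothesis = ['he is not coming to a party', 'we will meet again tomorrow']\n\n# corpus-level word error rate, aggregated over all utterance pairs\nprint(jiwer.wer(gold, hypothesis))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Approach",

"sec_num": "3"

},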
|
{ |
|
"text": "For our experiments we use three datasets covering distinct tasks. For each data point, we extract acoustic features and obtain three different transcriptions, in addition to the dataset-provided gold transcriptions, which form the basis for our comparative study. Dataset sizes are shown in Table 1 ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 292, |
|
"end": 299, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Our selected datasets represent distinct tasks that may have different levels of reliance upon each modality for successful prediction. For example, while emotions and personality may be expressed through word choice as much as pronunciation, sarcasm detection should rely much more heavily upon acoustics.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Zahiri and Choi, 2017; Poria et al., 2019): MELD provides 13708 annotated utterances (< 5 words, with an average length of 3.59s) from 1,433 dialogues from the TV series Friends. Utterances are annotated for emotion (anger, disgust, sadness, joy, neutral, surprise and fear) and sentiment.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multimodal Emotion Lines Dataset (MELD;", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Multimodal Sarcasm Detection (MUStARD; Castro et al., 2019) : MUStARD is a collection of 690 utterances (average of 14 tokens and 5.22s) from Friends, The Golden Girls, The Big Bang Theory, and Sarcasmaholics Anonymous. Each is gold-annotated as sarcastic or non-sarcastic.", |
|
"cite_spans": [ |
|
{ |
|
"start": 39, |
|
"end": 59, |
|
"text": "Castro et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multimodal Emotion Lines Dataset (MELD;", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "First Impressions V2 dataset (FirstImpr; Ponce-L\u00f3pez et al., 2016): FirstImpr contains 10,000 English utterances (average length 15s) taken from 3,000 YouTube video blogs and annotated for the OCEAN personality traits.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multimodal Emotion Lines Dataset (MELD;", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We extract acoustic features with the Open-source Speech and Music Interpretation by Large-space Extraction toolkit v2.3.0 (OpenSMILE; Eyben et al., 2010) . We use the INTERSPEECH 2010 (IS10; or 2013 Paralinguistics Challenges (IS13; Schuller et al., 2013) features, using the set that performed the best on a task's development partition. These sets contain low-level descriptors (such as MFCCs and fundamental frequency) and the associated functionals, extracted at 10ms intervals (total 76 for IS10, 141 for IS13).", |
|
"cite_spans": [ |
|
{ |
|
"start": 135, |
|
"end": 154, |
|
"text": "Eyben et al., 2010)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic features", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We capture features only from the middle 50 percent of each audio file, and calculate mean feature values per utterance for both feature sets, with minimum, maximum, and mean plus/minus standard deviation concatenated to this for IS10.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic features", |
|
"sec_num": "4.2" |
|
}, |
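
{

"text": "A minimal sketch (an editorial addition, not the authors' code) of the utterance-level summarization described above; frames stands for a hypothetical (num_frames, num_features) array of openSMILE low-level descriptors extracted at 10ms intervals:\n\nimport numpy as np\n\ndef summarize_utterance(frames, is10=False):\n    # keep only the middle 50 percent of the frames\n    n = len(frames)\n    middle = frames[n // 4 : n - n // 4]\n    mean = middle.mean(axis=0)\n    if not is10:\n        # IS13: mean feature values per utterance\n        return mean\n    # IS10: also concatenate min, max, and mean plus/minus standard deviation\n    std = middle.std(axis=0)\n    return np.concatenate([mean, middle.min(axis=0), middle.max(axis=0), mean + std, mean - std])",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Acoustic features",

"sec_num": "4.2"

},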
|
{ |
|
"text": "Our text features consist of tokens extracted directly from the transcripts using the basic english tokenizer in torchtext. 4 We compare the transcriptions from the following:", |
|
"cite_spans": [ |
|
{ |
|
"start": 124, |
|
"end": 125, |
|
"text": "4", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text features", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "CMU Sphinx Open Source Toolkit (Sphinx; Lamere et al., 2003) :", |
|
"cite_spans": [ |
|
{ |
|
"start": 40, |
|
"end": 60, |
|
"text": "Lamere et al., 2003)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text features", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "We utilize the opensource, lightweight PocketSphinx 5 version of Sphinx for transcription, using the pretrained acoustic and language models provided by CMU.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text features", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Google Cloud Speech-to-Text 6 (Google): Google Cloud Speech-to-Text is a commercial tool trained on data collected by Google and provided by users who have used Speech-to-Text and agreed to share their data. We use synchronous speech recognition.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text features", |
|
"sec_num": "4.3" |
|
}, |
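
{

"text": "As an editorial illustration (not from the paper), synchronous recognition with the google-cloud-speech Python client looks roughly like the sketch below; the file name, audio encoding, and sample rate are assumptions rather than details reported by the authors:\n\nfrom google.cloud import speech\n\nclient = speech.SpeechClient()\nwith open('utterance.wav', 'rb') as f:  # hypothetical audio file\n    audio = speech.RecognitionAudio(content=f.read())\nconfig = speech.RecognitionConfig(\n    encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,  # assumed encoding\n    sample_rate_hertz=16000,  # assumed sample rate\n    language_code='en-US',\n)\nresponse = client.recognize(config=config, audio=audio)\ntranscript = ' '.join(result.alternatives[0].transcript for result in response.results)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Text features",

"sec_num": "4.3"

},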
|
{ |
|
"text": "Kaldi Speech Recognition Toolkit (Kaldi; Povey et al., 2011):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text features", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Kaldi is an open-source tool for speech recognition. We use the Librispeech ASR model, 7 which is trained on Librispeech (Panayotov et al., 2015), and the online-decoding function.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text features", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Gold: Each dataset also provides a gold transcription. For MELD this is extracted from subtitles, MUStARD's comes both from subtitles and manual transcription, and FirstImpr's comes from a professional transcription service. With these we calculate the WERs (Section 6.1) and a ceiling performance in our extrinsic evaluations (Section 6).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text features", |
|
"sec_num": "4.3" |
|
}, |
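
{

"text": "The token extraction described at the start of this subsection can be reproduced with torchtext as in the following editorial sketch (the example sentence is invented):\n\nfrom torchtext.data.utils import get_tokenizer\n\n# the basic_english tokenizer lowercases the input and splits off punctuation\ntokenizer = get_tokenizer('basic_english')\ntokens = tokenizer('He is NOT coming to the party.')\nprint(tokens)  # a list of lowercased tokens",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Text features",

"sec_num": "4.3"

},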
|
{ |
|
"text": "To evaluate the impact of transcription on performance, we create (a) baseline models that use only text or only audio features, plus (b) a multimodal model that uses audio and text. Note that as our goal here is not to achieve a new state of the art, but rather to explore the impact of real-world options for transcriptions, we use straightforward models that are not particularly architecturally tuned.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Text-only baseline: The text baseline consists of a two-layer LSTM (Hochreiter and Schmidhuber, 1997) . 300d GloVe embeddings trained on 42B words (Pennington et al., 2014) are concatenated with 30d trainable text embeddings and fed through the network. The 100d output vectors representing each utterance are then fed through a prediction layer with a cross-entropy loss function 8 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 67, |
|
"end": 101, |
|
"text": "(Hochreiter and Schmidhuber, 1997)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 147, |
|
"end": 172, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "5" |
|
}, |
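
{

"text": "A minimal PyTorch sketch of the text-only baseline described above (an editorial addition; the layer sizes follow the prose, while freezing the GloVe weights and taking the final hidden state as the utterance vector are assumptions):\n\nimport torch\nimport torch.nn as nn\n\nclass TextOnlyBaseline(nn.Module):\n    def __init__(self, vocab_size, glove_weights, num_classes):\n        super().__init__()\n        # 300d GloVe embeddings concatenated with 30d trainable embeddings\n        self.glove = nn.Embedding.from_pretrained(glove_weights, freeze=True)\n        self.trainable = nn.Embedding(vocab_size, 30)\n        self.lstm = nn.LSTM(input_size=330, hidden_size=100, num_layers=2, batch_first=True)\n        self.out = nn.Linear(100, num_classes)\n\n    def forward(self, token_ids):\n        x = torch.cat([self.glove(token_ids), self.trainable(token_ids)], dim=-1)\n        _, (h, _) = self.lstm(x)\n        utterance = h[-1]  # 100d vector representing the utterance\n        return self.out(utterance)  # logits for a cross-entropy loss",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Models",

"sec_num": "5"

},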
|
{ |
|
"text": "Audio-only baseline The audio baseline is a simple feedforward neural network, where extracted audio features are averaged over the course of the utterance, 9 and used as input into two fully connected layers. The first layer decreases the audio to a 50-dimensional vector, while the second increases it back to its original size. The vector is finally fed through a prediction layer. We use IS10 features for MUStARD and IS13 for MELD and FirstImpr, chosen based on dev performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "5" |
|
}, |
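
{

"text": "A minimal PyTorch sketch of the audio-only baseline described above (an editorial addition; the ReLU activations are an assumption, and audio_dim is the dimensionality of the utterance-level acoustic vector, e.g. 141 for the IS13 summary or 380 for IS10):\n\nimport torch.nn as nn\n\nclass AudioOnlyBaseline(nn.Module):\n    def __init__(self, audio_dim, num_classes):\n        super().__init__()\n        self.net = nn.Sequential(\n            nn.Linear(audio_dim, 50),  # compress to a 50-dimensional vector\n            nn.ReLU(),\n            nn.Linear(50, audio_dim),  # expand back to the original size\n            nn.ReLU(),\n            nn.Linear(audio_dim, num_classes),  # prediction layer\n        )\n\n    def forward(self, audio_feats):\n        return self.net(audio_feats)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Models",

"sec_num": "5"

},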
|
{ |
|
"text": "Multimodal model (MM) For our multimodal model, we concatenate the 100d output from the text component to the 141d (IS13) or 380d (IS10) output of the acoustic layers. These vectors are then fed through two fully connected layers and a prediction layer. This model works in both single task and multitask settings. As a multitask network (MM-MT), the two final layers are distinct for each dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "5" |
|
}, |
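
{

"text": "An editorial sketch of the late-fusion multimodal model and its multitask variant described above; the text and audio encoders are assumed to return the 100d and 141d/380d utterance representations, and the hidden size of the fused layers is an assumption:\n\nimport torch\nimport torch.nn as nn\n\nclass LateFusionModel(nn.Module):\n    def __init__(self, text_encoder, audio_encoder, fused_dim, hidden_dim, classes_per_task):\n        super().__init__()\n        self.text_encoder = text_encoder    # shared base layers\n        self.audio_encoder = audio_encoder  # shared base layers\n        self.fused = nn.Sequential(nn.Linear(fused_dim, hidden_dim), nn.ReLU())\n        # in the multitask setting, the final layers are distinct for each dataset\n        self.heads = nn.ModuleList(\n            [nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.ReLU(), nn.Linear(hidden_dim, c))\n             for c in classes_per_task]\n        )\n\n    def forward(self, token_ids, audio_feats, task_id=0):\n        fused = torch.cat([self.text_encoder(token_ids), self.audio_encoder(audio_feats)], dim=-1)\n        return self.heads[task_id](self.fused(fused))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Models",

"sec_num": "5"

},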
|
{ |
|
"text": "We evaluate how the automated transcriptions from different ASR tools affect performance on three distinct downstream tasks. We show the intrinsic performance of the tools (in terms of WERs) and compare it to the extrinsic performance. show the difficulty of ASR tools with these datasets' speech.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments and Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We use the provided gold transcriptions to calculate WERs, shown in Table 2 . We see that Google consistently has lower WERs than either of the open-source tools, and that Kaldi consistently outperforms Sphinx, though the difference is smaller in MELD than the other datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 68, |
|
"end": 75, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Intrinsic Transcription Evaluation", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "The results of our models trained on each transcription type for each task is given in Table 3 . We report all results as weighted average F1 scores over all classes and we evaluate statistical significance by calculating p-values with bootstrap resampling over 10,000 iterations on model predictions with Bonferroni correction applied. We include the comparable 10 state of the art (SotA) performance for reference for MELD and MUStARD. As FirstImpr was previously evaluated as a regression task, and here we perform maximum-class prediction for consistency, there is no relevant SotA to include. For MELD, , use a CNN over GloVe embeddings and an LSTM over audio features from OpenSMILE. 11 For MUStARD, Castro et al. (2019) employ SVMs trained over the BERT (Devlin et al., 2018) encoding of the utterance, combined with extracted audio features, averaged over the utterance. We include this result only to give context to what we report; recall that here we evaluate on a test split, whereas they used 5-fold cross validation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 706, |
|
"end": 726, |
|
"text": "Castro et al. (2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 761, |
|
"end": 782, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 87, |
|
"end": 94, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Extrinsic Evaluation", |
|
"sec_num": "6.2" |
|
}, |
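
{

"text": "An editorial sketch in the spirit of the significance testing described above (the exact procedure the authors used may differ): a paired bootstrap over model predictions that resamples test items 10,000 times and compares the weighted F1 of two systems, with the resulting p-value then compared against an alpha level after Bonferroni correction:\n\nimport numpy as np\nfrom sklearn.metrics import f1_score\n\ndef paired_bootstrap_p(gold, preds_a, preds_b, n_iter=10000, seed=0):\n    rng = np.random.default_rng(seed)\n    gold, preds_a, preds_b = np.asarray(gold), np.asarray(preds_a), np.asarray(preds_b)\n    n = len(gold)\n    wins = 0\n    for _ in range(n_iter):\n        idx = rng.integers(0, n, size=n)  # resample test items with replacement\n        f_a = f1_score(gold[idx], preds_a[idx], average='weighted')\n        f_b = f1_score(gold[idx], preds_b[idx], average='weighted')\n        if f_b >= f_a:\n            wins += 1\n    # fraction of resamples in which system B matches or beats system A\n    return wins / n_iter",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Extrinsic Evaluation",

"sec_num": "6.2"

},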
|
{ |
|
"text": "For text-only results, ASR transcriptions show significantly lower performance than the gold transcriptions in MELD (p<0.001) and, to a smaller degree, MUStARD, although the Kaldi transcriptions are better in FirstImpr (p<0.001). With audio included, i.e., the multimodal systems, we again see that gold transcription models achieve higher performance than those with ASR transcriptions for MELD (p<0.001) and MUStARD. For FirstImpr, the Google transcriptions yield the best performance, although this difference is not significant. However, when only considering the ASR models (i.e., in the deployment scenario), the inclusion of audio features improves all models, indicating that these features are able to mitigate some of the noise arising from imperfect transcriptions. That said, it is worth noting that for MUStARD, the best performance is achieved by using audio features only, though again this difference is not significant.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extrinsic Evaluation", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Among the ASR systems, we see that despite the substantial differences in WERs (Table 2) , on the extrinsic evaluation, the story is more nuanced. Google transcription models do the best on MELD and FirstImpr, but on MUStARD, Sphinx transcriptions have the best performance. Further, none of hammad and Turney, 2010, 2013) for each emotion type appearing in the MELD transcriptions. Each word may be associated with more than one emotion, so the overall count is lower than the sum of the individual emotions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 292, |
|
"end": 302, |
|
"text": "hammad and", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 303, |
|
"end": 322, |
|
"text": "Turney, 2010, 2013)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 79, |
|
"end": 88, |
|
"text": "(Table 2)", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Extrinsic Evaluation", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "these differences are statistically significant. Thus, depending on the task of interest, there may be no large advantage to more expensive tools. We also experiment with using a multi-task (MT) setup to determine whether using MT can mitigate transcription errors. We use only MELD and FirstImpr, as they both did best with the IS13 features. Further, the majority of utterances in MUStARD appear in MELD and cannot be restricted to the training partition, so it cannot be used fairly in a multitask system with MELD. For all transcription types, the MT setup performs similarly to the corresponding multimodal single-task models, suggesting that a more thoughtful approach may be needed to leverage external data to mitigate transcription noise, which is beyond the scope of the current work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extrinsic Evaluation", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "While transcription WERs are not intrinsically linked to model performance on a global level, it is possible that additional factors are at play. As such, we analyze GloVe's coverage for each transcription of each dataset and examine emotion words present in each transcription for MELD. Table 4 reveal that gold transcriptions contain both a larger number of overall tokens and a smaller percentage of coverage by GloVe than all other datasets. This larger number of tokens may allow a system to make fine-grained distinction, indicating potential need for caution with automatic transcription selection depending upon the dataset of interest. Google has the second highest number of tokens covered in GloVe for FirstImpr and MUStARD, while Sphinx has the second most for MELD. As models using sphinx transcriptions perform numerically worst with MELD, this indicates that GloVe coverage alone does not always correspond to downstream task performance.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 288, |
|
"end": 295, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Error analysis", |
|
"sec_num": "6.3" |
|
}, |
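
{

"text": "The GloVe coverage analysis described above amounts to the following editorial sketch, where transcripts is a hypothetical list of tokenized utterances and glove_vocab is the set of words that have pretrained vectors:\n\ndef glove_coverage(transcripts, glove_vocab):\n    tokens = [tok for utt in transcripts for tok in utt]\n    if not tokens:\n        return 0, 0, 0.0\n    covered = sum(tok in glove_vocab for tok in tokens)\n    # total tokens, tokens covered by GloVe, and the percentage covered\n    return len(tokens), covered, covered / len(tokens)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Error analysis",

"sec_num": "6.3"

},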
|
{ |
|
"text": "For more fine-grained detail, we examine transcription success in one particular domain: identification of emotion words. To do this, we determine the number of emotion words identified for each transcription using the NRC Word-Emotion Asso-ciation Lexicon (EmoLex; Turney, 2010, 2013) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 266, |
|
"end": 285, |
|
"text": "Turney, 2010, 2013)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results of", |
|
"sec_num": null |
|
}, |
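
{

"text": "An editorial sketch of the emotion-word counting described above; emolex stands for a hypothetical mapping from a word to the set of EmoLex emotions associated with it (a word may carry more than one emotion), and tokens is one tokenized transcription:\n\nfrom collections import Counter\n\ndef emotion_word_counts(tokens, emolex):\n    token_freq = Counter()  # mentions of emotion words, per emotion\n    type_freq = Counter()   # distinct emotion words, per emotion\n    for tok in tokens:\n        for emotion in emolex.get(tok, ()):\n            token_freq[emotion] += 1\n    for tok in set(tokens):\n        for emotion in emolex.get(tok, ()):\n            type_freq[emotion] += 1\n    return type_freq, token_freq",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Results of",

"sec_num": null

},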
|
{ |
|
"text": "As shown in Table 5 , between the gold and automatic transcriptions, we see different patterns between the type and token frequencies of emotion words. Overall, the token frequency of emotion words is lower in the automatic transcripts than in the gold transcriptions, largely due to the increase in tokens of words expressing joy and fear, but the type frequency is higher, particularly with Kaldi and Sphinx. That is, there are fewer distinct emotion words in the gold transcriptions but they have more mentions. These frequency patterns may be partially responsible for the worse performance of models using Kaldi and Sphinx transcriptions, as some data points contain spurious false positive mentions of emotion words, and other data points are missing mentions of emotion words, due to transcription error.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 19, |
|
"text": "Table 5", |
|
"ref_id": "TABREF8" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results of", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We demonstrated that selecting the appropriate transcription service for a task is dependent upon the task and models to be used. Paid transcriptions may result in better model performance, but differences in task performance may be much smaller than suggested by only considering WER. Further, depending on the task, the noise introduced from automatic transcriptions may be mitigated by including additional input modalities, such as audio.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "https://github.com/clulab/ tomcat-speech", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We use the JiWER Python library (https://pypi. org/project/jiwer/) and the dataset-provided transcriptions to calculate WER.3 Note that for MELD we redistributed the train and dev partitions to make dev closer in size to test (we did not modify test in any way).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://pytorch.org/text/stable/index. html 5 https://github.com/cmusphinx/ pocketsphinx 6 https://cloud.google.com/ speech-to-text 7 https://kaldi-asr.org/models/m13", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "used for this and all other models 9 While we also experimented with a version that used an RNN over the acoustic features, we found that it did not affect performance and it was far slower to train.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Since we do not use the surrounding context or external resources in this initial work, we provide the results of the previous best performing system that did likewise.11 We are reading between the lines on this, as the paper does not provide clear explanation of this model, which they call cMKL.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "Research was sponsored by the Army Research Office, Grant Number W911NF2010002. The views and conclusions are those of the authors and should not be interpreted as representing official policies, expressed or implied, of the Army Research Office or U.S. Government. Rebecca Sharp declares a financial interest in lum.ai. This interest has been properly disclosed to the University of Arizona Institutional Review Committee and is managed in accordance with its conflict of interest policies.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Dimensional speech emotion recognition from speech features and word embeddings by using multitask learning", |
|
"authors": [ |
|
{ |
|
"first": "Masato", |
|
"middle": [], |
|
"last": "Bagus Tris Atmaja", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Akagi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "APSIPA Transactions on Signal and Information Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1017/ATSIP.2020.14" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bagus Tris Atmaja and Masato Akagi. 2020. Dimen- sional speech emotion recognition from speech fea- tures and word embeddings by using multitask learn- ing. APSIPA Transactions on Signal and Informa- tion Processing, 9.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Gorsa Lakshmi Niharika, and Yalavarthi Sikhi. 2020. Speech to text translation enabling multilingualism", |
|
"authors": [ |
|
{ |
|
"first": "Shahana", |
|
"middle": [], |
|
"last": "Bano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pavuluri", |
|
"middle": [], |
|
"last": "Jithendra", |
|
"suffix": "" |
|
},

{

"first": "Gorsa Lakshmi",

"middle": [],

"last": "Niharika",

"suffix": ""

},

{

"first": "Yalavarthi",

"middle": [],

"last": "Sikhi",

"suffix": ""

}
|
], |
|
"year": null, |
|
"venue": "2020 IEEE International Conference for Innovation in Technology (INOCON)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--4", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/INOCON50539.2020.9298280" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shahana Bano, Pavuluri Jithendra, Gorsa Lakshmi Ni- harika, and Yalavarthi Sikhi. 2020. Speech to text translation enabling multilingualism. In 2020 IEEE International Conference for Innovation in Technol- ogy (INOCON), pages 1-4. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Towards multimodal sarcasm detection (an obviously perfect paper)", |
|
"authors": [ |
|
{ |
|
"first": "Santiago", |
|
"middle": [], |
|
"last": "Castro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devamanyu", |
|
"middle": [], |
|
"last": "Hazarika", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ver\u00f3nica", |
|
"middle": [], |
|
"last": "P\u00e9rez-Rosas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roger", |
|
"middle": [], |
|
"last": "Zimmermann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soujanya", |
|
"middle": [], |
|
"last": "Poria", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Santiago Castro, Devamanyu Hazarika, Ver\u00f3nica P\u00e9rez- Rosas, Roger Zimmermann, Rada Mihalcea, and Soujanya Poria. 2019. Towards multimodal sarcasm detection (an obviously perfect paper). In Pro- ceedings of the 57th Annual Meeting of the Associa- tion for Computational Linguistics (Volume 1: Long Papers), Florence, Italy. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Automatic personality and interaction style recognition from facebook profile pictures", |
|
"authors": [ |
|
{ |
|
"first": "Fabio", |
|
"middle": [], |
|
"last": "Celli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elia", |
|
"middle": [], |
|
"last": "Bruni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bruno", |
|
"middle": [], |
|
"last": "Lepri", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 22nd ACM international conference on Multimedia", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1101--1104", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/2647868.2654977" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fabio Celli, Elia Bruni, and Bruno Lepri. 2014. Au- tomatic personality and interaction style recognition from facebook profile pictures. In Proceedings of the 22nd ACM international conference on Multime- dia, pages 1101-1104.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2019 Conference ofthe North American Chapter of the Association for Computational Linguistics: Human LanguageTechnologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference ofthe North American Chapter of the Association for Computational Linguistics: Human LanguageTech- nologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Opensmile: the Munich versatile and fast open-source audio feature extractor", |
|
"authors": [ |
|
{ |
|
"first": "Florian", |
|
"middle": [], |
|
"last": "Eyben", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "W\u00f6llmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bj\u00f6rn", |
|
"middle": [], |
|
"last": "Schuller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 18th ACM international conference on Multimedia", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1459--1462", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/1873951.1874246" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Florian Eyben, Martin W\u00f6llmer, and Bj\u00f6rn Schuller. 2010. Opensmile: the Munich versatile and fast open-source audio feature extractor. In Proceedings of the 18th ACM international conference on Multi- media, pages 1459-1462.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Problems with evaluation of word embeddings using word similarity tasks", |
|
"authors": [ |
|
{ |
|
"first": "Manaal", |
|
"middle": [], |
|
"last": "Faruqui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yulia", |
|
"middle": [], |
|
"last": "Tsvetkov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushpendre", |
|
"middle": [], |
|
"last": "Rastogi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1605.02276" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Manaal Faruqui, Yulia Tsvetkov, Pushpendre Rastogi, and Chris Dyer. 2016. Problems with evaluation of word embeddings using word similarity tasks. arXiv preprint arXiv:1605.02276.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Evaluation of off-the-shelf speech recognizers across diverse dialogue domains", |
|
"authors": [ |
|
{ |
|
"first": "Kallirroi", |
|
"middle": [], |
|
"last": "Georgila", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anton", |
|
"middle": [], |
|
"last": "Leuski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Volodymyr", |
|
"middle": [], |
|
"last": "Yanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Traum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of The 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6469--6476", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kallirroi Georgila, Anton Leuski, Volodymyr Yanov, and David Traum. 2020. Evaluation of off-the-shelf speech recognizers across diverse dialogue domains. In Proceedings of The 12th Language Resources and Evaluation Conference, pages 6469-6476.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Rada Mihalcea, and Soujanya Poria. 2020. Cosmic: Commonsense knowledge for emotion identification in conversations", |
|
"authors": [ |
|
{ |
|
"first": "Deepanway", |
|
"middle": [], |
|
"last": "Ghosal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Navonil", |
|
"middle": [], |
|
"last": "Majumder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Gelbukh", |
|
"suffix": "" |
|
},

{

"first": "Rada",

"middle": [],

"last": "Mihalcea",

"suffix": ""

},

{

"first": "Soujanya",

"middle": [],

"last": "Poria",

"suffix": ""

}
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2010.02795" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Deepanway Ghosal, Navonil Majumder, Alexander Gelbukh, Rada Mihalcea, and Soujanya Poria. 2020. Cosmic: Commonsense knowledge for emotion identification in conversations. arXiv preprint arXiv:2010.02795.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Long short-term memory", |
|
"authors": [ |
|
{ |
|
"first": "Sepp", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00fcrgen", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural computation", |
|
"volume": "9", |
|
"issue": "8", |
|
"pages": "1735--1780", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/neco.1997.9.8.1735" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation, 9(8):1735-1780.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "The cmu sphinx-4 speech recognition system", |
|
"authors": [ |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Lamere", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Kwok", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evandro", |
|
"middle": [], |
|
"last": "Gouvea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bhiksha", |
|
"middle": [], |
|
"last": "Raj", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rita", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Walker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manfred", |
|
"middle": [], |
|
"last": "Warmuth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paul Lamere, Philip Kwok, Evandro Gouvea, Bhiksha Raj, Rita Singh, William Walker, Manfred Warmuth, and Peter Wolf. 2003. The cmu sphinx-4 speech recognition system.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Multi-task semi-supervised adversarial autoencoding for speech emotion recognition", |
|
"authors": [ |
|
{ |
|
"first": "Siddique", |
|
"middle": [], |
|
"last": "Latif", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rajib", |
|
"middle": [], |
|
"last": "Rana", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Khalifa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raja", |
|
"middle": [], |
|
"last": "Jurdak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Epps", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bj\u00f3rn Wolfgang", |
|
"middle": [], |
|
"last": "Schuller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "IEEE Transactions on Affective Computing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/TAFFC.2020.2983669" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Siddique Latif, Rajib Rana, Sara Khalifa, Raja Jurdak, Julien Epps, and Bj\u00f3rn Wolfgang Schuller. 2020. Multi-task semi-supervised adversarial autoencod- ing for speech emotion recognition. IEEE Transac- tions on Affective Computing.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Emotions evoked by common words and phrases: Using mechanical turk to create an emotion lexicon", |
|
"authors": [ |
|
{ |
|
"first": "Saif", |
|
"middle": [], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Turney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the NAACL HLT 2010 workshop on computational approaches to analysis and generation of emotion in text", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "26--34", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Saif Mohammad and Peter Turney. 2010. Emotions evoked by common words and phrases: Using me- chanical turk to create an emotion lexicon. In Pro- ceedings of the NAACL HLT 2010 workshop on com- putational approaches to analysis and generation of emotion in text, pages 26-34.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Crowdsourcing a word-emotion association lexicon", |
|
"authors": [ |
|
{

"first": "Saif",

"middle": [

"M"

],

"last": "Mohammad",

"suffix": ""

},

{

"first": "Peter",

"middle": [

"D"

],

"last": "Turney",

"suffix": ""

}
|
], |
|
"year": 2013, |
|
"venue": "Computational intelligence", |
|
"volume": "29", |
|
"issue": "3", |
|
"pages": "436--465", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1111/j.1467-8640.2012.00460.x" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Saif M Mohammad and Peter D Turney. 2013. Crowd- sourcing a word-emotion association lexicon. Com- putational intelligence, 29(3):436-465.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Librispeech: An asr corpus based on public domain audio books", |
|
"authors": [ |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Panayotov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Povey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Khudanpur", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "2015 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5206--5210", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ICASSP.2015.7178964" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "V. Panayotov, G. Chen, D. Povey, and S. Khudanpur. 2015. Librispeech: An asr corpus based on public domain audio books. In 2015 IEEE International Conference on Acoustics, Speech and Signal Pro- cessing (ICASSP), pages 5206-5210.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D Manning. 2014. Glove: Global vectors for word rep- resentation. In Proceedings of the 2014 conference on empirical methods in natural language process- ing (EMNLP), pages 1532-1543.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Chalearn lap 2016: First round challenge on first impressions-dataset and results", |
|
"authors": [ |
|
{ |
|
"first": "V\u00edctor", |
|
"middle": [], |
|
"last": "Ponce-L\u00f3pez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Baiyu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc", |
|
"middle": [], |
|
"last": "Oliu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ciprian", |
|
"middle": [], |
|
"last": "Corneanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Albert", |
|
"middle": [], |
|
"last": "Clap\u00e9s", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Isabelle", |
|
"middle": [], |
|
"last": "Guyon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xavier", |
|
"middle": [], |
|
"last": "Bar\u00f3", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hugo", |
|
"middle": [ |
|
"Jair" |
|
], |
|
"last": "Escalante", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergio", |
|
"middle": [], |
|
"last": "Escalera", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "European conference on computer vision", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "400--418", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "V\u00edctor Ponce-L\u00f3pez, Baiyu Chen, Marc Oliu, Ciprian Corneanu, Albert Clap\u00e9s, Isabelle Guyon, Xavier Bar\u00f3, Hugo Jair Escalante, and Sergio Escalera. 2016. Chalearn lap 2016: First round challenge on first impressions-dataset and results. In Euro- pean conference on computer vision, pages 400-418. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Meld: A multimodal multi-party dataset for emotion recognition in conversations", |
|
"authors": [ |
|
{ |
|
"first": "Soujanya", |
|
"middle": [], |
|
"last": "Poria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devamanyu", |
|
"middle": [], |
|
"last": "Hazarika", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Navonil", |
|
"middle": [], |
|
"last": "Majumder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gautam", |
|
"middle": [], |
|
"last": "Naik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Cambria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "527--536", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Soujanya Poria, Devamanyu Hazarika, Navonil Ma- jumder, Gautam Naik, Erik Cambria, and Rada Mi- halcea. 2019. Meld: A multimodal multi-party dataset for emotion recognition in conversations. In Proceedings of the 57th Annual Meeting of the As- sociation for Computational Linguistics, pages 527- 536, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "The kaldi speech recognition toolkit", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Povey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arnab", |
|
"middle": [], |
|
"last": "Ghoshal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gilles", |
|
"middle": [], |
|
"last": "Boulianne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lukas", |
|
"middle": [], |
|
"last": "Burget", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ondrej", |
|
"middle": [], |
|
"last": "Glembek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nagendra", |
|
"middle": [], |
|
"last": "Goel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirko", |
|
"middle": [], |
|
"last": "Hannemann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Petr", |
|
"middle": [], |
|
"last": "Motlicek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanmin", |
|
"middle": [], |
|
"last": "Qian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Petr", |
|
"middle": [], |
|
"last": "Schwarz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Silovsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georg", |
|
"middle": [], |
|
"last": "Stemmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karel", |
|
"middle": [], |
|
"last": "Vesely", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "IEEE 2011 Workshop on Automatic Speech Recognition and Understanding", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Povey, Arnab Ghoshal, Gilles Boulianne, Lukas Burget, Ondrej Glembek, Nagendra Goel, Mirko Hannemann, Petr Motlicek, Yanmin Qian, Petr Schwarz, Jan Silovsky, Georg Stemmer, and Karel Vesely. 2011. The kaldi speech recognition toolkit. In IEEE 2011 Workshop on Automatic Speech Recognition and Understanding. IEEE Signal Pro- cessing Society. IEEE Catalog No.: CFP11SRW- USB.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Personality recognition in conversations using capsule neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Esteban", |
|
"middle": [], |
|
"last": "Andres Rissola", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Seyed", |
|
"middle": [ |
|
"Ali" |
|
], |
|
"last": "Bahrainian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fabio", |
|
"middle": [], |
|
"last": "Crestani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "IEEE/WIC/ACM International Conference on Web Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "180--187", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3350546.3352516" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Esteban Andres Rissola, Seyed Ali Bahrainian, and Fabio Crestani. 2019. Personality recognition in conversations using capsule neural networks. In IEEE/WIC/ACM International Conference on Web Intelligence, pages 180-187.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "The interspeech 2010 paralinguistic challenge", |
|
"authors": [ |
|
{ |
|
"first": "Bj\u00f6rn", |
|
"middle": [], |
|
"last": "Schuller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Steidl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anton", |
|
"middle": [], |
|
"last": "Batliner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Burkhardt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurence", |
|
"middle": [], |
|
"last": "Devillers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shrikanth S", |
|
"middle": [], |
|
"last": "Narayanan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Eleventh Annual Conference of the International Speech Communication Association", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bj\u00f6rn Schuller, Stefan Steidl, Anton Batliner, Felix Burkhardt, Laurence Devillers, Christian M\u00fcller, and Shrikanth S Narayanan. 2010. The interspeech 2010 paralinguistic challenge. In Eleventh Annual Conference of the International Speech Communica- tion Association.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "The interspeech 2013 computational paralinguistics challenge: Social signals, conflict, emotion, autism", |
|
"authors": [ |
|
{ |
|
"first": "Bj\u00f6rn", |
|
"middle": [], |
|
"last": "Schuller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Steidl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anton", |
|
"middle": [], |
|
"last": "Batliner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Vinciarelli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus", |
|
"middle": [], |
|
"last": "Scherer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fabien", |
|
"middle": [], |
|
"last": "Ringeval", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohamed", |
|
"middle": [], |
|
"last": "Chetouani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Weninger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Florian", |
|
"middle": [], |
|
"last": "Eyben", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Marchi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "14th Annual Conference of the International Speech Communication Association", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bj\u00f6rn Schuller, Stefan Steidl, Anton Batliner, Alessan- dro Vinciarelli, Klaus Scherer, Fabien Ringeval, Mo- hamed Chetouani, Felix Weninger, Florian Eyben, Erik Marchi, et al. 2013. The interspeech 2013 com- putational paralinguistics challenge: Social signals, conflict, emotion, autism. In Proceedings INTER- SPEECH 2013, 14th Annual Conference of the Inter- national Speech Communication Association, Lyon, France.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Multi-task learning for argumentation mining in low-resource settings", |
|
"authors": [ |
|
{ |
|
"first": "Claudia", |
|
"middle": [], |
|
"last": "Schulz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steffen", |
|
"middle": [], |
|
"last": "Eger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johannes", |
|
"middle": [], |
|
"last": "Daxenberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tobias", |
|
"middle": [], |
|
"last": "Kahse", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 16th Annual Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Claudia Schulz, Steffen Eger, Johannes Daxenberger, Tobias Kahse, and Iryna Gurevych. 2018. Multi-task learning for argumentation mining in low-resource settings. In Proceedings of the 16th Annual Con- ference of the North American Chapter of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "A survey of multimodal sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Soleymani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Garcia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brendan", |
|
"middle": [], |
|
"last": "Jou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bj\u00f6rn", |
|
"middle": [], |
|
"last": "Schuller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shih-Fu", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maja", |
|
"middle": [], |
|
"last": "Pantic", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Image and Vision Computing", |
|
"volume": "65", |
|
"issue": "", |
|
"pages": "3--14", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.imavis.2017.08.003" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohammad Soleymani, David Garcia, Brendan Jou, Bj\u00f6rn Schuller, Shih-Fu Chang, and Maja Pantic. 2017. A survey of multimodal sentiment analysis. Image and Vision Computing, 65:3-14.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Learning factorized multimodal representations", |
|
"authors": [], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yao-Hung Hubert Tsai, Paul Pu Liang, Amir Zadeh, Louis-Philippe Morency, and Ruslan Salakhutdinov. 2018. Learning factorized multimodal representa- tions. ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Mitigating biases in multimodal personality assessment", |
|
"authors": [ |
|
{ |
|
"first": "Shen", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Soleymani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 International Conference on Multimodal Interaction", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "361--369", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3382507.3418889" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shen Yan, Di Huang, and Mohammad Soleymani. 2020. Mitigating biases in multimodal personality assessment. In Proceedings of the 2020 Interna- tional Conference on Multimodal Interaction, pages 361-369.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Multi-attention recurrent network for human communication comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Amir", |
|
"middle": [], |
|
"last": "Zadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [ |
|
"Pu" |
|
], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soujanya", |
|
"middle": [], |
|
"last": "Poria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prateek", |
|
"middle": [], |
|
"last": "Vij", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Cambria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Louis-Philippe", |
|
"middle": [], |
|
"last": "Morency", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amir Zadeh, Paul Pu Liang, Soujanya Poria, Pra- teek Vij, Erik Cambria, and Louis-Philippe Morency. 2018. Multi-attention recurrent network for human communication comprehension. Proceedings of the AAAI Conference on Artificial Intelligence, 32(1).", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Emotion detection on TV show transcripts with sequence-based convolutional neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Sayyed M", |
|
"middle": [], |
|
"last": "Zahiri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jinho D", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings ofthe AAAI Workshop on Affective Content Analysis", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "44--51", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sayyed M Zahiri and Jinho D Choi. 2017. Emotion de- tection on TV show transcripts with sequence-based convolutional neural networks. In Proceedings ofthe AAAI Workshop on Affective Content Analysis, pages 44-51, New Orleans, LA.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF1": { |
|
"num": null, |
|
"type_str": "table", |
|
"text": "", |
|
"content": "<table/>", |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"type_str": "table", |
|
"text": "WER of MELD, MUStARD, and FirstImpr; results", |
|
"content": "<table/>", |
|
"html": null |
|
}, |
|
"TABREF5": { |
|
"num": null, |
|
"type_str": "table", |
|
"text": "", |
|
"content": "<table><tr><td colspan=\"4\">: Extrinsic task performance of our single-task system</td></tr><tr><td colspan=\"4\">using acoustic features (Aud), text features (Txt), or multi-</td></tr><tr><td colspan=\"4\">modal (MM). We also show performance of the multimodal</td></tr><tr><td colspan=\"4\">multitask system (MM-MT) that utilizes MELD and FirstImpr.</td></tr><tr><td colspan=\"4\">All results presented are weighted average F1.</td></tr><tr><td/><td colspan=\"3\">MELD MUStARD FirstImpr</td></tr><tr><td>Gold</td><td>139580</td><td>10991</td><td>523387</td></tr><tr><td/><td>(95.7%)</td><td>(98.7%)</td><td>(98.4%)</td></tr><tr><td>Google</td><td>88913</td><td>7315</td><td>436272</td></tr><tr><td/><td>(99.8%)</td><td>(99.8%)</td><td>(100%)</td></tr><tr><td>Kaldi</td><td>89082</td><td>7039</td><td>369201</td></tr><tr><td/><td>(100%)</td><td>(99.9%)</td><td>(100%)</td></tr><tr><td>Sphinx</td><td>103659</td><td>6891</td><td>419214</td></tr><tr><td/><td>(100%)</td><td>(100%)</td><td>(100%)</td></tr></table>", |
|
"html": null |
|
}, |
|
"TABREF6": { |
|
"num": null, |
|
"type_str": "table", |
|
"text": "GloVe coverage (in tokens) for each transcription type with each dataset. Percentage of all corpus tokens appearing in GloVe are shown in parentheses.", |
|
"content": "<table/>", |
|
"html": null |
|
}, |
|
"TABREF8": { |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Number of tokens (left) and types (right) of words from the NRC Word-Emotion Association Lexicon (Mo", |
|
"content": "<table/>", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |