|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T06:07:34.110950Z" |
|
}, |
|
"title": "Multi-Emotion Classification for Song Lyrics", |
|
"authors": [ |
|
{ |
|
"first": "Darren", |
|
"middle": [], |
|
"last": "Edmonds", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "ICS University of California", |
|
"location": { |
|
"settlement": "Irvine" |
|
} |
|
}, |
|
"email": "dedmond1@uci.edu" |
|
}, |
|
{ |
|
"first": "Jo\u00e3o", |
|
"middle": [], |
|
"last": "Sedoc", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "New York University", |
|
"location": {} |
|
}, |
|
"email": "jsedoc@stern.nyu.edu" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Song lyrics convey a multitude of emotions to the listener and powerfully portray the emotional state of the writer or singer. This paper examines a variety of modeling approaches to the multi-emotion classification problem for songs. We introduce the Edmonds Dance dataset, a novel emotion-annotated lyrics dataset from the reader's perspective, and annotate the dataset of Mihalcea and Strapparava (2012) at the song level. We find that models trained on relatively small song datasets achieve marginally better performance than BERT (Devlin et al., 2019) finetuned on large social media or dialog datasets.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Song lyrics convey a multitude of emotions to the listener and powerfully portray the emotional state of the writer or singer. This paper examines a variety of modeling approaches to the multi-emotion classification problem for songs. We introduce the Edmonds Dance dataset, a novel emotion-annotated lyrics dataset from the reader's perspective, and annotate the dataset of Mihalcea and Strapparava (2012) at the song level. We find that models trained on relatively small song datasets achieve marginally better performance than BERT (Devlin et al., 2019) finetuned on large social media or dialog datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Text-based sentiment analysis has become increasingly popular in recent years, in part due to its numerous applications in fields such as marketing, politics, and psychology (Rambocas and Pacheco, 2018; Haselmayer and Jenny, 2017; Provoost et al., 2019) . However, the vast majority of sentiment analysis models are built to identify net positive or negative sentiment rather than more complex, ambiguous emotions such as anticipation, surprise, or nostalgia (Jongeling et al., 2017) . As a result, current models usually fail to portray the coexistence of multiple emotions within a text sample, resulting in limited characterization of a human's true emotions. Songs are often created to elicit complex emotional responses from listeners, and thus are an interesting area of study to understand nuanced emotions .", |
|
"cite_spans": [ |
|
{ |
|
"start": 174, |
|
"end": 202, |
|
"text": "(Rambocas and Pacheco, 2018;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 203, |
|
"end": 230, |
|
"text": "Haselmayer and Jenny, 2017;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 231, |
|
"end": 253, |
|
"text": "Provoost et al., 2019)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 459, |
|
"end": 483, |
|
"text": "(Jongeling et al., 2017)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This paper examines a variety of approaches to address the multi-emotion classification problem. We aim to build an emotion classification model that can detect the presence of multiple emotions in song lyrics with comparable accuracy to the typical inter-annotator agreement for textbased sentiment analysis (70-90%) (Diakopoulos and Shamma, 2010; Bobicev and Sokolova, 2017; Takala et al., 2014) . Building such a model is especially challenging in practice as there often exists considerable disagreement regarding the perception and interpretation of the emotions of a song or ambiguity within the song itself (Kim et al., 2010) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 318, |
|
"end": 348, |
|
"text": "(Diakopoulos and Shamma, 2010;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 349, |
|
"end": 376, |
|
"text": "Bobicev and Sokolova, 2017;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 377, |
|
"end": 397, |
|
"text": "Takala et al., 2014)", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 614, |
|
"end": 632, |
|
"text": "(Kim et al., 2010)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "There exist a variety of high-quality text datasets for emotion classification, from social media datasets such as CBET (Shahraki, 2015) and TEC (Mohammad, 2012) to large dialog corpora such as the DailyDialog dataset (Li et al., 2017) . However, there remains a lack of comparable emotionannotated song lyric datasets, and existing lyrical datasets are often annotated for valence-arousal affect rather than distinct emotions (\u00c7ano and Morisio, 2017) . Consequently, we introduce the Edmonds Dance Dataset 1 , a novel lyrical dataset that was crowdsourced through Amazon Mechanical Turk. Our dataset consists of scalar annotations for the 8 core emotions presented by Plutchik (2001) , with annotations collected at the song level and from the reader's perspective.", |
|
"cite_spans": [ |
|
{ |
|
"start": 120, |
|
"end": 136, |
|
"text": "(Shahraki, 2015)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 145, |
|
"end": 161, |
|
"text": "(Mohammad, 2012)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 218, |
|
"end": 235, |
|
"text": "(Li et al., 2017)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 427, |
|
"end": 451, |
|
"text": "(\u00c7ano and Morisio, 2017)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 669, |
|
"end": 684, |
|
"text": "Plutchik (2001)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We find that BERT models trained on out-ofdomain data do not generalize well to song lyrics and have lower F1 scores than Naive Bayes classifiers for emotions such as disgust and fear. However, BERT models trained on small lyrical datasets achieve marginally better performance, despite indomain datasets being orders of magnitude smaller than their counterparts. We also find that surprise has significantly lower inter-annotator agreement and test accuracy than other core emotions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "A multitude of models and techniques have been explored for song emotion classification. Both He et al. (2008) and Wang et al. (2011) found that fea-ture extraction from lyrics improves emotion classification performance. Researchers have trained Naive Bayes, HMM, SVM, clustering, and Random Forest models on lyrical and sometimes audio features to predict emotion in songs (Hu et al., 2009; Kim and Kwon, 2011; Jamdar et al., 2015; Rachman et al., 2018) . Deep learning frameworks have also been widely utilized for song emotion classification, ranging from CNNs and LSTMs (Delbouys et al., 2018; Abdillah et al., 2020) to transformer-based models such as BERT and ELMo (Parisi et al., 2019; Liu and Tan, 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 110, |
|
"text": "He et al. (2008)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 115, |
|
"end": 133, |
|
"text": "Wang et al. (2011)", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 375, |
|
"end": 392, |
|
"text": "(Hu et al., 2009;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 393, |
|
"end": 412, |
|
"text": "Kim and Kwon, 2011;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 413, |
|
"end": 433, |
|
"text": "Jamdar et al., 2015;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 434, |
|
"end": 455, |
|
"text": "Rachman et al., 2018)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 575, |
|
"end": 598, |
|
"text": "(Delbouys et al., 2018;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 599, |
|
"end": 621, |
|
"text": "Abdillah et al., 2020)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 672, |
|
"end": 693, |
|
"text": "(Parisi et al., 2019;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 694, |
|
"end": 712, |
|
"text": "Liu and Tan, 2020)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Multiple researchers have taken a multi-modal approach to emotion prediction. , introduced a novel corpus of both music and lyrics, and achieved promising results when using both musical and lyrical representations of songs in emotion classification. Similarly, Yang et al. (2008) found an increase in 4-class emotion prediction accuracy from 46.6 to 57.1 percent when incorporating lyrics into models trained on audio.", |
|
"cite_spans": [ |
|
{ |
|
"start": 262, |
|
"end": 280, |
|
"text": "Yang et al. (2008)", |
|
"ref_id": "BIBREF47" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "However, audio data can lead to problematic bias in emotion classification. Susino and Schubert (2019b) explored the presence of emotion stereotyping in certain genres, and found that heavy metal and hip-hop music were perceived to have more negative emotions than pop music with matched lyrics. Susino and Schubert (2019a) also found that emotional responses to an audio sample of a song could be predicted by stereotypes of the culture with which the song's genre was associated. Additionally, Fried (1999) found that violent lyrical passages were seen to be significantly more negative when represented as rap songs rather than country songs. Dunbar et al. (2016) validated Fried's findings through multiple studies in which participants believed that identical lyrics were more offensive when portrayed as rap rather than country music.", |
|
"cite_spans": [ |
|
{ |
|
"start": 76, |
|
"end": 103, |
|
"text": "Susino and Schubert (2019b)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 296, |
|
"end": 323, |
|
"text": "Susino and Schubert (2019a)", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 482, |
|
"end": 508, |
|
"text": "Additionally, Fried (1999)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 646, |
|
"end": 666, |
|
"text": "Dunbar et al. (2016)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Lyrics are paramount for the accurate prediction of emotion in music. Yang and Lee (2009) transformed song lyrics into psychological feature vectors using a content analysis package and concluded that song lyrics alone can be used to generate promising, human-comprehensible classification models. Hu et al. (2009) found that audio features did not always outperform lyric features for mood prediction, and that combining lyric and audio features does not necessarily improve mood prediction over simply training on lyrics features. In later research, Hu and Downie (2010) found that lyrics features significantly outperformed au-dio features in 7 of 18 mood categories, while audio features outperformed lyrical features in only one.", |
|
"cite_spans": [ |
|
{ |
|
"start": 70, |
|
"end": 89, |
|
"text": "Yang and Lee (2009)", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 298, |
|
"end": 314, |
|
"text": "Hu et al. (2009)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 552, |
|
"end": 572, |
|
"text": "Hu and Downie (2010)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Research is split regarding crowdsourced emotion annotation quality; while Mohammad and Bravo-Marquez (2017) achieved strong results through crowdsourcing labels, Hasan et al. (2014) found crowd labels to sometimes not even be in agreement with themselves. Surprise is an emotion that is especially difficult to model (Buechel and Hahn, 2017; Schuff et al., 2017) , less frequent (Oberl\u00e4nder and Klinger, 2018) , and is sometimes divided into positive and negative surprise (Alm et al., 2005) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 75, |
|
"end": 108, |
|
"text": "Mohammad and Bravo-Marquez (2017)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 163, |
|
"end": 182, |
|
"text": "Hasan et al. (2014)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 318, |
|
"end": 342, |
|
"text": "(Buechel and Hahn, 2017;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 343, |
|
"end": 363, |
|
"text": "Schuff et al., 2017)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 396, |
|
"end": 410, |
|
"text": "Klinger, 2018)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 474, |
|
"end": 492, |
|
"text": "(Alm et al., 2005)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Lyrics are valuable for song emotion prediction and decent classification models can be generated solely on song lyrics. However, many lyrical datasets for song emotion classification are based on valence-arousal and lack emotions such as surprise or fear, which are important components of mood (Ekman and Friesen, 2003) . In addition, there is a lack of large, high quality datasets capturing complex emotion in music.", |
|
"cite_spans": [ |
|
{ |
|
"start": 296, |
|
"end": 321, |
|
"text": "(Ekman and Friesen, 2003)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "In-domain Datasets", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Consequently, we created the Edmonds Dance dataset, a novel corpus of English song lyrics annotated for emotion from the reader's perspective. By searching a Spotify playlist consisting of 800 songs, both lyrical and instrumental, and collecting available lyrics from LyricFind, Genius, and MusixMatch (Lyr; Gen; Mus), we retrieved lyrics for 524 songs. We then labeled our dataset based on Plutchik's 8 core emotions of Anger, Anticipation, Disgust, Fear, Joy, Sadness, Surprise, and Trust (Plutchik, 2001) . Table 1 depicts a subsection of the Edmonds Dance dataset, while the Appendix has more information on our labeling methods.", |
|
"cite_spans": [ |
|
{ |
|
"start": 491, |
|
"end": 507, |
|
"text": "(Plutchik, 2001)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 510, |
|
"end": 517, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Novel Lyrics Dataset Annotated for Emotion", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In addition to the Edmonds Dance dataset, we also reannotated the dataset introduced in Mihalcea and Strapparava (2012), a multimodal corpus of songs that includes scalar annotations of both audio and lyrics for Ekman's six core emotions: Anger, Disgust, Fear, Joy, Sadness, and Surprise (Ekman, 1993) . The original dataset was annotated from the songwriter's perspective and at a line level. We averaged these line-level lyrical annotations to achieve classifications at higher levels, thus gen- Annotation Guidelines To generate reliable annotations, our HIT included detailed annotation instructions. We organized these guidelines into four sections: initial instructions, important notes, definitions, and examples. The initial instructions section provided the annotator with basic task information, stating that he or she will be given a set of song lyrics, and is expected to record the degree to which the lyrics contain eight specific emotions. We also stated that emotions would be rated on a 6-point scale ranging from the complete absence of an emotion to the extreme presence of an emotion.", |
|
"cite_spans": [ |
|
{ |
|
"start": 288, |
|
"end": 301, |
|
"text": "(Ekman, 1993)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mihalcea/Strapparava Dataset Reannotation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Hip Hop (6.5 %)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Country (1.1 %)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Pop (12.9 %) EDM (79.6 %) The important notes section emphasized that English speakers were required for the task, and that completion of all fields was required. The definitions section provided dictionary-level definitions for each of the eight emotions, while the examples section provided two annotated examples, along with general annotation guidelines (see Appendix A.2 for HIT images).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Country (1.1 %)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Each HIT contained the same two example songs. Each of the eight emotions was present in at least one of the songs, and emotions evoked by each song were apparent from the lyrics. Our HITs are available upon request.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Country (1.1 %)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We evaluated annotator reliability by calculating the average Cohen's Kappa of each annotator against others assigned to the same HIT, and discarding those below the threshold of 0.25. We then analyzed agreement across emotions by calculating Krippendorf's Alpha on the remaining annotators, and examined the agreement between original and Turker annotations using Pearson's correlation coefficient. Table 3 depicts our results, with more details available in the Appendix.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 400, |
|
"end": 407, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Surprise had significantly lower inter-annotator agreement than other emotions. Krippendorf's Alpha and Pearson's Correlation values were lowest for Surprise, with significant correlation differences compared to all other emotions except Anticipation. Meanwhile, Joy and Sadness had relatively higher alpha and correlation values, suggesting a hierarchy of difficulty in emotion classification. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To confirm the quality of our dataset, we analyzed differences in annotation patterns between included and discarded Turkers. Discarded annotators had lower median completion time across the Edmonds Dance and Mihalcea/Strapparava datasets (p<.005), were more likely to say that they disliked a song (p<.005), and were less likely to say that they were unfamiliar with a song (p<.001). We also found that discarded annotators spent less time than included annotators on labeling songs that they disliked (p<.001). Further details are in the Appendix.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analysis of Crowd Workers", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To explore the efficacy of out-of-domain model training, we used the CBET (Shahraki, 2015) , TEC (Mohammad, 2012) , and DailyDialog (Li et al., 2017) To train more robust baseline models, we also created augmented and transformed versions of the datasets; details on this process are available in the Appendix. While no versions of the CBET, TEC, and DailyDialog datasets include music lyrics, they are large enough to train deep models which we hypothesized could accurately predict emotions in smaller, gold-standard test datasets of song lyrics.", |
|
"cite_spans": [ |
|
{ |
|
"start": 74, |
|
"end": 90, |
|
"text": "(Shahraki, 2015)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 97, |
|
"end": 113, |
|
"text": "(Mohammad, 2012)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 132, |
|
"end": 149, |
|
"text": "(Li et al., 2017)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Out of Domain Datasets", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We chose Naive Bayes as our first baseline emotion classification model due to its widespread applications in text classification and sentiment analysis (Raschka, 2014) . Given its robustness to outliers and ability to deal with imbalanced data (Chen et al., 2004) , a Random Forest baseline model was also implemented. Lastly, we utilized a Most Frequent Sense (MFS) baseline model, given its strong performance in word sense disambiguation tasks and its applications to emotion classification (Preiss et al., 2009) . We trained our Naive Bayes model on bag-of-words features and our Random Forest model on transformed feature vectors which were generated from our textual datasets using the NRC Hashtag Emotion Lexicon (Mohammad and Turney, 2013) ; see Appendix for further details.", |
|
"cite_spans": [ |
|
{ |
|
"start": 153, |
|
"end": 168, |
|
"text": "(Raschka, 2014)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 245, |
|
"end": 264, |
|
"text": "(Chen et al., 2004)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 495, |
|
"end": 516, |
|
"text": "(Preiss et al., 2009)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 721, |
|
"end": 748, |
|
"text": "(Mohammad and Turney, 2013)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Implementation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "To improve upon emotion classification quality, we also explored more complex models. Due to its ability to generate powerful contextualized word embeddings and its state-of-the-art results in numerous language understanding tasks (Devlin et al., 2019) , the BERT BASE uncased architecture was fine-tuned for multi-emotion classification from the text of song lyrics. BERT BASE consists of 12 Transformer blocks, a hidden size of 768, 12 self-attention heads, and an additional output layer which we used for fine-tuning. 2", |
|
"cite_spans": [ |
|
{ |
|
"start": 231, |
|
"end": 252, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Implementation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We trained separate BERT models for each emotion on the original and augmented CBET datasets, and tested their performance on the Edmonds Dance and Mihalcea/Strapparava datasets. We then compared these results with those of our baseline Naive Bayes, Random Forest, and Most Frequent Sense models. To compare emotion prediction accuracy across multiple text corpora, we also trained BERT models on the TEC and DailyDialog datasets, and tested them on our lyrical datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We found that BERT models trained on the CBET, TEC, and DailyDialog datasets did not generalize well to lyrical data. While models for joy and sadness improved upon the performance of baseline classifiers, models for disgust and fear performed worse than our Naive Bayes baseline. Furthermore, data augmentation techniques improved the performance of our baseline Naive Bayes model, but did not significantly increase BERT model accuracy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "To compare in-domain model accuracy with our out-of-domain results, we trained and tested BERT models on the Edmonds Dance and Mihalcea/Strapparava datasets, and vice versa. Models trained and tested on lyrical datasets had marginally better accuracy and F1 scores than out-of-domain models for anger, joy, and sadness. Given the much smaller sizes of lyrical datasets compared to their counterparts, as well as the differences in song genre and annotation perspective across lyrical datasets, our findings suggest a significant advantage in using in-domain data to train models for complex emotion classification of songs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Finally, all models performed poorly when classifying surprise, and F1 scores for anger, disgust, and fear remained consistently low across models, suggesting a steep hierarchy of difficulty regarding emotion classification. Inter-annotator agreement was much lower for surprise than other emotions, and none of our models were able to accurately predict the presence of surprise in song lyrics. Our work implies that surprise is unique from the perspective of emotion classification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Tables 5 and 6 highlight our model results. A complete version of our evaluation results is available in the Appendix. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In this paper we explore a variety of approaches to the multi-emotion classification problem for songs. We introduce the Edmonds Dance dataset, a novel lyrical dataset annotated for emotion at the song level and from the reader's perspective. We find that emotion classification of song lyrics using state-of-the-art methods is difficult to accomplish using out-of-domain data; BERT models trained on large corpora of tweets and dialogue do not generalize to lyrical data for emotions other than joy and sadness, and are outperformed by Naive Bayes classifiers on disgust and fear. On the other hand, models trained on song lyrics achieve comparable accuracy to models trained on out-of-domain data, even when lyrical datasets are orders of magnitude smaller than their counterparts, have been aggregated from line to song level, have been annotated from different perspectives, and are composed of different genres of music. Our findings underscore the importance of using in-domain data for song emotion classification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Our dataset was annotated by 184 Amazon Mechanical Turk crowdworkers. Annotators were paid $0.15 per task or \u223c $6.75 per hour, and reliable annotators (see Appendix A.2) were awarded a bonus of $0.10 per task or \u223c $11.25 per hour.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ethical Consideration", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The Mihalcea/Strapparava dataset initially consisted of 4976 lines across 100 songs which were annotated using a scale from 0 to 10, with 0 as the absence of emotion and 10 as the highest intensity of emotion. Annotations were based on Ekman's six core emotions: Anger, Disgust, Fear, Joy, Sadness, and Surprise (Ekman, 1993) . As the dataset was annotated at a line level, we averaged emotion annotations on each line to achieve classifications at higher levels. Through averaging, we generated 452 verse-based and 100 song-based annotations. With regards to the Edmonds Dance Dataset, the basis for label selection was provided by Plutchik's Theory of Emotion, which postulates that all emotions are combinations of the 8 core emotions present in our label (Plutchik, 2001) . As a result, the label can lead to additional classification models for emotions which are theorized to be dyads of the core emotions (e.g, PLove = PJoy * PTrust, or PAggressiveness = PAnger * PAnticipation ). Our dataset was initially labeled using an array of size 8; each array index contained a binary value to indicate an emotion's presence. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 312, |
|
"end": 325, |
|
"text": "(Ekman, 1993)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 759, |
|
"end": 775, |
|
"text": "(Plutchik, 2001)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Lyrical Datasets", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To evaluate the reliability of our Mechanical Turk annotations, we first used Cohen's Kappa to calculate the average inter-annotator agreement of each Turker against others assigned to the same HIT. We then discarded all annotators who failed to meet a threshold of 0.25, and calculated average agreement for each emotion using Krippendorf's Alpha on the remaining annotators. Krippendorf's Alpha values were highest for the emotions of joy, sadness, and trust; additionally, alpha values were relatively consistent across emotions. 31.6% of annotations in Mihalcea and Strapparava's dataset failed to meet the Cohen's Kappa threshold, while 63.2% of annotations in the Edmonds Dance dataset failed to meet the threshold. Our results are summarized in Table A1 , while Figures A1, A2, and A3 depict pictures of our HITs. Next, we calculated the Pearson's correlation coefficient and related p-values between original annotations and the Turker annotations for both the Edmonds Dance and Mihalcea/Strapparava datasets. We were also unable to calculate correlation coefficients for Anticipation or Trust in the Rada dataset as the original dataset did not include annotations for these emotions. These results are summarized in Table A2 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 752, |
|
"end": 760, |
|
"text": "Table A1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 1226, |
|
"end": 1234, |
|
"text": "Table A2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.2 Annotator Error Analysis", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "While the relative strength of Pearson's Correlations across emotions was similar to that of our alpha values, correlation with fear was relatively higher than expected, and correlation with anger and surprise were lower than expected. Finally, we looked at Krippendorf's Alpha Values on an an- Figure A3 : HIT Annotation Format notation group level to better understand whether annotation agreement for specific emotions were consistently similar across songs. Our results, summarized in Table A3 , provide evidence for a hierarchy of difficulty in emotion classification. Joy and Sadness have the most favorable distribution of alpha values with few low item-level alpha scores (<0.2), and greater numbers of medium (0.2-0.6) and high (>0.6) item-level alpha scores. We then created a heat map of item-level Krippendorf's Alphas to explore correlation of interannotator agreement across emotions. Our results, visualized in Figure A4 , reveal that alpha values are only slightly correlated across emotions. This implies that classification difficulty of a specific emotion varies depending on the song being an- Figure A5 : Annotation Completion Time by Quantile notated; indeed, the only emotions that have an inter-annotator agreement correlation above 0.4 are Anger/Disgust and Anger/Anticipation. We can also see that only joy has a moderate correlation with overall agreement across emotions, implying that songs with annotation agreement regarding joy may be easier to classify overall, but songs with annotation agreement regarding other emotions may not necessarily be easier to annotate. Consequently, the claim of a consistent hierarchy of difficulty is somewhat undermined and instead it seems that classification difficulty of a specific emotion varies depending on the song being annotated.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 295, |
|
"end": 304, |
|
"text": "Figure A3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 489, |
|
"end": 497, |
|
"text": "Table A3", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 926, |
|
"end": 935, |
|
"text": "Figure A4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1114, |
|
"end": 1123, |
|
"text": "Figure A5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.2 Annotator Error Analysis", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We analyzed the completion time of annotations across good and bad annotators for the Edmonds Dance and Mihalcea/Strapparava datasets, summarized in Figure A5 . We can see that the distributions of completion times were very similar for bad annotators, while the distributions for good annotators were skewed upwards at higher deciles. In addition, the median completion time for good annotators was 31 seconds greater than the median completion time for bad annotators, and the mean completion time for good annotators was 37 seconds greater than that of bad annotators.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 149, |
|
"end": 158, |
|
"text": "Figure A5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.3 Analysis of Crowd Workers", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Next, we looked at differences between good and bad annotator groups regarding annotator enjoyment and familiarity of labeled songs. We found that bad annotators were more likely than good annotators to say that they were familiar with a song (p<.00001), or that they disliked a song (p<.005). Bad annotators also spent significantly less time than good annotators on labeling songs that they said they disliked (p<0.0001). These results are summarized in Figures A6 and A7 . ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 456, |
|
"end": 473, |
|
"text": "Figures A6 and A7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.3 Analysis of Crowd Workers", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To address misclassification of the minority class, we implemented oversampling techniques so classes would be more balanced. For each emotion in the CBET dataset, we added copies of tweets suggesting the presence of the emotion such that the new number of tweets with presence of emotion was between 40-60 percent of the total number of tweets. We then trained two BERT models for each emotion, one using the augmented CBET dataset and the other using the original. To confirm the quality of the original CBET dataset, we also trained and tested BERT models on subsets of CBET data that were randomly sampled without replacement. These results are shown in Table A4 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 658, |
|
"end": 666, |
|
"text": "Table A4", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.4.1 Data Augmentation and Transformation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To explore another approach to the multiemotion classification problem, lyrical data was then transformed into a feature vector of length 9 using the NRC Hashtag Emotion Lexicon, which contains binary indicators regarding the presence or absence of Plutchik's 8 core emotions in 14182 common English words (Mohammad and Turney, 2013) . This occurred by iterating through a song's lyrics, counting each word present in the NRC Emotion Lexicon as well as its emotional classification, and storing this information in the feature vector. For example, the feature vector [5, 10, 1, 9, 4, 2, 2, 3, 28] would correspond to a song's lyrics that contained 28 words (not necessarily all distinct) which were present in the NRC Emotion Lexicon. Of these words, 5 were associated with joy, 10 with trust, 1 with fear, etc. This transformed dataset was generated for the purpose of exploring Random Forest methods for song emotion classification.", |
|
"cite_spans": [ |
|
{ |
|
"start": 306, |
|
"end": 333, |
|
"text": "(Mohammad and Turney, 2013)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 567, |
|
"end": 570, |
|
"text": "[5,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 571, |
|
"end": 574, |
|
"text": "10,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 575, |
|
"end": 577, |
|
"text": "1,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 578, |
|
"end": 580, |
|
"text": "9,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 581, |
|
"end": 583, |
|
"text": "4,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 584, |
|
"end": 586, |
|
"text": "2,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 587, |
|
"end": 589, |
|
"text": "2,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 590, |
|
"end": 592, |
|
"text": "3,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 593, |
|
"end": 596, |
|
"text": "28]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4.1 Data Augmentation and Transformation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To gauge the quality of the CBET dataset, we first calculated the accuracies of BERT models trained and tested on randomly ordered subsets of CBET data, with an 80/20 train/test split. Emotion classification accuracies of these models were at least 90%, confirming the quality of the dataset. Next, we trained BERT models on the full CBET datasets, and evaluated them on the verse-based variation of Mihalcea and Strapparava's dataset, as well as the Edmonds Dance dataset. All BERT models were trained for 3 epochs, and used a sequence length of 128, batch size of 32, learning rate of 2e \u22125 , and warmup proportion of 0.1. The performance of these models, depicted in Table A4 , were then compared to the performance of baseline Naive Bayes and Random Forest models, shown in Tables A5 and A6. Only the baseline Naive Bayes model trained on augmented CBET data is depicted in Table A5 , as the Naive Bayes model trained on normal CBET data had precision and recall of zero for each emotion.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 670, |
|
"end": 678, |
|
"text": "Table A4", |
|
"ref_id": "TABREF6" |
|
}, |
|
{ |
|
"start": 878, |
|
"end": 886, |
|
"text": "Table A5", |
|
"ref_id": "TABREF8" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.5 Evaluation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "It can be seen from Table A4 that BERT models trained on CBET did not generalize well to lyrical datasets. While models for joy and sadness improved on the performance of Naive Bayes and Random Forest classifiers, models for other emotions did not significantly improve on the baseline, and in some cases performed worse than baseline classifiers. BERT models for anger and fear had lower precision and recall than corresponding to the baseline. Additionally, BERT and Random Forest models were unable to correctly identify disgust, while Naive Bayes models successfully identified multiple instances of disgust. As there was not a significant difference in balance between emotion classes within the CBET dataset, the fact that data augmentation did not significantly improve baseline precision and recall implies that class imbalance was not a main factor in discrepancies between classification accuracy of different emotions.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 20, |
|
"end": 28, |
|
"text": "Table A4", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.5 Evaluation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To compare emotion prediction accuracies across multiple text corpora, we then trained BERT models on the TEC and DailyDialog datasets, and tested them on the Edmonds Dance and Mihalcea/Strapparava datasets. The results are summarized in Tables A7 and A8. Both test accuracy for the TEC and the DailyDialog models were similar to those of the CBET models, implying that the dialog domain does not necessarily show more promise than the social media domain when considering the complex emotion classification problem in lyrics.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.5 Evaluation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Finally, to compare in-domain model accuracy with our out of domain results, we trained and tested BERT models on the larger, original versions of the Edmonds Dance and Mihalcea/Strapparava datasets respectively, and vice versa. The results are summarized below in Table A9 . We found that the accuracies of models trained and tested on the Edmonds Dance and Mihalcea/Strapparava datasets were on par with those of the out of domain models despite the much smaller training size and genre differences across the lyrical datasets, implying a significant advantage in using in-domain data to train models for complex emotion classification of songs.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 265, |
|
"end": 273, |
|
"text": "Table A9", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.5 Evaluation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "It is important to note that precision and recall values for disgust, fear, and surprise remained very low, which could imply that certain emotions are generally more difficult than others to classify. This conclusion is supported by our Turker error analysis in Section 3.1.3, in which we found that emotions such as anticipation, disgust, fear and surprise had relatively lower inter-annotator agreement, while other emotions such as joy and sadness had relatively high agreement.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.5 Evaluation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Mihalcea and Strapparava included a table in their paper with the number of lines that each of their 6 core emotions was present in, as well as the average magnitude for each emotion across all annotated lines. We used this information to calculate the average magnitude for each emotion across lines in which they were present, shown in Table A10 . As emotions were annotated on a scale from 0", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 338, |
|
"end": 347, |
|
"text": "Table A10", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.6 Miscellaneous: Emotion Magnitudes by Line", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The Edmonds Dance dataset is available by request from the authors of this paper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "BERT BASE is available at https://tfhub.dev/ google/bert_uncased_L-12_H-768_A-12/1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "to 10, we found it worthwhile to note that annotations for the presence of negative emotions such as anger, disgust, and fear were more likely to be mild than strong. We also found it interesting that only joy had an average magnitude greater than 3, which represented the cutoff for the presence of an emotion .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "annex", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Emotion classification of song lyrics using bidirectional lstm method with glove word representation weighting", |
|
"authors": [ |
|
{ |
|
"first": "Jiddy", |
|
"middle": [], |
|
"last": "Abdillah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ibnu", |
|
"middle": [], |
|
"last": "Asror", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanuar Firdaus Arie", |
|
"middle": [], |
|
"last": "Wibowo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "723--729", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiddy Abdillah, Ibnu Asror, Yanuar Firdaus Arie Wibowo, et al. 2020. Emotion classification of song lyrics using bidirectional lstm method with glove word representation weighting. Jurnal RESTI (Rekayasa Sistem Dan Teknologi Informasi), 4(4):723-729.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Emotions from text: machine learning for text-based emotion prediction", |
|
"authors": [ |
|
{ |
|
"first": "Cecilia", |
|
"middle": [], |
|
"last": "Ovesdotter Alm", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Sproat", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of human language technology conference and conference on empirical methods in natural language processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "579--586", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cecilia Ovesdotter Alm, Dan Roth, and Richard Sproat. 2005. Emotions from text: machine learning for text-based emotion prediction. In Proceedings of human language technology conference and confer- ence on empirical methods in natural language pro- cessing, pages 579-586.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Naive bayes classifiers for music emotion classification based on lyrics", |
|
"authors": [ |
|
{ |
|
"first": "Yunjing", |
|
"middle": [], |
|
"last": "An", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shutao", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shujuan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "2017 IEEE/ACIS 16th International Conference on Computer and Information Science (ICIS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "635--638", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yunjing An, Shutao Sun, and Shujuan Wang. 2017. Naive bayes classifiers for music emotion classifica- tion based on lyrics. In 2017 IEEE/ACIS 16th Inter- national Conference on Computer and Information Science (ICIS), pages 635-638. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Interannotator agreement in sentiment analysis: Machine learning perspective", |
|
"authors": [ |
|
{ |
|
"first": "Victoria", |
|
"middle": [], |
|
"last": "Bobicev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marina", |
|
"middle": [], |
|
"last": "Sokolova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "RANLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "97--102", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Victoria Bobicev and Marina Sokolova. 2017. Inter- annotator agreement in sentiment analysis: Machine learning perspective. In RANLP, pages 97-102.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Emobank: Studying the impact of annotation perspective and representation format on dimensional emotion analysis", |
|
"authors": [ |
|
{ |
|
"first": "Sven", |
|
"middle": [], |
|
"last": "Buechel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Udo", |
|
"middle": [], |
|
"last": "Hahn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 15th Conference of the European Chapter", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "578--585", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sven Buechel and Udo Hahn. 2017. Emobank: Study- ing the impact of annotation perspective and repre- sentation format on dimensional emotion analysis. In Proceedings of the 15th Conference of the Euro- pean Chapter of the Association for Computational Linguistics: Volume 2, Short Papers, pages 578- 585.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Moodylyrics: A sentiment annotated lyrics dataset", |
|
"authors": [ |
|
{ |
|
"first": "Erion", |
|
"middle": [], |
|
"last": "\u00c7ano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maurizio", |
|
"middle": [], |
|
"last": "Morisio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 International Conference on Intelligent Systems, Metaheuristics & Swarm Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "118--124", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erion \u00c7ano and Maurizio Morisio. 2017. Moodylyrics: A sentiment annotated lyrics dataset. In Proceed- ings of the 2017 International Conference on Intelli- gent Systems, Metaheuristics & Swarm Intelligence, pages 118-124.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Using random forest to learn imbalanced data", |
|
"authors": [ |
|
{ |
|
"first": "Chao", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andy", |
|
"middle": [], |
|
"last": "Liaw", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leo", |
|
"middle": [], |
|
"last": "Breiman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "", |
|
"volume": "110", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chao Chen, Andy Liaw, Leo Breiman, et al. 2004. Us- ing random forest to learn imbalanced data. Univer- sity of California, Berkeley, 110(1-12):24.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Music mood detection based on audio and lyrics with deep neural net", |
|
"authors": [ |
|
{ |
|
"first": "R\u00e9mi", |
|
"middle": [], |
|
"last": "Delbouys", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Romain", |
|
"middle": [], |
|
"last": "Hennequin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francesco", |
|
"middle": [], |
|
"last": "Piccoli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimena", |
|
"middle": [], |
|
"last": "Royo-Letelier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manuel", |
|
"middle": [], |
|
"last": "Moussallam", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 19th International Society for Music Information Retrieval Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "370--375", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R\u00e9mi Delbouys, Romain Hennequin, Francesco Pic- coli, Jimena Royo-Letelier, and Manuel Moussal- lam. 2018. Music mood detection based on audio and lyrics with deep neural net. In Proceedings of the 19th International Society for Music Information Retrieval Conference, ISMIR 2018, Paris, France, September 23-27, 2018, pages 370-375.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Characterizing debate performance via aggregated twitter sentiment", |
|
"authors": [ |
|
{

"first": "Nicholas",

"middle": [

"A"

],

"last": "Diakopoulos",

"suffix": ""

},

{

"first": "David",

"middle": [

"A"

],

"last": "Shamma",

"suffix": ""

}
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the SIGCHI conference on human factors in computing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1195--1198", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nicholas A Diakopoulos and David A Shamma. 2010. Characterizing debate performance via aggregated twitter sentiment. In Proceedings of the SIGCHI conference on human factors in computing systems, pages 1195-1198.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "The threatening nature of \"rap\" music. Psychology", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Dunbar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Charis", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Kubrin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicholas", |
|
"middle": [], |
|
"last": "Scurich", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Public Policy, and Law", |
|
"volume": "22", |
|
"issue": "3", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Dunbar, Charis E Kubrin, and Nicholas Scurich. 2016. The threatening nature of \"rap\" music. Psy- chology, Public Policy, and Law, 22(3):280.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Facial expression and emotion", |
|
"authors": [ |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Ekman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "American psychologist", |
|
"volume": "48", |
|
"issue": "4", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paul Ekman. 1993. Facial expression and emotion. American psychologist, 48(4):384.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Unmasking the face: A guide to recognizing emotions from facial clues", |
|
"authors": [ |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Ekman", |
|
"suffix": "" |
|
}, |
|
{

"first": "Wallace",

"middle": [

"V"

],

"last": "Friesen",

"suffix": ""

}
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paul Ekman and Wallace V Friesen. 2003. Unmask- ing the face: A guide to recognizing emotions from facial clues. ISHK.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Who's afraid of rap: Differential reactions to music lyrics 1", |
|
"authors": [ |
|
{

"first": "Carrie",

"middle": [

"B"

],

"last": "Fried",

"suffix": ""

}
|
], |
|
"year": 1999, |
|
"venue": "Journal of Applied Social Psychology", |
|
"volume": "29", |
|
"issue": "4", |
|
"pages": "705--721", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Carrie B Fried. 1999. Who's afraid of rap: Differen- tial reactions to music lyrics 1. Journal of Applied Social Psychology, 29(4):705-721.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Using hashtags as labels for supervised learning of emotions in twitter messages", |
|
"authors": [ |
|
{ |
|
"first": "Maryam", |
|
"middle": [], |
|
"last": "Hasan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emmanuel", |
|
"middle": [], |
|
"last": "Agu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elke", |
|
"middle": [], |
|
"last": "Rundensteiner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "ACM SIGKDD workshop on health informatics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maryam Hasan, Emmanuel Agu, and Elke Runden- steiner. 2014. Using hashtags as labels for super- vised learning of emotions in twitter messages. In ACM SIGKDD workshop on health informatics, New York, USA.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Sentiment analysis of political communication: combining a dictionary approach with crowdcoding. Quality & quantity", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Haselmayer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcelo", |
|
"middle": [], |
|
"last": "Jenny", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "51", |
|
"issue": "", |
|
"pages": "2623--2646", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martin Haselmayer and Marcelo Jenny. 2017. Senti- ment analysis of political communication: combin- ing a dictionary approach with crowdcoding. Qual- ity & quantity, 51(6):2623-2646.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Language feature mining for music emotion classification via supervised learning from lyrics", |
|
"authors": [ |
|
{ |
|
"first": "Hui", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianming", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuhong", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wu", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ling", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "International Symposium on Intelligence Computation and Applications", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "426--435", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hui He, Jianming Jin, Yuhong Xiong, Bo Chen, Wu Sun, and Ling Zhao. 2008. Language feature mining for music emotion classification via super- vised learning from lyrics. In International Sympo- sium on Intelligence Computation and Applications, pages 426-435. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "When lyrics outperform audio for music mood classification: A feature analysis", |
|
"authors": [ |
|
{ |
|
"first": "Xiao", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Stephen Downie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "ISMIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "619--624", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiao Hu and J Stephen Downie. 2010. When lyrics outperform audio for music mood classification: A feature analysis. In ISMIR, pages 619-624.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Lyric text mining in music mood classification", |
|
"authors": [ |
|
{ |
|
"first": "Xiao", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Stephen Downie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ehmann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "183", |
|
"issue": "", |
|
"pages": "2--209", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiao Hu, J Stephen Downie, and Andreas F Ehmann. 2009. Lyric text mining in music mood classifica- tion. American music, 183(5,049):2-209.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Emotion analysis of songs based on lyrical and audio features", |
|
"authors": [ |
|
{ |
|
"first": "Adit", |
|
"middle": [], |
|
"last": "Jamdar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jessica", |
|
"middle": [], |
|
"last": "Abraham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karishma", |
|
"middle": [], |
|
"last": "Khanna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rahul", |
|
"middle": [], |
|
"last": "Dubey", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1506.05012" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adit Jamdar, Jessica Abraham, Karishma Khanna, and Rahul Dubey. 2015. Emotion analysis of songs based on lyrical and audio features. arXiv preprint arXiv:1506.05012.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "On negative results when using sentiment analysis tools for software engineering research", |
|
"authors": [ |
|
{ |
|
"first": "Robbert", |
|
"middle": [], |
|
"last": "Jongeling", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Proshanta", |
|
"middle": [], |
|
"last": "Sarkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Subhajit", |
|
"middle": [], |
|
"last": "Datta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Serebrenik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Empirical Software Engineering", |
|
"volume": "22", |
|
"issue": "5", |
|
"pages": "2543--2584", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robbert Jongeling, Proshanta Sarkar, Subhajit Datta, and Alexander Serebrenik. 2017. On negative re- sults when using sentiment analysis tools for soft- ware engineering research. Empirical Software En- gineering, 22(5):2543-2584.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Lyrics-based emotion classification using feature selection by partial syntactic analysis", |
|
"authors": [ |
|
{ |
|
"first": "Minho", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hyuk-Chul", |
|
"middle": [], |
|
"last": "Kwon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "2011 IEEE 23rd International Conference on Tools with Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "960--964", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minho Kim and Hyuk-Chul Kwon. 2011. Lyrics-based emotion classification using feature selection by par- tial syntactic analysis. In 2011 IEEE 23rd Inter- national Conference on Tools with Artificial Intelli- gence, pages 960-964. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Music emotion recognition: A state of the art review", |
|
"authors": [ |
|
{ |
|
"first": "Youngmoo", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Schmidt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raymond", |
|
"middle": [], |
|
"last": "Migneco", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brandon", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Morton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Richardson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Scott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacquelin", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Speck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douglas", |
|
"middle": [], |
|
"last": "Turnbull", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proc. ismir", |
|
"volume": "86", |
|
"issue": "", |
|
"pages": "937--952", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Youngmoo E Kim, Erik M Schmidt, Raymond Migneco, Brandon G Morton, Patrick Richardson, Jeffrey Scott, Jacquelin A Speck, and Douglas Turn- bull. 2010. Music emotion recognition: A state of the art review. In Proc. ismir, volume 86, pages 937- 952.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Dailydialog: A manually labelled multi-turn dialogue dataset", |
|
"authors": [ |
|
{ |
|
"first": "Yanran", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hui", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoyu", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenjie", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ziqiang", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuzi", |
|
"middle": [], |
|
"last": "Niu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Eighth International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "986--995", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yanran Li, Hui Su, Xiaoyu Shen, Wenjie Li, Ziqiang Cao, and Shuzi Niu. 2017. Dailydialog: A manually labelled multi-turn dialogue dataset. In Proceedings of the Eighth International Joint Conference on Nat- ural Language Processing (Volume 1: Long Papers), pages 986-995.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Research on multimodal music emotion classification based on audio and lyirc", |
|
"authors": [ |
|
{ |
|
"first": "Gaojun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "2020 IEEE 4th Information Technology, Networking, Electronic and Automation Control Conference (ITNEC)", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "2331--2335", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gaojun Liu and Zhiyuan Tan. 2020. Research on multi- modal music emotion classification based on audio and lyirc. In 2020 IEEE 4th Information Technol- ogy, Networking, Electronic and Automation Con- trol Conference (ITNEC), volume 1, pages 2331- 2335. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Lyrics, music, and emotions", |
|
"authors": [ |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlo", |
|
"middle": [], |
|
"last": "Strapparava", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 2012", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rada Mihalcea and Carlo Strapparava. 2012. Lyrics, music, and emotions. In Proceedings of the 2012", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Natural Language Processing and Computational Natural Language Learning", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "590--599", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning, pages 590-599.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "# emotional tweets", |
|
"authors": [ |
|
{ |
|
"first": "Saif", |
|
"middle": [], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "* SEM 2012: The First Joint Conference on Lexical and Computational Semantics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "246--255", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Saif Mohammad. 2012. # emotional tweets. In * SEM 2012: The First Joint Conference on Lexical and Computational Semantics-Volume 1: Proceedings of the main conference and the shared task, and Vol- ume 2: Proceedings of the Sixth International Work- shop on Semantic Evaluation (SemEval 2012), pages 246-255.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Emotion intensities in tweets", |
|
"authors": [ |
|
{ |
|
"first": "Saif", |
|
"middle": [], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felipe", |
|
"middle": [], |
|
"last": "Bravo-Marquez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "65--77", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/S17-1007" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Saif Mohammad and Felipe Bravo-Marquez. 2017. Emotion intensities in tweets. In Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017), pages 65-77, Vancouver, Canada. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Crowdsourcing a word-emotion association lexicon", |
|
"authors": [ |
|
{ |
|
"first": "Saif", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Turney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Computational Intelligence", |
|
"volume": "29", |
|
"issue": "3", |
|
"pages": "436--465", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Saif M Mohammad and Peter D Turney. 2013. Crowd- sourcing a word-emotion association lexicon. Com- putational Intelligence, 29(3):436-465.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "An analysis of annotated corpora for emotion classification in text", |
|
"authors": [ |
|
{ |
|
"first": "Laura", |
|
"middle": [ |
|
"Ana", |
|
"Maria" |
|
], |
|
"last": "Oberl\u00e4nder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roman", |
|
"middle": [], |
|
"last": "Klinger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2104--2119", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Laura Ana Maria Oberl\u00e4nder and Roman Klinger. 2018. An analysis of annotated corpora for emotion clas- sification in text. In Proceedings of the 27th Inter- national Conference on Computational Linguistics, pages 2104-2119.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Exploiting synchronized lyrics and vocal features for music emotion detection", |
|
"authors": [ |
|
{ |
|
"first": "Loreto", |
|
"middle": [], |
|
"last": "Parisi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simone", |
|
"middle": [], |
|
"last": "Francia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Silvio", |
|
"middle": [], |
|
"last": "Olivastri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [ |
|
"Stella" |
|
], |
|
"last": "Tavella", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1901.04831" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Loreto Parisi, Simone Francia, Silvio Olivastri, and Maria Stella Tavella. 2019. Exploiting synchronized lyrics and vocal features for music emotion detec- tion. arXiv preprint arXiv:1901.04831.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "The nature of emotions: Human emotions have deep evolutionary roots, a fact that may explain their complexity and provide tools for clinical practice", |
|
"authors": [ |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Plutchik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "American scientist", |
|
"volume": "89", |
|
"issue": "4", |
|
"pages": "344--350", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert Plutchik. 2001. The nature of emotions: Hu- man emotions have deep evolutionary roots, a fact that may explain their complexity and provide tools for clinical practice. American scientist, 89(4):344- 350.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Refining the most frequent sense baseline", |
|
"authors": [ |
|
{ |
|
"first": "Judita", |
|
"middle": [], |
|
"last": "Preiss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jon", |
|
"middle": [], |
|
"last": "Dehdari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josh", |
|
"middle": [], |
|
"last": "King", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dennis", |
|
"middle": [], |
|
"last": "Mehay", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Workshop on Semantic Evaluations: Recent Achievements and Future Directions", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "10--18", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Judita Preiss, Jon Dehdari, Josh King, and Dennis Mehay. 2009. Refining the most frequent sense base- line. In Proceedings of the Workshop on Semantic Evaluations: Recent Achievements and Future Di- rections (SEW-2009), pages 10-18.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Validating automated sentiment analysis of online cognitive behavioral therapy patient texts: An exploratory study", |
|
"authors": [ |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Provoost", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeroen", |
|
"middle": [], |
|
"last": "Ruwaard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heleen", |
|
"middle": [], |
|
"last": "Ward Van Breda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tibor", |
|
"middle": [], |
|
"last": "Riper", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bosse", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Frontiers in psychology", |
|
"volume": "10", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Simon Provoost, Jeroen Ruwaard, Ward van Breda, Heleen Riper, and Tibor Bosse. 2019. Validating automated sentiment analysis of online cognitive be- havioral therapy patient texts: An exploratory study. Frontiers in psychology, 10:1065.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Music emotion classification based on lyrics-audio using corpus based emotion", |
|
"authors": [ |
|
{ |
|
"first": "Riyanarto", |
|
"middle": [], |
|
"last": "Fika Hastarita Rachman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chastine", |
|
"middle": [], |
|
"last": "Sarno", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Fatichah", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International Journal of Electrical & Computer Engineering", |
|
"volume": "8", |
|
"issue": "3", |
|
"pages": "2088--8708", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fika Hastarita Rachman, Riyanarto Sarno, and Chas- tine Fatichah. 2018. Music emotion classification based on lyrics-audio using corpus based emotion. International Journal of Electrical & Computer En- gineering (2088-8708), 8(3).", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Online sentiment analysis in marketing research: a review", |
|
"authors": [ |
|
{ |
|
"first": "Meena", |
|
"middle": [], |
|
"last": "Rambocas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barney", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Pacheco", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Journal of Research in Interactive Marketing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Meena Rambocas and Barney G Pacheco. 2018. On- line sentiment analysis in marketing research: a re- view. Journal of Research in Interactive Marketing.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Naive bayes and text classification i-introduction and theory", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Raschka", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1410.5329" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Raschka. 2014. Naive bayes and text clas- sification i-introduction and theory. arXiv preprint arXiv:1410.5329.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Annotation, modelling and analysis of fine-grained emotions on a stance and sentiment detection corpus", |
|
"authors": [ |
|
{ |
|
"first": "Hendrik", |
|
"middle": [], |
|
"last": "Schuff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Barnes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Mohme", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Pad\u00f3", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roman", |
|
"middle": [], |
|
"last": "Klinger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 8th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "13--23", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hendrik Schuff, Jeremy Barnes, Julian Mohme, Sebas- tian Pad\u00f3, and Roman Klinger. 2017. Annotation, modelling and analysis of fine-grained emotions on a stance and sentiment detection corpus. In Pro- ceedings of the 8th Workshop on Computational Ap- proaches to Subjectivity, Sentiment and Social Me- dia Analysis, pages 13-23.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Emotion detection from text", |
|
"authors": [ |
|
{ |
|
"first": "Ameneh Gholipour", |
|
"middle": [], |
|
"last": "Shahraki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ameneh Gholipour Shahraki. 2015. Emotion detection from text. Master's thesis, University of Alberta.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "A parallel corpus of music and lyrics annotated with emotions", |
|
"authors": [ |
|
{ |
|
"first": "Carlo", |
|
"middle": [], |
|
"last": "Strapparava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alberto", |
|
"middle": [], |
|
"last": "Battocchi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the Eighth International Conference on Language Resources and Evaluation (LREC'12)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2343--2346", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Carlo Strapparava, Rada Mihalcea, and Alberto Battoc- chi. 2012. A parallel corpus of music and lyrics an- notated with emotions. In Proceedings of the Eighth International Conference on Language Resources and Evaluation (LREC'12), pages 2343-2346, Istan- bul, Turkey. European Language Resources Associ- ation (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Cultural stereotyping of emotional responses to music genre", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Susino", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emery", |
|
"middle": [], |
|
"last": "Schubert", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Psychology of Music", |
|
"volume": "47", |
|
"issue": "3", |
|
"pages": "342--357", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Susino and Emery Schubert. 2019a. Cultural stereotyping of emotional responses to music genre. Psychology of Music, 47(3):342-357.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Negative emotion responses to heavy-metal and hip-hop music with positive lyrics", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Susino", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emery", |
|
"middle": [], |
|
"last": "Schubert", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Empirical Musicology Review", |
|
"volume": "14", |
|
"issue": "1-2", |
|
"pages": "2--15", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Susino and Emery Schubert. 2019b. Negative emotion responses to heavy-metal and hip-hop mu- sic with positive lyrics. Empirical Musicology Re- view, 14(1-2):2-15.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Gold-standard for topic-specific sentiment analysis of economic texts", |
|
"authors": [ |
|
{ |
|
"first": "Pyry", |
|
"middle": [], |
|
"last": "Takala", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pekka", |
|
"middle": [], |
|
"last": "Malo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankur", |
|
"middle": [], |
|
"last": "Sinha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oskar", |
|
"middle": [], |
|
"last": "Ahlgren", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2152--2157", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pyry Takala, Pekka Malo, Ankur Sinha, and Oskar Ahlgren. 2014. Gold-standard for topic-specific sen- timent analysis of economic texts. In Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14), pages 2152- 2157, Reykjavik, Iceland. European Language Re- sources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Music emotion classification of chinese songs based on lyrics using tf* idf and rhyme", |
|
"authors": [ |
|
{ |
|
"first": "Xing", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoou", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deshun", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuqian", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "ISMIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "765--770", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xing Wang, Xiaoou Chen, Deshun Yang, and Yuqian Wu. 2011. Music emotion classification of chinese songs based on lyrics using tf* idf and rhyme. In ISMIR, pages 765-770. Citeseer.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Music emotion identification from lyrics", |
|
"authors": [ |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Won-Sook", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "11th IEEE International Symposium on Multimedia", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "624--629", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dan Yang and Won-Sook Lee. 2009. Music emotion identification from lyrics. In 2009 11th IEEE Inter- national Symposium on Multimedia, pages 624-629. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "Toward multi-modal music emotion classification", |
|
"authors": [ |
|
{ |
|
"first": "Yi-Hsuan", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu-Ching", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heng-Tze", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I-Bin", |
|
"middle": [], |
|
"last": "Liao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yeh-Chin", |
|
"middle": [], |
|
"last": "Ho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Homer H", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Pacific-Rim Conference on Multimedia", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "70--79", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yi-Hsuan Yang, Yu-Ching Lin, Heng-Tze Cheng, I-Bin Liao, Yeh-Chin Ho, and Homer H Chen. 2008. To- ward multi-modal music emotion classification. In Pacific-Rim Conference on Multimedia, pages 70- 79. Springer.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"text": "Genres within Mihalcea/Strapparava HITs", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"text": "Figure A1: HIT Preliminary Instructions", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF3": { |
|
"uris": null, |
|
"text": "Figure A6: Annotator Enjoyment/Familiarity of Songs by Count and Annotation Quality", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"content": "<table><tr><td/><td/><td>: Examples from the Edmonds Dance Dataset</td></tr><tr><td/><td colspan=\"2\">Mihalcea/Strapparava Edmonds Dance</td></tr><tr><td>Songs</td><td>100</td><td>524</td></tr><tr><td>Lines</td><td>4976</td><td>22924</td></tr><tr><td>Words</td><td>109332</td><td>708985</td></tr><tr><td>Vocabulary</td><td>2233</td><td>6563</td></tr></table>", |
|
"html": null, |
|
"text": "", |
|
"type_str": "table" |
|
}, |
|
"TABREF2": { |
|
"num": null, |
|
"content": "<table><tr><td>: Lyrical Dataset Basic Statistics</td></tr><tr><td>erating 452 verse-based and 100 song-based anno-</td></tr><tr><td>tations. Table 2 provides some basic statistics for</td></tr><tr><td>the lyrical datasets used in our research.</td></tr><tr><td>Mechanical Turk We submitted HITs on Me-</td></tr><tr><td>chanical Turk to validate lyric annotations. Each</td></tr><tr><td>HIT contained three songs to be annotated from</td></tr><tr><td>the reader's perspective for 8 emotions on a 6-point</td></tr><tr><td>Likert scale. We also queried whether the annotator</td></tr><tr><td>had heard of each song (yes/no), and whether they</td></tr><tr><td>liked it (yes/no/unsure). Of the 186 songs anno-</td></tr><tr><td>tated in total, 93 were from the Edmonds Dance</td></tr><tr><td>dataset and 93 were from the Mihalcea/Strapparava</td></tr><tr><td>dataset. HITs encompassed multiple genres, with</td></tr><tr><td>the Edmonds Dance dataset mostly consisting of</td></tr><tr><td>electronic music and the Mihalcea/Strapparava</td></tr><tr><td>dataset mostly consisting of rock music. Figures 1</td></tr><tr><td>and 2 summarize HIT breakdowns by genre.</td></tr></table>", |
|
"html": null, |
|
"text": "", |
|
"type_str": "table" |
|
}, |
|
"TABREF4": { |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "", |
|
"type_str": "table" |
|
}, |
|
"TABREF5": { |
|
"num": null, |
|
"content": "<table><tr><td colspan=\"2\">Emotion CBET TEC</td><td>DD</td><td>Dance</td><td>M/S</td></tr><tr><td>Anger</td><td>11.2% 7.4%</td><td colspan=\"3\">1.0% 13.7% 9.1%</td></tr><tr><td colspan=\"2\">Disgust 10.7% 3.6%</td><td colspan=\"3\">0.3% 21.9% 2.9%</td></tr><tr><td>Fear</td><td colspan=\"4\">11.2% 13.3% 0.2% 19.7% 1.8%</td></tr><tr><td>Joy</td><td colspan=\"4\">13.4% 39.1% 12.5% 43.9% 50.4%</td></tr><tr><td colspan=\"5\">Sadness 11.4% 18.2% 1.1% 35.3% 33.0%</td></tr><tr><td colspan=\"5\">Surprise 11.4% 18.3% 1.8% 13.0% 0.9%</td></tr></table>", |
|
"html": null, |
|
"text": "datasets, three large collections of text annotated for multiple emotions including 6 core emotions present in both the Edmonds Dance and Mihalcea/Strapparava datasets. The CBET and TEC datasets respectively consist of 81,163 and 21,048 tweets, while the DailyDialog dataset consists of 102,979 statements collected from 13,118 transcripts of two-person conversations. Emotion distributions of the CBET, TEC, Daily Dialog, Edmonds Dance, and Mihalcea/Strapparava datasets are depicted inTable 4.", |
|
"type_str": "table" |
|
}, |
|
"TABREF6": { |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "Presence of Emotion by Dataset", |
|
"type_str": "table" |
|
}, |
|
"TABREF8": { |
|
"num": null, |
|
"content": "<table><tr><td colspan=\"4\">Emotion Naive Bayes CBET BERT Lyrics BERT</td></tr><tr><td>Anger</td><td>0.17</td><td>0.04</td><td>0.2</td></tr><tr><td>Disgust</td><td>0.21</td><td>0</td><td>0</td></tr><tr><td>Fear</td><td>0.18</td><td>0.14</td><td>0</td></tr><tr><td>Joy</td><td>0.03</td><td>0.24</td><td>0.69</td></tr><tr><td>Sadness</td><td>0.55</td><td>0.48</td><td>0.54</td></tr><tr><td>Surprise</td><td>0</td><td>0</td><td>0</td></tr></table>", |
|
"html": null, |
|
"text": "Model Accuracy on Lyrics By Emotion", |
|
"type_str": "table" |
|
}, |
|
"TABREF9": { |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "", |
|
"type_str": "table" |
|
}, |
|
"TABREF11": { |
|
"num": null, |
|
"content": "<table><tr><td>Dataset</td><td>Emotion</td><td>Pearson's Correlation</td><td>90% CI</td></tr><tr><td>Dance</td><td>All</td><td>0.396</td><td>(0.343, 0.445)</td></tr><tr><td>Dance</td><td>Anger</td><td>0.204</td><td>(0.033, 0.363)</td></tr><tr><td colspan=\"2\">Dance Anticipation</td><td>0.294</td><td>(0.129, 0.443)</td></tr><tr><td>Dance</td><td>Disgust</td><td>0.429</td><td>(0.278, 0.559)</td></tr><tr><td>Dance</td><td>Fear</td><td>0.31</td><td>(0.146, 0.457)</td></tr><tr><td>Dance</td><td>Joy</td><td>0.362</td><td>(0.203, 0.502)</td></tr><tr><td>Dance</td><td>Sadness</td><td>0.316</td><td>(0.154, 0.462)</td></tr><tr><td>Dance</td><td>Surprise</td><td>0.175</td><td>(0.003, 0.336)</td></tr><tr><td>Dance</td><td>Trust</td><td>0.384</td><td>(0.228, 0.522)</td></tr><tr><td>M/S</td><td>All</td><td>0.183</td><td>(0.124, 0.241)</td></tr><tr><td>M/S</td><td>Anger</td><td>0.28</td><td>(0.114, 0.431)</td></tr><tr><td>M/S</td><td>Disgust</td><td>0.214</td><td>(0.045, 0.371)</td></tr><tr><td>M/S</td><td>Fear</td><td>0.499</td><td>(0.358, 0.618)</td></tr><tr><td>M/S</td><td>Joy</td><td>0.439</td><td>(0.289, 0.568)</td></tr><tr><td>M/S</td><td>Sadness</td><td>0.477</td><td>(0.333, 0.6)</td></tr><tr><td>M/S</td><td>Surprise</td><td>0.01</td><td>(-0.161, 0.18)</td></tr><tr><td>: Interannotator Agreement at Cohen's Kappa</td><td/><td/><td/></tr><tr><td>Threshold of 0.25</td><td/><td/><td/></tr></table>", |
|
"html": null, |
|
"text": "", |
|
"type_str": "table" |
|
}, |
|
"TABREF12": { |
|
"num": null, |
|
"content": "<table><tr><td>: Pearson's Correlation between Original and</td></tr><tr><td>Turker annotations</td></tr></table>", |
|
"html": null, |
|
"text": "", |
|
"type_str": "table" |
|
}, |
|
"TABREF14": { |
|
"num": null, |
|
"content": "<table><tr><td>: Krippendorf's Alpha Values By Emotion</td></tr><tr><td>Over 50 Annotator Groups</td></tr><tr><td>Figure A4: Correlation Heatmap of Krippendorf's Al-</td></tr><tr><td>phas</td></tr></table>", |
|
"html": null, |
|
"text": "", |
|
"type_str": "table" |
|
}, |
|
"TABREF16": { |
|
"num": null, |
|
"content": "<table><tr><td>Emotion</td><td>Test</td><td colspan=\"3\">Accuracy Precision Recall</td></tr><tr><td>Anger</td><td>Dance Original</td><td>0.65</td><td>0.14</td><td>0.29</td></tr><tr><td>Anger</td><td>Dance Turk</td><td>0.63</td><td>0.14</td><td>0.31</td></tr><tr><td>Anger</td><td>M/S Original</td><td>0.7</td><td>0.12</td><td>0.34</td></tr><tr><td>Anger</td><td>M/S Turk</td><td>0.84</td><td>0.23</td><td>0.38</td></tr><tr><td colspan=\"2\">Disgust Dance Original</td><td>0.77</td><td>0.43</td><td>0.2</td></tr><tr><td>Disgust</td><td>Dance Turk</td><td>0.84</td><td>0</td><td>0</td></tr><tr><td>Disgust</td><td>M/S Original</td><td>0.82</td><td>0.04</td><td>0.23</td></tr><tr><td>Disgust</td><td>M/S Turk</td><td>0.92</td><td>0.29</td><td>0.5</td></tr><tr><td>Fear</td><td>Dance Original</td><td>0.7</td><td>0.27</td><td>0.3</td></tr><tr><td>Fear</td><td>Dance Turk</td><td>0.73</td><td>0.35</td><td>0.53</td></tr><tr><td>Fear</td><td>M/S Original</td><td>0.84</td><td>0.03</td><td>0.25</td></tr><tr><td>Fear</td><td>M/S Turk</td><td>0.77</td><td>0.14</td><td>0.18</td></tr><tr><td>Joy</td><td>Dance Original</td><td>0.57</td><td>0.8</td><td>0.02</td></tr><tr><td>Joy</td><td>Dance Turk</td><td>0.45</td><td>0</td><td>0</td></tr><tr><td>Joy</td><td>M/S Original</td><td>0.5</td><td>0.67</td><td>0.02</td></tr><tr><td>Joy</td><td>M/S Turk</td><td>0.54</td><td>0</td><td>0</td></tr><tr><td colspan=\"2\">Sadness Dance Original</td><td>0.61</td><td>0.47</td><td>0.77</td></tr><tr><td>Sadness</td><td>Dance Turk</td><td>0.53</td><td>0.42</td><td>0.79</td></tr><tr><td>Sadness</td><td>M/S Original</td><td>0.55</td><td>0.4</td><td>0.77</td></tr><tr><td>Sadness</td><td>M/S Turk</td><td>0.61</td><td>0.58</td><td>0.71</td></tr><tr><td colspan=\"2\">Surprise Dance Original</td><td>0.86</td><td>0</td><td>0</td></tr><tr><td>Surprise</td><td>Dance Turk</td><td>0.87</td><td>0</td><td>0</td></tr><tr><td>Surprise</td><td>M/S Original</td><td>0.97</td><td>0</td><td>0</td></tr><tr><td>Surprise</td><td>M/S Turk</td><td>0.91</td><td>0</td><td>0</td></tr></table>", |
|
"html": null, |
|
"text": "BERT Trained on CBET Variations", |
|
"type_str": "table" |
|
}, |
|
"TABREF17": { |
|
"num": null, |
|
"content": "<table><tr><td>Emotion</td><td>Test</td><td colspan=\"3\">Accuracy Precision Recall</td></tr><tr><td>Anger</td><td>Dance Original</td><td>0.85</td><td>0.1</td><td>0.01</td></tr><tr><td>Anger</td><td>Dance Turk</td><td>0.84</td><td>0</td><td>0</td></tr><tr><td>Anger</td><td>M/S Original</td><td>0.86</td><td>0.04</td><td>0.02</td></tr><tr><td>Anger</td><td>M/S Turk</td><td>0.87</td><td>0</td><td>0</td></tr><tr><td colspan=\"2\">Disgust Dance Original</td><td>0.78</td><td>0</td><td>0</td></tr><tr><td>Disgust</td><td>Dance Turk</td><td>0.95</td><td>0</td><td>0</td></tr><tr><td>Disgust</td><td>M/S Original</td><td>0.96</td><td>0</td><td>0</td></tr><tr><td>Disgust</td><td>M/S Turk</td><td>0.96</td><td>0</td><td>0</td></tr><tr><td>Fear</td><td>Dance Original</td><td>0.74</td><td>0.26</td><td>0.17</td></tr><tr><td>Fear</td><td>Dance Turk</td><td>0.76</td><td>0.27</td><td>0.18</td></tr><tr><td>Fear</td><td>M/S Original</td><td>0.93</td><td>0</td><td>0</td></tr><tr><td>Fear</td><td>M/S Turk</td><td>0.81</td><td>0.11</td><td>0.09</td></tr><tr><td>Joy</td><td>Dance Original</td><td>0.58</td><td>0.61</td><td>0.12</td></tr><tr><td>Joy</td><td>Dance Turk</td><td>0.48</td><td>0.71</td><td>0.1</td></tr><tr><td>Joy</td><td>M/S Original</td><td>0.53</td><td>0.73</td><td>0.11</td></tr><tr><td>Joy</td><td>M/S Turk</td><td>0.56</td><td>0.67</td><td>0.09</td></tr><tr><td colspan=\"2\">Sadness Dance Original</td><td>0.6</td><td>0.36</td><td>0.18</td></tr><tr><td>Sadness</td><td>Dance Turk</td><td>0.65</td><td>0.54</td><td>0.21</td></tr><tr><td>Sadness</td><td>M/S Original</td><td>0.66</td><td>0.36</td><td>0.03</td></tr><tr><td>Sadness</td><td>M/S Turk</td><td>0.54</td><td>0.58</td><td>0.16</td></tr><tr><td colspan=\"2\">Surprise Dance Original</td><td>0.87</td><td>0</td><td>0</td></tr><tr><td>Surprise</td><td>Dance Turk</td><td>0.86</td><td>0</td><td>0</td></tr><tr><td>Surprise</td><td>M/S Original</td><td>0.98</td><td>0</td><td>0</td></tr><tr><td>Surprise</td><td>M/S Turk</td><td>0.91</td><td>0</td><td>0</td></tr></table>", |
|
"html": null, |
|
"text": "Naive Bayes Trained on Augmented CBET", |
|
"type_str": "table" |
|
}, |
|
"TABREF18": { |
|
"num": null, |
|
"content": "<table><tr><td colspan=\"2\">Emotion Train</td><td>Test</td><td colspan=\"4\">Accuracy AUC Prec Rec</td></tr><tr><td>Anger</td><td>TEC</td><td>Dance</td><td>0.85</td><td colspan=\"3\">0.58 0.39 0.22</td></tr><tr><td>Anger</td><td>TEC</td><td>M/S</td><td>0.89</td><td colspan=\"3\">0.55 0.29 0.15</td></tr><tr><td>Anger</td><td colspan=\"2\">TEC Dance Turk</td><td>0.78</td><td colspan=\"3\">0.49 0.11 0.08</td></tr><tr><td>Anger</td><td>TEC</td><td>M/S Turk</td><td>0.92</td><td>0.56</td><td>1</td><td>0.13</td></tr><tr><td colspan=\"2\">Disgust TEC</td><td>Dance</td><td>0.78</td><td>0.5</td><td>0</td><td>0</td></tr><tr><td colspan=\"2\">Disgust TEC</td><td>M/S</td><td>0.97</td><td>0.5</td><td>0</td><td>0</td></tr><tr><td colspan=\"3\">Disgust TEC Dance Turk</td><td>0.95</td><td>0.5</td><td>0</td><td>0</td></tr><tr><td colspan=\"2\">Disgust TEC</td><td>M/S Turk</td><td>0.96</td><td>0.5</td><td>0</td><td>0</td></tr><tr><td>Fear</td><td>TEC</td><td>Dance</td><td>0.74</td><td colspan=\"3\">0.58 0.33 0.32</td></tr><tr><td>Fear</td><td>TEC</td><td>M/S</td><td>0.9</td><td>0.76</td><td colspan=\"2\">0.1 0.63</td></tr><tr><td>Fear</td><td colspan=\"2\">TEC Dance Turk</td><td>0.77</td><td>0.54</td><td colspan=\"2\">0.3 0.18</td></tr><tr><td>Fear</td><td>TEC</td><td>M/S Turk</td><td>0.8</td><td colspan=\"3\">0.61 0.27 0.36</td></tr><tr><td>Joy</td><td>TEC</td><td>Dance</td><td>0.67</td><td colspan=\"3\">0.66 0.65 0.54</td></tr><tr><td>Joy</td><td>TEC</td><td>M/S</td><td>0.66</td><td colspan=\"3\">0.66 0.68 0.62</td></tr><tr><td>Joy</td><td colspan=\"2\">TEC Dance Turk</td><td>0.68</td><td colspan=\"3\">0.68 0.74 0.63</td></tr><tr><td>Joy</td><td>TEC</td><td>M/S Turk</td><td>0.66</td><td colspan=\"3\">0.66 0.62 0.65</td></tr><tr><td colspan=\"2\">Sadness TEC</td><td>Dance</td><td>0.7</td><td>0.61</td><td colspan=\"2\">0.7 0.28</td></tr><tr><td colspan=\"2\">Sadness TEC</td><td>M/S</td><td>0.73</td><td>0.6</td><td colspan=\"2\">0.83 0.23</td></tr><tr><td colspan=\"3\">Sadness TEC Dance Turk</td><td>0.75</td><td colspan=\"3\">0.69 0.76 0.47</td></tr><tr><td colspan=\"2\">Sadness TEC</td><td>M/S Turk</td><td>0.51</td><td colspan=\"3\">0.49 0.46 0.13</td></tr><tr><td colspan=\"2\">Surprise TEC</td><td>Dance</td><td>0.85</td><td>0.49</td><td>0</td><td>0</td></tr><tr><td colspan=\"2\">Surprise TEC</td><td>M/S</td><td>0.97</td><td>0.49</td><td>0</td><td>0</td></tr><tr><td colspan=\"3\">Surprise TEC Dance Turk</td><td>0.88</td><td>0.5</td><td>0</td><td>0</td></tr><tr><td colspan=\"2\">Surprise TEC</td><td>M/S Turk</td><td>0.9</td><td>0.49</td><td>0</td><td>0</td></tr><tr><td/><td/><td/><td/><td/><td/><td>: Random Forest Trained on Transformed</td></tr><tr><td/><td/><td/><td/><td/><td/><td>CBET</td></tr><tr><td/><td/><td/><td/><td/><td/><td>Naive Bayes and Random Forest models, while</td></tr><tr><td/><td/><td/><td/><td/><td/><td>models for surprise were more or less equivalent</td></tr></table>", |
|
"html": null, |
|
"text": "", |
|
"type_str": "table" |
|
}, |
|
"TABREF19": { |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "", |
|
"type_str": "table" |
|
}, |
|
"TABREF21": { |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |