|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T02:11:57.982264Z" |
|
}, |
|
"title": "Developing a New Classifier for Automated Identification of Incivility in Social Media", |
|
"authors": [ |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Davidson", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "ssdavidson@ucdavis.edu" |
|
}, |
|
{ |
|
"first": "Qiusi", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Amsterdam", |
|
"location": {} |
|
}, |
|
"email": "qssun@ucdavis.edu" |
|
}, |
|
{ |
|
"first": "Magdalena", |
|
"middle": [], |
|
"last": "Wojcieszak", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Amsterdam", |
|
"location": {} |
|
}, |
|
"email": "mwojcieszak@ucdavis.edu" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Incivility is not only prevalent on online social media platforms, but also has concrete effects on individual users, online groups, the platforms themselves, and the society at large. Given the prevalence and effects of online incivility, and the challenges involved in humanbased incivility detection, it is urgent to develop validated and versatile automatic approaches to identifying uncivil posts and comments. This project advances both a neural, BERT-based classifier as well as a logistic regression classifier to identify uncivil comments. The classifier is trained on a dataset of Reddit posts, which are annotated for incivility, and further expanded using a combination of labeled data from Reddit and Twitter. Our best performing model achieves an F 1 of 0.802 on our Reddit test set. The final model is not only applicable across social media platforms and their distinct data structures, but also computationally versatile, and-as such-ready to be used on vast volumes of online data. All trained models and annotated data are made available to the research community.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Incivility is not only prevalent on online social media platforms, but also has concrete effects on individual users, online groups, the platforms themselves, and the society at large. Given the prevalence and effects of online incivility, and the challenges involved in humanbased incivility detection, it is urgent to develop validated and versatile automatic approaches to identifying uncivil posts and comments. This project advances both a neural, BERT-based classifier as well as a logistic regression classifier to identify uncivil comments. The classifier is trained on a dataset of Reddit posts, which are annotated for incivility, and further expanded using a combination of labeled data from Reddit and Twitter. Our best performing model achieves an F 1 of 0.802 on our Reddit test set. The final model is not only applicable across social media platforms and their distinct data structures, but also computationally versatile, and-as such-ready to be used on vast volumes of online data. All trained models and annotated data are made available to the research community.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Given the growing polarization in the United States, the increasing popularity of partisan media, and the widespread use of social media for information and discussion (see Iyengar et al. (2019) for a review), many scholars and observers worry about the accelerated use and spread of incivility in the online environment. Incivility, defined as \"features of discussion that convey disrespectful tone toward the discussion forum, its participants, or its topics\" (Coe et al., 2014) is a common aspect of many online communities, especially anonymous forums (Reader, 2012 ) such as Reddit. Estimates suggest that more than 84% of Americans have experienced incivility online, and among those who have ever experienced it, the number of average weekly en-counters with incivility was as high as 10.6 times (KRC Research, 2018) . In addition to lowering the standards of public discourse, incivility has concrete effects on users, online discussions, and social media platforms. The use of and exposure to incivility generates negative emotions, such as anger, anxiety, or mental distress, and is related to aggression (Gervais, 2015) and hostile communication (Groshek and Cutino, 2016) . Incivility also turns users away from online discussions altogether (Anderson et al., 2014; Bauman et al., 2013; Moor et al., 2010; Ransbotham et al., 2016) . Given these reasons for the public and industry to be concerned with online incivility, many companies seek to automatically detect incivility in order to understand its scope, identify the online communities in which incivility is particularly prevalent, and -ultimately -address the problem.", |
|
"cite_spans": [ |
|
{ |
|
"start": 173, |
|
"end": 194, |
|
"text": "Iyengar et al. (2019)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 462, |
|
"end": 480, |
|
"text": "(Coe et al., 2014)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 556, |
|
"end": 569, |
|
"text": "(Reader, 2012", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 803, |
|
"end": 823, |
|
"text": "(KRC Research, 2018)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 1157, |
|
"end": 1183, |
|
"text": "(Groshek and Cutino, 2016)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1254, |
|
"end": 1277, |
|
"text": "(Anderson et al., 2014;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1278, |
|
"end": 1298, |
|
"text": "Bauman et al., 2013;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1299, |
|
"end": 1317, |
|
"text": "Moor et al., 2010;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1318, |
|
"end": 1342, |
|
"text": "Ransbotham et al., 2016)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This project offers a step in this direction. We present machine learning models for detecting incivility in social media, models that are not only computationally efficient but also applicable across platforms. We propose both a BERT-based neural classifier as well as a logistic regression based classifier trained on manually annotated and artificially labeled data. Our results suggest that the proposed models perform well across distinct data/communication structures of different platforms, and, as such, can be easily applied to detect incivility.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "There is considerable conceptual and operational ambiguity in the literature on incivility and related concepts under the umbrella of offensive or intolerant speech (see (Rossini, 2020) for a review). Some studies use incivility interchangeably with hate speech, which refers to speech that aims to discriminate against a certain identity group, or aggressive or toxic language, which includes personal attacks (R\u00f6sner and Kr\u00e4mer, 2016) . However, incivility is a broader concept, which focuses on content that goes against acceptable social norms in terms of vulgarity, name-calling, or offensive language (Papacharissi, 2004) , whereas hate speech or aggressive language captures more specifically discourse that offends, derogates, or silences others and may promote harm (Rossini, 2020). Increasingly, incivility is conceptually and operationally distinguished from such intolerant discourse, and evidence suggests that the effects of these two forms of expressions also differ (Rossini, 2020). Definitions of incivility vary, ranging from \"a normdefying behavior\" (Gervais, 2015), \"an explicit attack\" (Anderson and Huntington, 2017), to the violation of interpersonal politeness norms (Mutz, 2015; Mutz and Reeves, 2005 ), yet most include a lack of respect toward discussion participants or arguments (Santana, 2014) , and a impolite tone of discourse (Papacharissi, 2004) . The often used definition, which we adopt for the purpose of our machine learning model, sees incivility as features of discussion that convey disrespectful tone toward the discussion participants or its topics, including name-calling, mean-spirited or disparaging words directed at a person or group of people, an idea, plan, policy, or behavior, vulgarity, using profanity or language that would not be considered proper in professional discourse, and pejorative remarks about the way in which a person communicates (Coe et al., 2014) . 
As such, our approach encompasses both the less societally detrimental foul language or harsh tone as well as the more intolerant discourse.", |
|
"cite_spans": [ |
|
{ |
|
"start": 411, |
|
"end": 436, |
|
"text": "(R\u00f6sner and Kr\u00e4mer, 2016)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 607, |
|
"end": 627, |
|
"text": "(Papacharissi, 2004)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 1191, |
|
"end": 1203, |
|
"text": "(Mutz, 2015;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1204, |
|
"end": 1225, |
|
"text": "Mutz and Reeves, 2005", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 1308, |
|
"end": 1323, |
|
"text": "(Santana, 2014)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 1359, |
|
"end": 1379, |
|
"text": "(Papacharissi, 2004)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 1900, |
|
"end": 1918, |
|
"text": "(Coe et al., 2014)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Previous Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "From a technical perspective, previous research using machine learning models to detect incivility and other offensive or intolerant language online has focused primarily on the use of logistic regression (Theocharis et al., 2020; Daxenberger et al., 2018; Maity et al., 2018) , support vector machines (Joksimovic et al., 2019; Maity et al., 2018) , and various neural classification models (Sadeque et al., 2019) . BERT (Devlin et al., 2019) and related transfomer language models have been used in related tasks, such as identifying abusive language on Twitter (Nikolov and Radivchev, 2019; Risch et al., 2019) , including many entrants in the OffensEval task at SemEval-2020 (Zampieri et al., 2020) . To our knowledge, this paper is the first to utilize a fine-tuned BERT model to identify incivility on social media platforms, and one of few projects that train the classifier on data from more than one platform. Also, past work on identifying incivility over time has mostly analyzed Twitter data during certain political events, such as the 2016 presidential election in the US (Siegel et al., 2018) , and/or looked at political incivility in specific contexts (e.g., among politicians, e.g., (Theocharis et al., 2020) . These rather narrow, single-platform foci limit the applicability of the developed classifications, a limitation we address in this project.", |
|
"cite_spans": [ |
|
{ |
|
"start": 205, |
|
"end": 230, |
|
"text": "(Theocharis et al., 2020;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 231, |
|
"end": 256, |
|
"text": "Daxenberger et al., 2018;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 257, |
|
"end": 276, |
|
"text": "Maity et al., 2018)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 303, |
|
"end": 328, |
|
"text": "(Joksimovic et al., 2019;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 329, |
|
"end": 348, |
|
"text": "Maity et al., 2018)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 392, |
|
"end": 414, |
|
"text": "(Sadeque et al., 2019)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 422, |
|
"end": 443, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 564, |
|
"end": 593, |
|
"text": "(Nikolov and Radivchev, 2019;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 594, |
|
"end": 613, |
|
"text": "Risch et al., 2019)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 679, |
|
"end": 702, |
|
"text": "(Zampieri et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1086, |
|
"end": 1107, |
|
"text": "(Siegel et al., 2018)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 1201, |
|
"end": 1226, |
|
"text": "(Theocharis et al., 2020)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Previous Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In addition to these contributions of our work, our primary contribution may lie in our data augmentation method. Specifically, we extend recent approaches to automatically label additional training data to improve the performance of a logistic regression classifier. Previous work in detection of offensive language has used back-translation (Ibrahim et al., 2020) and data transformation techniques (Rizos et al., 2019) to augment limited training data. While some work (Theocharis et al., 2020) utilizes the Google Perspectives API to label additional training data, which introduces noise to the operationalization of incivility, we take advantage of our well-performing BERT classification model to generate artificial training data for a logistic regression classifier. The resulting classifier can be efficiently run on CPU and is far less computationally expensive than our comparably performing BERT model. This extension makes our classifier easily applicable to vast amounts of data and readily implemented on social media platforms or the comments sections of websites of news media organizations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 343, |
|
"end": 365, |
|
"text": "(Ibrahim et al., 2020)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 401, |
|
"end": 421, |
|
"text": "(Rizos et al., 2019)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 472, |
|
"end": 497, |
|
"text": "(Theocharis et al., 2020)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Previous Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "communities, known as subreddits. Each subreddit has a general topic, behavioral norms, and community standards, allowing for a creation of a diverse dataset, which further increases the applicability of the resulting machine learning model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Previous Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "To tackle the detection problem, we identified the most popular subreddits from 2006 to 2019 that contained 95% of the total comments by (1) the number of comments in the subreddit each year, and (2) the number of followers that commented in the subreddit each year, which resulted in 9355 subreddits across the years. We then collected 5000 comments from these subreddits using stratified random sampling technique, such that the random sampling from each year is based on each year's proportion in the total number of comments. These 5000 posts were the manually labeled.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Previous Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Instead of adapting annotation schema that focused on profanity and swear words or phrases (i.e., the more narrow definition of incivility) (Zampieri et al., 2019; Mohan et al., 2017; Almerekhi et al., 2020) , we developed a coding manual to classify comments according to four dimensions present in offensive speech more broadly. We account for whether a comment contains: (1) name-calling, mean-spirited or disparaging words directed at a person or a group of people; (2) aspersion, meanspirited or disparaging words directed at an idea, plan, policy or behavior; (3) pejorative or disparaging remark about the way in which a person communicates, and (4) vulgarity, profanity or language that would not be considered proper. Our operational approach accounted for the content aspect (e.g., vulgarity or profanity, such as \"you're a dumbass for simplifying the issue and trying to jump right into the helm of the 'y'r all hypocrites' bandwagon\") and the different targets of incivility or foul content included in the intolerant discourse (e.g., \"... the interests of left-handed black female dwarves\"), to create a comprehensive and inclusive annotated dataset for model building. Annotators were asked to apply a binary label to indicate whether or not the comment contains incivility. The annotators were three undergraduate students in social sciences at UC Davis, two native English speakers and one with English as the second language. Two annotators are heavy Reddit users and one is a user of other social media. The annotators were trained on the definitions and proce-dures, and each of them completed five pilot coding exercises. Each annotator first independently coded a random set of 50 comments with Fleiss's kappa of 0.618. They then compared results, discussed and resolved discrepancies, and clarified confusions. These steps were repeated multiple times with increasingly large comment sets until an acceptable agreement level was reached. 
In total, all three annotators completed 1000 comments together during training, with Fleiss's kappa of 0.663. The major discrepancies pertain to potentially sarcastic comments (e.g., \"Great, now we're paying for CBC to promote cuckoldry\"), which some coders saw as uncivil and others as innocent sarcasm. After an acceptable coding precision was established among the three annotators, the remaining 4000 comments were randomly divided into three sets and each annotator independently coded an assigned set. The final result of this process is a set of 5000 comments labeled for incivility. Additionally, our dataset includes coding at the subreddit level to identify subreddits that were political, non-political, or mixed (i.e., contained some political and some non-political content). This allows us to analyze the prevalence of incivility across different kinds of online discussions and across the political spectrum.", |
|
"cite_spans": [ |
|
{ |
|
"start": 140, |
|
"end": 163, |
|
"text": "(Zampieri et al., 2019;", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 164, |
|
"end": 183, |
|
"text": "Mohan et al., 2017;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 184, |
|
"end": 207, |
|
"text": "Almerekhi et al., 2020)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset Annotation", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "To demonstrate the efficacy of our collected dataset, we use supervised machine learning to automatically identify uncivil Reddit posts. However, annotating a dataset large enough to train a state-ofthe-art neural classifier from scratch is a costly and time-consuming undertaking. We experimented with several neural binary classifiers, with our bestperforming models built on top of transformerbased language models, namely BERT (Devlin et al., 2019) and its relative, DistilBERT (Sanh et al., 2019) . Past work has demonstrated that finetuning large, pre-trained language models, such as BERT and DistilBERT, is an effective method for creating a high-quality neural classifier with limited supervised training data. As described in Sun et al. (2019) , we conduct additional pretraining of the BERT-base and DistilBERT-base models on a large collection of Reddit posts as in-domain data. Once pretrained, we fine-tune our models for classification on our annotated dataset of Reddit comments, which trained annotators classified with binary labels for incivility.", |
|
"cite_spans": [ |
|
{ |
|
"start": 431, |
|
"end": 452, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 482, |
|
"end": 501, |
|
"text": "(Sanh et al., 2019)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 736, |
|
"end": 753, |
|
"text": "Sun et al. (2019)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Classifier Training", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Finally, in an effort to extend past work by creating a more flexible, platform agnostic classifier, we train a logistic regression classifier for incivility prediction in social media by combining the data presented in Theocharis et al. (2020) with our annotated and artificially labeled datasets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 220, |
|
"end": 244, |
|
"text": "Theocharis et al. (2020)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Classifier Training", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Our Reddit dataset (including annotation disagreements), test predictions, scripts and models are available on the project GitHub repository 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Classifier Training", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Our BERT and DistilBERT models begin with the respective base pretrained language models, as implemented in HuggingFace's Transformer's package. We then further pretrain these models on dataset of 3 million Reddit posts, for 100,000 training steps (as suggested by Sun et al. (2019) ) using the masked word prediction task (Devlin et al., 2019) . We then utilize these pretrained models in a classification setup, utilizing a softmax layer to predict binary class probability based on the [CLS] token in BERT's final hidden layer. For classification fine-tuning, all inputs to the models are limited to 256 tokens in length, with a training batch size of 16. We use the AdamW optimizer (Gugger and Howard, 2018) with default learning rate and epsilon values. We fine-tune our model for classification for four epochs on our dataset of 5,000 Reddit posts which are coded for incivility, with 10% of the data set aside for training validation, and 1000 annotated posts set aside for model testing. Classification results using BERT and DistilBERT are shown in Table 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 265, |
|
"end": 282, |
|
"text": "Sun et al. (2019)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 323, |
|
"end": 344, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 489, |
|
"end": 494, |
|
"text": "[CLS]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 686, |
|
"end": 711, |
|
"text": "(Gugger and Howard, 2018)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1058, |
|
"end": 1065, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments and Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Precision Recall F 1 BERT 0.814 0.76 0.786 DistilBERT 0.936 0.702 0.802 One major goal of this project is to classify multiple years of Reddit data for further analysis of incivility across political and non-political subreddits. Despite the acceptable performance of our BERT classification models, the models were too computationally expensive to classify the approximately 800 million posts per year we collected from Reddit. To address this constraint, we also train a logistic regression classification model to be able to classify large amounts of Reddit data with-out the use of expensive neural classifiers. However, given the small size of our annotated training set, we must generate additional training data to train an effective logistic regression model. In order to improve system performance, we first use our fine-tuned DistilBERT model to classify a large collection of Reddit posts. We then uptrain a logistic regression model on this synthetic data, along with our annotated data. As detailed in Section 5, the resulting model achieves an F 1 score which is competitive with our BERT and DistilBERT models, while also being able to classify data more quickly and at lower computational cost, making our model widely applicable.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "All logistic regression models are trained using TFIDF of stemmed unigrams as features. Given the relative imbalance of labels in our training data, in which positive examples of incivility represent only 10.3% of annotated posts, we use ADASYN (He et al., 2008) to generate additional synthetic data for oversampling. We train a second model on synthetic data consisting of 5 million Reddit posts which are labeled for incivility using our trained DistilBERT model. Results are shown in Table 2 To test the overlap of concepts such as hate speech and offensive language with incivility, we applied the classifier provided by Davidson et al. (2017) to the test portion of our Reddit dataset. To conduct our test, we combined the classes \"offensive language\" and \"hate speech\" predicted by the Davidson et al. (2017) classifier into a single class. On our Reddit data, this classifier achieves an F1 of 0.242, indicating limited overlap between the these domains. This test demonstrates that the definitional, conceptual, and operational differences between incivility and related domains of offensive speech are indeed represented in our labeled data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 245, |
|
"end": 262, |
|
"text": "(He et al., 2008)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 626, |
|
"end": 648, |
|
"text": "Davidson et al. (2017)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 793, |
|
"end": 815, |
|
"text": "Davidson et al. (2017)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 488, |
|
"end": 495, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In order to further test the efficacy of our implementation, we train a logistic regression model as outlined above using Twitter data collected and annotated by Theocharis et al. (2020) . Finally, to create our platform agnostic model, we train a logistic regression model by combining our annotated and synthetic Reddit data with the annotated and synthetic data from Theocharis et al. (2020) , which we test on the Theocharis et al. (2020) Twitter test set, as shown in Table 3 . for classifier training. We have applied our trained classifier to 95% of Reddit comments from the year 2017, finding that 9.21% of non-political comments are uncivil, compared to 14.75% of political comments; initial results that indicate relative prevalence of incivility in online political discourse. Due to the scale of the data to be ultimately classified, we were concerned as much with computational efficiency as with prediction accuracy when building our incivility classifier. When we use our trained BERT model to generate a large quantity of synthetically labeled training data, the performance of our log regression model is comparable to that of the our fine-tuned BERT models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 162, |
|
"end": 186, |
|
"text": "Theocharis et al. (2020)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 370, |
|
"end": 394, |
|
"text": "Theocharis et al. (2020)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 418, |
|
"end": 442, |
|
"text": "Theocharis et al. (2020)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 473, |
|
"end": 480, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Concern with computational efficiency also informed our choice of features in our logistic regression model. While alternate features could be used, such as Doc2Vec or Word2Vec embeddings, we chose to use TFIDF due to the simplicity of calculating these features. Additionally, the choice of TFIDF was informed by the work of Theocharis et al. (2020) , who demonstrate the utility of TFIDF for the task for incivility classification. Finally, the fact that our TFIDF-based logistic regression model performs similarly well to the BERT model is evidence of the effectiveness of the choice of TFIDF features. That said, the use of alternate features may improve model performance, and we leave this to future work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 326, |
|
"end": 350, |
|
"text": "Theocharis et al. (2020)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analysis and Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The similarity between the predictions made by our BERT model and our logistic regression model indicates that the logistic regression model retains much of the predictive power of the BERT model. In fact, across 996 test comments, the two models disagreed on only 27 comments, for a rate of 2.7%. From reviewing the disagreements we can identify several classes of comment on which the two mod-els often disagree. The first, and most obvious, is very long comments. BERT is designed to truncate long input text (our implementation truncates inputs longer than 256 tokenized word pieces). Thus, our BERT model may mislabel longer comments in which the incivility occurs later in the comment. Another source of disagreement comes from the fact that our TFIDF-based classifier tends to be more sensitive to individual lexical items, which is to be expected as BERT is known to condense far more semantic information than do count-based vectorization techniques such as TFIDF (Jawahar et al., 2019) . For example, our regression model mislabels the comment \"This is dope! Does anyone know where I can purchase one for myself?\" as an uncivil comment, presumably due to the presence of the word \"dope\", while our BERT model labels the comment correctly. In future work, we plan to conduct a more rigorous analysis of labelling disagreements between the two models to better understand the role of lexicon and compositional semantics in the incivility classification task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 973, |
|
"end": 995, |
|
"text": "(Jawahar et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analysis and Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Finally, we demonstrate the flexibility of our model training strategy by creating a combined incivility prediction model using our automatically labeled Reddit data with the synthetic data provided by Theocharis et al. (2020) . The resulting model has shown promise as a platform agnostic incivility classifier model for social media.", |
|
"cite_spans": [ |
|
{ |
|
"start": 202, |
|
"end": 226, |
|
"text": "Theocharis et al. (2020)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analysis and Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In this paper, we present a new dataset of Reddit posts annotated at the comment level for incivility, as well as at the subreddit level for political content. Further, we demonstrate the efficacy of this dataset to train machine learning models for incivility detection, both alone and in combination with previously available datasets, to create a platform agnostic classifier for incivility on social media.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Using our trained classifier, our future goal is to provide a systematic overview of trends in incivility on social media, across time and variety of discussion topics. The project aims to capture the fluctuations in the prevalence of incivility in political and non-political online spaces, politically homogeneous and heterogeneous discussions, liberal and conservative ones, and also among different non-political topics. The anticipated study will add our understanding of the development of online incivility and shed light on incivility interventions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "DatasetWe present a corpus of Reddit data annotated for incivility at the post level, and further annotated for political/non-political content at the subreddit level. We chose to use Reddit as the source of our dataset because Reddit is the sixth most popular website in the US and the third most visited social media platform following YouTube and Facebook (Alexa.com, 2019). There are more than 430 million active users worldwide on Reddit(Perez, 2019). Also, the anonymous nature of Reddit makes it popular for sharing information and engaging in long and complex discussions, making it ideal for observing online discourse. Furthermore, the public nature of Reddit allowed us to gather a large number of posts from across various user", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/ssdavidson/reddit incivility", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Alexa -Top Sites in United States -Alexa", |
|
"authors": [ |
|
{ |
|
"first": "Alexa", |
|
"middle": [ |
|
"Com" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexa.com. 2019. Alexa -Top Sites in United States - Alexa.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Investigating Toxicity Across Multiple Reddit Communities, Users, and Moderators", |
|
"authors": [ |
|
{ |
|
"first": "Hind", |
|
"middle": [], |
|
"last": "Almerekhi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Companion Proceedings of the Web Conference 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "294--298", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hind Almerekhi, Supervised by Bernard J Jansen, and co-supervised by Haewoon Kwak. 2020. Investigat- ing Toxicity Across Multiple Reddit Communities, Users, and Moderators. In Companion Proceedings of the Web Conference 2020, pages 294-298.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Online incivility and risk perceptions of emerging technologies", |
|
"authors": [ |
|
{ |
|
"first": "Dominique", |
|
"middle": [], |
|
"last": "Ashley A Anderson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Brossard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Dietram", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Scheufele", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Xenos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ladwig", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Journal of Computer-Mediated Communication", |
|
"volume": "19", |
|
"issue": "3", |
|
"pages": "373--387", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashley A Anderson, Dominique Brossard, Dietram A Scheufele, Michael A Xenos, and Peter Ladwig. 2014. The \"nasty effect:\" Online incivility and risk perceptions of emerging technologies. Journal of Computer-Mediated Communication, 19(3):373- 387.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Social media, science, and attack discourse: How Twitter discussions of climate change use sarcasm and incivility", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Ashley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heidi", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Anderson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Huntington", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Science Communication", |
|
"volume": "39", |
|
"issue": "5", |
|
"pages": "598--620", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashley A Anderson and Heidi E Huntington. 2017. So- cial media, science, and attack discourse: How Twit- ter discussions of climate change use sarcasm and incivility. Science Communication, 39(5):598-620.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Associations among bullying, cyberbullying, and suicide in high school students", |
|
"authors": [ |
|
{ |
|
"first": "Sheri", |
|
"middle": [], |
|
"last": "Bauman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Russell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jenny", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Toomey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Walker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Journal of adolescence", |
|
"volume": "36", |
|
"issue": "2", |
|
"pages": "341--350", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sheri Bauman, Russell B Toomey, and Jenny L Walker. 2013. Associations among bullying, cyberbullying, and suicide in high school students. Journal of ado- lescence, 36(2):341-350.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Online and uncivil? patterns and determinants of incivility in newspaper website comments", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Coe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kate", |
|
"middle": [], |
|
"last": "Kenski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen A", |
|
"middle": [], |
|
"last": "Rains", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Journal of Communication", |
|
"volume": "64", |
|
"issue": "4", |
|
"pages": "658--679", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin Coe, Kate Kenski, and Stephen A Rains. 2014. Online and uncivil? patterns and determinants of in- civility in newspaper website comments. Journal of Communication, 64(4):658-679.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Automated Hate Speech Detection and the Problem of Offensive Language", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Davidson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dana", |
|
"middle": [], |
|
"last": "Warmsley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Macy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ingmar", |
|
"middle": [], |
|
"last": "Weber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 11th International AAAI Conference on Web and Social Media, ICWSM '17", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "512--515", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Davidson, Dana Warmsley, Michael Macy, and Ingmar Weber. 2017. Automated Hate Speech Detection and the Problem of Offensive Language. In Proceedings of the 11th International AAAI Con- ference on Web and Social Media, ICWSM '17, pages 512-515.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Automatically Detecting Incivility in Online Discussions of News Media", |
|
"authors": [ |
|
{ |
|
"first": "Johannes", |
|
"middle": [], |
|
"last": "Daxenberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc", |
|
"middle": [], |
|
"last": "Ziegele", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oliver", |
|
"middle": [], |
|
"last": "Quiring", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "2018 IEEE 14th International Conference on e-Science (e-Science)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "318--319", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Johannes Daxenberger, Marc Ziegele, Iryna Gurevych, and Oliver Quiring. 2018. Automatically Detect- ing Incivility in Online Discussions of News Media. In 2018 IEEE 14th International Conference on e- Science (e-Science), pages 318-319. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Un- derstanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Incivility online: Affective and behavioral reactions to uncivil political posts in a web-based experiment", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Bryan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Gervais", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Journal of Information Technology & Politics", |
|
"volume": "12", |
|
"issue": "2", |
|
"pages": "167--185", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bryan T Gervais. 2015. Incivility online: Affective and behavioral reactions to uncivil political posts in a web-based experiment. Journal of Information Tech- nology & Politics, 12(2):167-185.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Meaner on mobile: Incivility and impoliteness in communicating contentious politics on sociotechnical networks", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Groshek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chelsea", |
|
"middle": [], |
|
"last": "Cutino", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Social Media+ Society", |
|
"volume": "2", |
|
"issue": "4", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Groshek and Chelsea Cutino. 2016. Meaner on mobile: Incivility and impoliteness in communicat- ing contentious politics on sociotechnical networks. Social Media+ Society, 2(4):2056305116677137.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "AdamW and Super-convergence is now the fastest way to train neural nets", |
|
"authors": [ |
|
{ |
|
"first": "Sylvain", |
|
"middle": [], |
|
"last": "Gugger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Howard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sylvain Gugger and Jeremy Howard. 2018. AdamW and Super-convergence is now the fastest way to train neural nets. https://www.fast.ai/2018/ 07/02/adam-weight-decay/.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "ADASYN: Adaptive synthetic sampling approach for imbalanced learning", |
|
"authors": [ |
|
{ |
|
"first": "Haibo", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Bai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edwardo", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Garcia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shutao", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "IEEE international joint conference on neural networks (IEEE world congress on computational intelligence)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1322--1328", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haibo He, Yang Bai, Edwardo A Garcia, and Shutao Li. 2008. ADASYN: Adaptive synthetic sam- pling approach for imbalanced learning. In 2008 IEEE international joint conference on neural net- works (IEEE world congress on computational intel- ligence), pages 1322-1328. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Alexu-backtranslation-tl at semeval-2020 task [12]: Improving offensive language detection using data augmentation and transfer learning", |
|
"authors": [ |
|
{ |
|
"first": "Mai", |
|
"middle": [], |
|
"last": "Ibrahim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marwan", |
|
"middle": [], |
|
"last": "Torki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nagwa", |
|
"middle": [], |
|
"last": "El-Makky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the International Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mai Ibrahim, Marwan Torki, and Nagwa El-Makky. 2020. Alexu-backtranslation-tl at semeval-2020 task [12]: Improving offensive language detection using data augmentation and transfer learning. In Proceedings of the International Workshop on Se- mantic Evaluation (SemEval).", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "The origins and consequences of affective polarization in the United States", |
|
"authors": [ |
|
{ |
|
"first": "Shanto", |
|
"middle": [], |
|
"last": "Iyengar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yphtach", |
|
"middle": [], |
|
"last": "Lelkes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Levendusky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Neil", |
|
"middle": [], |
|
"last": "Malhotra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sean", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Westwood", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Annual Review of Political Science", |
|
"volume": "22", |
|
"issue": "", |
|
"pages": "129--146", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shanto Iyengar, Yphtach Lelkes, Matthew Levendusky, Neil Malhotra, and Sean J Westwood. 2019. The ori- gins and consequences of affective polarization in the United States. Annual Review of Political Sci- ence, 22:129-146.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "What Does BERT Learn about the Structure of Language?", |
|
"authors": [ |
|
{ |
|
"first": "Ganesh", |
|
"middle": [], |
|
"last": "Jawahar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Beno\u00eet", |
|
"middle": [], |
|
"last": "Sagot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Djam\u00e9", |
|
"middle": [], |
|
"last": "Seddah", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3651--3657", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ganesh Jawahar, Beno\u00eet Sagot, and Djam\u00e9 Seddah. 2019. What Does BERT Learn about the Structure of Language? In Proceedings of the 57th Annual Meeting of the Association for Computational Lin- guistics, pages 3651-3657.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Automated Identification of Verbally Abusive Behaviors in Online Discussions", |
|
"authors": [ |
|
{ |
|
"first": "Srecko", |
|
"middle": [], |
|
"last": "Joksimovic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Ryan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaclyn", |
|
"middle": [], |
|
"last": "Baker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juan", |
|
"middle": [], |
|
"last": "Ocumpaugh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Miguel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Andres", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elle", |
|
"middle": [ |
|
"Yuan" |
|
], |
|
"last": "Tot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shane", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Dawson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Third Workshop on Abusive Language Online", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "36--45", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Srecko Joksimovic, Ryan S Baker, Jaclyn Ocumpaugh, Juan Miguel L Andres, Ivan Tot, Elle Yuan Wang, and Shane Dawson. 2019. Automated Identifica- tion of Verbally Abusive Behaviors in Online Dis- cussions. In Proceedings of the Third Workshop on Abusive Language Online, pages 36-45.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Civility at work and in our public squares", |
|
"authors": [ |
|
{ |
|
"first": "Powell", |
|
"middle": [], |
|
"last": "Tate", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Krc", |
|
"middle": [], |
|
"last": "Research", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weber", |
|
"middle": [], |
|
"last": "Shandwick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Powell Tate KRC Research, Weber Shandwick. 2018. Civility in America 2018: Civility at work and in our public squares. https://www.webershandwick. com/wp-content/uploads/2018/06/ Civility-in-America-VII-FINAL.pdf.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Opinion conflicts: An effective route to detect incivility in Twitter", |
|
"authors": [ |
|
{ |
|
"first": "Aishik", |
|
"middle": [], |
|
"last": "Suman Kalyan Maity", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pawan", |
|
"middle": [], |
|
"last": "Chakraborty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Animesh", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mukherjee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the ACM on Human-Computer Interaction", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "1--27", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Suman Kalyan Maity, Aishik Chakraborty, Pawan Goyal, and Animesh Mukherjee. 2018. Opinion con- flicts: An effective route to detect incivility in Twit- ter. Proceedings of the ACM on Human-Computer Interaction, 2(CSCW):1-27.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "The impact of toxic language on the health of Reddit communities", |
|
"authors": [ |
|
{ |
|
"first": "Shruthi", |
|
"middle": [], |
|
"last": "Mohan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Apala", |
|
"middle": [], |
|
"last": "Guha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Harris", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fred", |
|
"middle": [], |
|
"last": "Popowich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashley", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Priebe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Canadian Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "51--56", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shruthi Mohan, Apala Guha, Michael Harris, Fred Popowich, Ashley Schuster, and Chris Priebe. 2017. The impact of toxic language on the health of Reddit communities. In Canadian Conference on Artificial Intelligence, pages 51-56. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Flaming on YouTube", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Peter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ard", |
|
"middle": [], |
|
"last": "Moor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ria", |
|
"middle": [], |
|
"last": "Heuvelman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Verleur", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Computers in human behavior", |
|
"volume": "26", |
|
"issue": "6", |
|
"pages": "1536--1546", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter J Moor, Ard Heuvelman, and Ria Verleur. 2010. Flaming on YouTube. Computers in human behav- ior, 26(6):1536-1546.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Incentivizing the manuscriptreview system using REX", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Diana", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mutz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Political Science & Politics", |
|
"volume": "48", |
|
"issue": "S1", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diana C Mutz. 2015. Incentivizing the manuscript- review system using REX. PS, Political Science & Politics, 48(S1):73.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "The new videomalaise: Effects of televised incivility on political trust", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Diana", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Byron", |
|
"middle": [], |
|
"last": "Mutz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Reeves", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "American Political Science Review", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--15", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diana C Mutz and Byron Reeves. 2005. The new videomalaise: Effects of televised incivility on polit- ical trust. American Political Science Review, pages 1-15.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Nikolov-Radivchev at SemEval-2019 Task 6: Offensive tweet classification with BERT and ensembles", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Nikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Radivchev", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 13th International Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "691--695", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Nikolov and Victor Radivchev. 2019. Nikolov- Radivchev at SemEval-2019 Task 6: Offensive tweet classification with BERT and ensembles. In Pro- ceedings of the 13th International Workshop on Se- mantic Evaluation, pages 691-695.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Democracy online: Civility, politeness, and the democratic potential of online political discussion groups", |
|
"authors": [ |
|
{ |
|
"first": "Zizi", |
|
"middle": [], |
|
"last": "Papacharissi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "New media & society", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "259--283", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zizi Papacharissi. 2004. Democracy online: Civility, politeness, and the democratic potential of online political discussion groups. New media & society, 6(2):259-283.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Reddit's monthly active user base grew 30% to reach 430M", |
|
"authors": [ |
|
{ |
|
"first": "Sarah", |
|
"middle": [], |
|
"last": "Perez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sarah Perez. 2019. Reddit's monthly active user base grew 30% to reach 430M in 2019.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Special section introduction-ubiquitous IT and digital vulnerabilities", |
|
"authors": [ |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Ransbotham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ram", |
|
"middle": [], |
|
"last": "Robert G Fichman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alok", |
|
"middle": [], |
|
"last": "Gopal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Systems Research", |
|
"volume": "27", |
|
"issue": "4", |
|
"pages": "834--847", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sam Ransbotham, Robert G Fichman, Ram Gopal, and Alok Gupta. 2016. Special section introduc- tion-ubiquitous IT and digital vulnerabilities. In- formation Systems Research, 27(4):834-847.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Free press vs. free speech? The rhetoric of \"civility", |
|
"authors": [ |
|
{ |
|
"first": "Bill", |
|
"middle": [], |
|
"last": "Reader", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "89", |
|
"issue": "", |
|
"pages": "495--513", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bill Reader. 2012. Free press vs. free speech? The rhetoric of \"civility\" in regard to anonymous on- line comments. Journalism & mass communication quarterly, 89(3):495-513.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "hpiDEDIS at GermEval 2019: Offensive Language Identification using a German BERT model", |
|
"authors": [ |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Risch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anke", |
|
"middle": [], |
|
"last": "Stoll", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc", |
|
"middle": [], |
|
"last": "Ziegele", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralf", |
|
"middle": [], |
|
"last": "Krestel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "KONVENS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julian Risch, Anke Stoll, Marc Ziegele, and Ralf Kres- tel. 2019. hpiDEDIS at GermEval 2019: Offen- sive Language Identification using a German BERT model. In KONVENS.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Augment to prevent: short-text data augmentation in deep learning for hate-speech classification", |
|
"authors": [ |
|
{ |
|
"first": "Georgios", |
|
"middle": [], |
|
"last": "Rizos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Konstantin", |
|
"middle": [], |
|
"last": "Hemker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bj\u00f6rn", |
|
"middle": [], |
|
"last": "Schuller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 28th ACM International Conference on Information and Knowledge Management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "991--1000", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Georgios Rizos, Konstantin Hemker, and Bj\u00f6rn Schuller. 2019. Augment to prevent: short-text data augmentation in deep learning for hate-speech clas- sification. In Proceedings of the 28th ACM Inter- national Conference on Information and Knowledge Management, pages 991-1000.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Verbal venting in the social web: Effects of anonymity and group norms on aggressive language use in online comments", |
|
"authors": [ |
|
{ |
|
"first": "Leonie", |
|
"middle": [], |
|
"last": "R\u00f6sner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicole", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Kr\u00e4mer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Social Media+ Society", |
|
"volume": "2", |
|
"issue": "3", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leonie R\u00f6sner and Nicole C Kr\u00e4mer. 2016. Verbal venting in the social web: Effects of anonymity and group norms on aggressive language use in online comments. Social Media+ Society, 2(3):2056305116664220.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Beyond Incivility: Understanding Patterns of Uncivil and Intolerant Discourse in Online Political Talk", |
|
"authors": [], |
|
"year": null, |
|
"venue": "Communication Research", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Patr\u00edcia Rossini. 2020. Beyond Incivility: Understand- ing Patterns of Uncivil and Intolerant Discourse in Online Political Talk. Communication Research, page 0093650220921314.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Incivility Detection in Online Comments", |
|
"authors": [ |
|
{ |
|
"first": "Farig", |
|
"middle": [], |
|
"last": "Sadeque", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Rains", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yotam", |
|
"middle": [], |
|
"last": "Shmargad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kate", |
|
"middle": [], |
|
"last": "Kenski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Coe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Bethard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Eighth Joint Conference on Lexical and Computational Semantics (* SEM 2019)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "283--291", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Farig Sadeque, Stephen Rains, Yotam Shmargad, Kate Kenski, Kevin Coe, and Steven Bethard. 2019. In- civility Detection in Online Comments. In Proceed- ings of the Eighth Joint Conference on Lexical and Computational Semantics (* SEM 2019), pages 283- 291.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter", |
|
"authors": [ |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1910.01108" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. 2019. DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter. arXiv preprint arXiv:1910.01108.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Virtuous or vitriolic: The effect of anonymity on civility in online newspaper reader comment boards", |
|
"authors": [ |
|
{

"first": "Arthur",

"middle": [

"D"

],

"last": "Santana",

"suffix": ""

}
|
], |
|
"year": 2014, |
|
"venue": "Journalism practice", |
|
"volume": "8", |
|
"issue": "1", |
|
"pages": "18--33", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arthur D Santana. 2014. Virtuous or vitriolic: The effect of anonymity on civility in online newspa- per reader comment boards. Journalism practice, 8(1):18-33.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Measuring the prevalence of online hate speech", |
|
"authors": [ |
|
{ |
|
"first": "Alexandra", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Siegel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evgenii", |
|
"middle": [], |
|
"last": "Nikitin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pablo", |
|
"middle": [], |
|
"last": "Barber\u00e1", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joanna", |
|
"middle": [], |
|
"last": "Sterling", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bethany", |
|
"middle": [], |
|
"last": "Pullen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Bonneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Nagler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joshua", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Tucker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexandra A Siegel, Evgenii Nikitin, Pablo Bar- ber\u00e1, Joanna Sterling, Bethany Pullen, Richard Bonneau, Jonathan Nagler, and Joshua A Tucker. 2018. Measuring the prevalence of online hate speech, with an application to the 2016 US elec- tion.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "How to fine-tune BERT for text classification?", |
|
"authors": [ |
|
{ |
|
"first": "Chi", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xipeng", |
|
"middle": [], |
|
"last": "Qiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yige", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuanjing", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "China National Conference on Chinese Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "194--206", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chi Sun, Xipeng Qiu, Yige Xu, and Xuanjing Huang. 2019. How to fine-tune BERT for text classification? In China National Conference on Chinese Computa- tional Linguistics, pages 194-206. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "The Dynamics of Political Incivility on Twitter", |
|
"authors": [ |
|
{ |
|
"first": "Yannis", |
|
"middle": [], |
|
"last": "Theocharis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pablo", |
|
"middle": [], |
|
"last": "Barber\u00e1", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zolt\u00e1n", |
|
"middle": [], |
|
"last": "Fazekas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [ |
|
"Adrian" |
|
], |
|
"last": "Popa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "10", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yannis Theocharis, Pablo Barber\u00e1, Zolt\u00e1n Fazekas, and Sebastian Adrian Popa. 2020. The Dynam- ics of Political Incivility on Twitter. Sage Open, 10(2):2158244020919447.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Predicting the Type and Target of Offensive Posts in Social Media", |
|
"authors": [ |
|
{ |
|
"first": "Marcos", |
|
"middle": [], |
|
"last": "Zampieri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shervin", |
|
"middle": [], |
|
"last": "Malmasi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Rosenthal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noura", |
|
"middle": [], |
|
"last": "Farra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ritesh", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1415--1420", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcos Zampieri, Shervin Malmasi, Preslav Nakov, Sara Rosenthal, Noura Farra, and Ritesh Kumar. 2019. Predicting the Type and Target of Offensive Posts in Social Media. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 1415-1420.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Zeses Pitenis, and \u00c7 agr\u0131 \u00c7\u00f6ltekin. 2020. Semeval-2020 task 12: Multilingual offensive language identification in social media (offenseval 2020)", |
|
"authors": [ |
|
{ |
|
"first": "Marcos", |
|
"middle": [], |
|
"last": "Zampieri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Rosenthal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pepa", |
|
"middle": [], |
|
"last": "Atanasova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georgi", |
|
"middle": [], |
|
"last": "Karadzhov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hamdy", |
|
"middle": [], |
|
"last": "Mubarak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leon", |
|
"middle": [], |
|
"last": "Derczynski", |
|
"suffix": "" |
|
},

{

"first": "Zeses",

"middle": [],

"last": "Pitenis",

"suffix": ""

},

{

"first": "\u00c7a\u011fr\u0131",

"middle": [],

"last": "\u00c7\u00f6ltekin",

"suffix": ""

}
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2006.07235" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcos Zampieri, Preslav Nakov, Sara Rosenthal, Pepa Atanasova, Georgi Karadzhov, Hamdy Mubarak, Leon Derczynski, Zeses Pitenis, and \u00c7 agr\u0131 \u00c7\u00f6ltekin. 2020. Semeval-2020 task 12: Multilingual offensive language identification in social media (offenseval 2020). arXiv preprint arXiv:2006.07235.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF0": { |
|
"text": "", |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF2": { |
|
"text": "Results -Logistic Regression Models on Reddit Test Data", |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>Training Data</td><td colspan=\"2\">Precision Recall</td><td>F 1</td></tr><tr><td>Synthetic Reddit</td><td>0.872</td><td colspan=\"2\">0.158 0.267</td></tr><tr><td>Reddit + Twitter</td><td>0.711</td><td colspan=\"2\">0.474 0.569</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"text": "", |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>: Results -Logistic Regression Models on Twit-</td></tr><tr><td>ter Data</td></tr></table>", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |