updated hate dataset

- data/tweet_hate/test.jsonl +0 -0
- data/tweet_hate/train.jsonl +0 -0
- data/tweet_hate/validation.jsonl +0 -0
- process/tweet_hate.py +9 -6
- super_tweet_eval.py +35 -0
data/tweet_hate/test.jsonl
CHANGED
The diff for this file is too large to render. See raw diff.

data/tweet_hate/train.jsonl
CHANGED
The diff for this file is too large to render. See raw diff.

data/tweet_hate/validation.jsonl
CHANGED
The diff for this file is too large to render. See raw diff.
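The three JSONL diffs are too large for the viewer, so the quickest way to see what this update produced is to load a regenerated split locally. A minimal sketch, assuming the repository is checked out and that the splits carry the two columns kept by process/tweet_hate.py ('gold_label' and 'text'):

import pandas as pd

# Load one of the regenerated splits straight from the repo checkout.
test = pd.read_json("data/tweet_hate/test.jsonl", lines=True, orient="records")

print(test.shape)                         # number of examples and columns
print(test.columns.tolist())              # expected: ['gold_label', 'text']
print(test["gold_label"].value_counts())  # per-class counts, including the new not_hate class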
process/tweet_hate.py
CHANGED
@@ -15,7 +15,8 @@ class_mapping = {
     'target_religion_aggregated': 3,
     'target_origin_aggregated': 4,
     'target_disability_aggregated': 5,
-    'target_age_aggregated': 6
+    'target_age_aggregated': 6,
+    'not_hate': 7
 }


@@ -44,7 +45,7 @@ def clean_text(text):


 # load data
-dataset = load_dataset('ucberkeley-dlab/measuring-hate-speech'
+dataset = load_dataset('ucberkeley-dlab/measuring-hate-speech')
 df = dataset['train'].to_pandas()

 # get label
@@ -62,7 +63,7 @@ df_count_label = df_count_label.rename(columns={'annon_label': 'count'})
 df_count_label = df_count_label.reset_index(level=1)
 df_count_label = df_count_label[df_count_label['count'] >= 2]

-# map label
+# map binary label
 df = df.set_index('comment_id')
 df['label'] = None
 df['label'] = df_count_label['annon_label']
@@ -71,7 +72,7 @@ df['label'] = df_count_label['annon_label']
 df = df[df['label'].notnull()]
 df = df.reset_index()

-# find aggrement on
+# find aggrement on targets
 targets = ['target_race', 'target_religion', 'target_origin', 'target_gender',
            'target_sexuality', 'target_age', 'target_disability']

@@ -107,11 +108,13 @@ df = df.reset_index()


 # clean multiclass
-# only tweets with 1
+# give target only to tweets with 1 (is hate speech) target
 idx_multiclass = df[df['label'] == 1].index
+idx_not_hate = df[df['label'] == 0].index

 # initialize column
 df['gold_label'] = None
+df.loc[idx_not_hate, 'gold_label'] = 'not_hate'
 df.loc[idx_multiclass, 'gold_label'] = df.loc[idx_multiclass]['target']

 # drop entries without target
@@ -136,7 +139,7 @@ train, test = train_test_split(df, test_size=test_size, stratify=df['gold_label'
 train, val = train_test_split(train, test_size=val_size, stratify=train['gold_label'].values, random_state=4)

 # save splits
-cols_to_keep = ['
+cols_to_keep = ['gold_label', 'text']
 train[cols_to_keep].to_json('../data/tweet_hate/train.jsonl', lines=True, orient='records')
 val[cols_to_keep].to_json('../data/tweet_hate/validation.jsonl', lines=True, orient='records')
 test[cols_to_keep].to_json('../data/tweet_hate/test.jsonl', lines=True, orient='records')
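The substantive change in the processing script is that non-hateful tweets (binary label 0) are no longer left without a gold label: they now receive the class not_hate, and class_mapping gains a matching eighth id. Only ids 3-7 of class_mapping are visible in the hunk above; the sketch below assumes the first three entries (gender, race, sexuality) take ids 0-2, matching the order of the names list added to super_tweet_eval.py, and shows how an integer gold_label (the loader declares it as int32) would be decoded back to its class name:

# Assumed full mapping; only the entries with ids 3-7 appear in the diff above.
class_mapping = {
    'target_gender_aggregated': 0,     # assumed, not shown in the hunk
    'target_race_aggregated': 1,       # assumed, not shown in the hunk
    'target_sexuality_aggregated': 2,  # assumed, not shown in the hunk
    'target_religion_aggregated': 3,
    'target_origin_aggregated': 4,
    'target_disability_aggregated': 5,
    'target_age_aggregated': 6,
    'not_hate': 7,
}

# Reverse lookup: integer id -> class name.
id2label = {v: k for k, v in class_mapping.items()}

def decode(gold_label: int) -> str:
    """Map an integer gold_label from the exported splits back to its class name."""
    return id2label[int(gold_label)]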
super_tweet_eval.py
CHANGED
@@ -102,6 +102,27 @@ _TEMPO_WIC_CITATION = """\
     abstract = "Language evolves over time, and word meaning changes accordingly. This is especially true in social media, since its dynamic nature leads to faster semantic shifts, making it challenging for NLP models to deal with new content and trends. However, the number of datasets and models that specifically address the dynamic nature of these social platforms is scarce. To bridge this gap, we present TempoWiC, a new benchmark especially aimed at accelerating research in social media-based meaning shift. Our results show that TempoWiC is a challenging benchmark, even for recently-released language models specialized in social media.",
 }
 """
+_TWEET_HATE_DESCRIPTION = """TBA"""
+_TWEET_HATE_CITATION = """\
+@inproceedings{sachdeva-etal-2022-measuring,
+    title = "The Measuring Hate Speech Corpus: Leveraging Rasch Measurement Theory for Data Perspectivism",
+    author = "Sachdeva, Pratik and
+      Barreto, Renata and
+      Bacon, Geoff and
+      Sahn, Alexander and
+      von Vacano, Claudia and
+      Kennedy, Chris",
+    booktitle = "Proceedings of the 1st Workshop on Perspectivist Approaches to NLP @LREC2022",
+    month = jun,
+    year = "2022",
+    address = "Marseille, France",
+    publisher = "European Language Resources Association",
+    url = "https://aclanthology.org/2022.nlperspectives-1.11",
+    pages = "83--94",
+    abstract = "We introduce the Measuring Hate Speech corpus, a dataset created to measure hate speech while adjusting for annotators{'} perspectives. It consists of 50,070 social media comments spanning YouTube, Reddit, and Twitter, labeled by 11,143 annotators recruited from Amazon Mechanical Turk. Each observation includes 10 ordinal labels: sentiment, disrespect, insult, attacking/defending, humiliation, inferior/superior status, dehumanization, violence, genocide, and a 3-valued hate speech benchmark label. The labels are aggregated using faceted Rasch measurement theory (RMT) into a continuous score that measures each comment{'}s location on a hate speech spectrum. The annotation experimental design assigned comments to multiple annotators in order to yield a linked network, allowing annotator disagreement (perspective) to be statistically summarized. Annotators{'} labeling strictness was estimated during the RMT scaling, projecting their perspective onto a linear measure that was adjusted for the hate speech score. Models that incorporate this annotator perspective parameter as an auxiliary input can generate label- and score-level predictions conditional on annotator perspective. The corpus includes the identity group targets of each comment (8 groups, 42 subgroups) and annotator demographics (6 groups, 40 subgroups), facilitating analyses of interactions between annotator- and comment-level identities, i.e. identity-related annotator perspective.",
+}
+"""
+


 class SuperTweetEvalConfig(datasets.BuilderConfig):
@@ -175,6 +196,13 @@ class SuperTweetEval(datasets.GeneratorBasedBuilder):
                       'text_1', 'text_1_tokenized', 'token_idx_1', 'date_1',
                       'text_2', 'text_2_tokenized', 'token_idx_2', 'date_2'],
             data_url="https://huggingface.co/datasets/cardiffnlp/super_tweet_eval/resolve/main/data/tempo_wic",
+        ),
+        SuperTweetEvalConfig(
+            name="tweet_hate",
+            description=_TWEET_HATE_DESCRIPTION,
+            citation=_TWEET_HATE_CITATION,
+            features=['gold_label', 'text'],
+            data_url="https://huggingface.co/datasets/cardiffnlp/super_tweet_eval/resolve/main/data/tweet_hate",
         )
     ]

@@ -201,6 +229,13 @@ class SuperTweetEval(datasets.GeneratorBasedBuilder):
             features["token_idx_2"] = datasets.Value("int32")
             features["text_1_tokenized"] = datasets.Sequence(datasets.Value("string"))
             features["text_2_tokenized"] = datasets.Sequence(datasets.Value("string"))
+        if self.config.name == "tweet_hate":
+            names = [
+                'target_gender_aggregated','target_race_aggregated', 'target_sexuality_aggregated',
+                'target_religion_aggregated','target_origin_aggregated', 'target_disability_aggregated','target_age_aggregated',
+                'not_hate']
+            features["gold_label"] = datasets.Value("int32")
+            features["text"] = datasets.Value("string")

         return datasets.DatasetInfo(
             description=_SUPER_TWEET_EVAL_DESCRIPTION + "\n" + self.config.description,
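With the new config registered, the subset should be loadable by name once this commit is merged. A minimal sketch, using the config name and feature names from the diff (the dataset repo id is taken from the data_url):

from datasets import load_dataset

# Config name "tweet_hate" is added to BUILDER_CONFIGS above.
dataset = load_dataset("cardiffnlp/super_tweet_eval", "tweet_hate")

print(dataset)              # train / validation / test splits written by process/tweet_hate.py
print(dataset["train"][0])  # expected fields: 'gold_label' (int32) and 'text' (string)

One observation on the last hunk: the names list is built but not wired into the feature definition, since gold_label is declared as a plain datasets.Value("int32"). If the class names are meant to be exposed to consumers, the usual datasets idiom would be features["gold_label"] = datasets.ClassLabel(names=names); as written, names is unused.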