# super_tweeteval/process/tweet_hate.py
import pandas as pd
from datasets import load_dataset
from sklearn.model_selection import train_test_split
import urllib.request
# Set binary labels
HATE = 1
NOT_HATE = 0
# class mapping for the multi-class (target) labels
class_mapping = {
    'target_gender_aggregated': 0,
    'target_race_aggregated': 1,
    'target_sexuality_aggregated': 2,
    'target_religion_aggregated': 3,
    'target_origin_aggregated': 4,
    'target_disability_aggregated': 5,
    'target_age_aggregated': 6,
    'not_hate': 7
}
# map the continuous hate_speech_score to discrete classes
def map_label(x):
    if -1 <= x <= 0.5:
        label = 999  # neutral/ambiguous
    elif x > 0.5:
        label = HATE
    else:  # x < -1
        label = NOT_HATE
    return label
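
# quick sanity check: the thresholds above partition the score range as
# x < -1 -> NOT_HATE, -1 <= x <= 0.5 -> 999 (ambiguous), x > 0.5 -> HATE
assert map_label(-1.5) == NOT_HATE
assert map_label(0.0) == 999
assert map_label(0.7) == HATE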
# format text: mask user mentions and URLs
def clean_text(text):
    text = text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')
    new_text = []
    for t in text.split():
        # mask mentions unless the handle belongs to a known verified user
        # (verified_users is lowercased, so compare in lowercase)
        t = '@user' if t.startswith('@') and len(t) > 1 and t.replace('@', '').lower() not in verified_users else t
        t = '{URL}' if (t.startswith('http') or t.startswith('URL')) else t
        new_text.append(t)
    return ' '.join(new_text)
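
# illustrative example (assuming 'nasa' is in verified_users and 'someuser' is not):
#   clean_text('@someuser @NASA check http://t.co/xyz')
#   -> '@user @NASA check {URL}'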
# load data
dataset = load_dataset('ucberkeley-dlab/measuring-hate-speech')
df = dataset['train'].to_pandas()
# map each annotation's continuous hate_speech_score to a discrete label
df['annon_label'] = df['hate_speech_score'].apply(map_label)
# keep only entries from Twitter
df = df[df['platform'] == 2]
# drop neutral/ambiguous annotations
df = df[df['annon_label'].isin([HATE, NOT_HATE])]
# count the binary labels per tweet; keep only labels chosen by at least 2 annotators
df_count_label = pd.DataFrame(df.groupby('comment_id')['annon_label'].value_counts())
df_count_label = df_count_label.rename(columns={'annon_label': 'count'})
df_count_label = df_count_label.reset_index(level=1)
df_count_label = df_count_label[df_count_label['count'] >= 2]
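# e.g. annotations [HATE, HATE, NOT_HATE] keep HATE (count 2 >= 2), while
# [HATE, NOT_HATE] reach no agreement and the tweet is dropped below
# (note: an exact tie, e.g. 2 HATE vs 2 NOT_HATE, would leave a duplicate
# comment_id here; the data is assumed not to contain such ties)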
# map the agreed binary label back onto each annotation row
df = df.set_index('comment_id')
df['label'] = None
df['label'] = df_count_label['annon_label']
# drop entries with no agreement
df = df[df['label'].notnull()]
df = df.reset_index()
# find agreement on targets
targets = ['target_race', 'target_religion', 'target_origin', 'target_gender',
           'target_sexuality', 'target_age', 'target_disability']
# for each target, count agreement among annotators
for t in targets:
    # count and consider only target values selected by at least 2 annotators
    df_count_targets = pd.DataFrame(df.groupby('comment_id')[t].value_counts())
    df_count_targets = df_count_targets.rename(columns={t: 'count'})
    df_count_targets = df_count_targets.reset_index(level=1)
    df_count_targets = df_count_targets[df_count_targets['count'] >= 2]
    # drop tweets where more than one value reaches the threshold (possible with more than 3 annotators)
    df_count_targets = df_count_targets.loc[df_count_targets.index.drop_duplicates(keep=False)]
    # map the aggregated target back onto the annotation rows
    df = df.set_index('comment_id')
    df[f'{t}_aggregated'] = False
    df[f'{t}_aggregated'] = df_count_targets[t]
    df[f'{t}_aggregated'] = df[f'{t}_aggregated'].fillna(False)
    df = df.reset_index()
# aggregate targets
targets_aggregated = [f'{t}_aggregated' for t in targets]
# collect, for each row, the aggregated target columns that are True
df['target'] = df[targets_aggregated].apply(lambda row: row[row].index, axis=1)
# set target only if it is unique
df['target'] = df['target'].apply(lambda x: x[0] if len(x) == 1 else None)
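# e.g. a tweet where only target_race_aggregated is True gets
# target = 'target_race_aggregated'; tweets with zero or multiple agreed
# targets get target = None and (if hateful) are dropped further below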
# annotator-level rows are no longer needed -> keep each tweet only once
df = df.groupby('comment_id').nth(0)
df = df.reset_index()
# build the multi-class gold label:
# hateful tweets (label == 1) get their aggregated target; non-hateful tweets get 'not_hate'
idx_multiclass = df[df['label'] == 1].index
idx_not_hate = df[df['label'] == 0].index
# initialize column
df['gold_label'] = None
df.loc[idx_not_hate, 'gold_label'] = 'not_hate'
df.loc[idx_multiclass, 'gold_label'] = df.loc[idx_multiclass]['target']
# drop hateful entries without an agreed target
df = df.dropna(subset=['gold_label'])
# get list of known verified users (one handle per line, without the leading '@')
verified_users = urllib.request.urlopen('https://raw.githubusercontent.com/cardiffnlp/timelms/main/data/verified_users.v091122.txt').readlines()
verified_users = [x.decode().strip('\n').lower() for x in verified_users]
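# note: clean_text (defined above) reads this module-level verified_users
# list, so it must be populated before df['text'].apply(clean_text) below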
# clean text
df['text'] = df['text'].apply(clean_text)
# map classes
df['gold_label'] = df['gold_label'].map(class_mapping)
# create stratified splits: ~70% train / 10% validation / 20% test
test_size = int(0.2 * len(df))
val_size = int(0.1 * len(df))
train, test = train_test_split(df, test_size=test_size, stratify=df['gold_label'].values, random_state=4)
train, val = train_test_split(train, test_size=val_size, stratify=train['gold_label'].values, random_state=4)
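# quick sanity check: the three splits are disjoint and cover the whole dataframe
assert len(train) + len(val) + len(test) == len(df)
assert set(train.index).isdisjoint(test.index) and set(train.index).isdisjoint(val.index)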
# save splits
cols_to_keep = ['gold_label', 'text']
train[cols_to_keep].to_json('../data/tweet_hate/train.jsonl', lines=True, orient='records')
val[cols_to_keep].to_json('../data/tweet_hate/validation.jsonl', lines=True, orient='records')
test[cols_to_keep].to_json('../data/tweet_hate/test.jsonl', lines=True, orient='records')