# super_tweeteval/process/tweet_sentiment.py
# Original data: https://alt.qcri.org/semeval2017/task4/index.php?id=results
import pandas as pd
import urllib.request
# format text: mask unverified user handles and URLs
def clean_text(text):
    # normalise whitespace
    text = text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')
    new_text = []
    for t in text.split():
        # mask @-mentions unless the handle is in the verified-users list
        # (handles are compared in lowercase)
        t = '@user' if t.startswith('@') and len(t) > 1 and t.replace(
            '@', '').lower() not in verified_users else t
        # mask URLs
        t = '{URL}' if t.startswith('http') else t
        new_text.append(t)
    return ' '.join(new_text)
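# Illustrative example (hypothetical handles): assuming 'nasa' is in verified_users
# and 'randomperson' is not, clean_text would map
#   '@randomperson check this out https://t.co/xyz @NASA'
# to
#   '@user check this out {URL} @NASA'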
# test set
with open('./SemEval2017-task4-test.subtask-CE.english.txt') as f:
    test_lines = f.readlines()
test = [x.split('\t') for x in test_lines]
test = pd.DataFrame(test, columns=['id', 'topic', 'gold_label', 'text'])
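# Note (inferred from the columns above): each line in the SemEval files is expected
# to be tab-separated as  id <TAB> topic <TAB> gold_label <TAB> text.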
# validation set
fnames = ['twitter-2016dev-CE.txt', 'twitter-2016devtest-CE.txt']
validation_lines = []
for input_f in fnames:
    with open(input_f) as f:
        lines = f.readlines()
    validation_lines.extend(lines)
validation = [x.split('\t') for x in validation_lines]
validation = pd.DataFrame(
    validation, columns=['id', 'topic', 'gold_label', 'text'])
# train set
fnames = ['./twitter-2016train-CE.txt', './twitter-2016test-CE.txt']
train_lines = []
for input_f in fnames:
    with open(input_f) as f:
        lines = f.readlines()
    train_lines.extend(lines)
train = [x.split('\t') for x in train_lines]
train = pd.DataFrame(
    train, columns=['id', 'topic', 'gold_label', 'text'])
# clean text
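# load the verified-user handle list (one handle per line) referenced by clean_text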
verified_users = urllib.request.urlopen(
    'https://raw.githubusercontent.com/cardiffnlp/timelms/main/data/verified_users.v091122.txt').readlines()
verified_users = [x.decode().strip('\n').lower() for x in verified_users]
train['text'] = train['text'].apply(clean_text)
validation['text'] = validation['text'].apply(clean_text)
test['text'] = test['text'].apply(clean_text)
# save splits
cols_to_keep = ['gold_label', 'topic', 'text']
train[cols_to_keep].to_json(
    '../data/tweet_sentiment/train.jsonl', lines=True, orient='records')
validation[cols_to_keep].to_json(
    '../data/tweet_sentiment/validation.jsonl', lines=True, orient='records')
test[cols_to_keep].to_json(
    '../data/tweet_sentiment/test.jsonl', lines=True, orient='records')
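# Sanity-check sketch (illustrative, not required by the pipeline): the JSON Lines
# splits can be reloaded with pandas, e.g.
#   df = pd.read_json('../data/tweet_sentiment/train.jsonl', lines=True)
#   df.columns.tolist()  # ['gold_label', 'topic', 'text']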