# super_tweeteval/evaluation/evaluation.py
import logging
from datasets import load_dataset
from imblearn.metrics import macro_averaged_mean_absolute_error
from sklearn.metrics import f1_score
from evaluate import load
import numpy as np
import argparse
from collections import defaultdict
import json
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
# argument
parser = argparse.ArgumentParser(description='Super TweetEval evaluation script.')
parser.add_argument('-p', '--prediction-path', required=True, type=str,
help="path to directory wiht that contains the model predictions on the test sets. One file per task.")
parser.add_argument('-o', '--output-file', default="scores.json", type=str, help="path to the output file")
parser.add_argument('--t2t-format', action="store_false", default=True,
                    help="by default the tweet_ner7 predictions are parsed as T2T output (comma-separated 'entity: type' items); pass this flag to read them as tab-separated tag sequences instead (only affects tweet_ner7).")
opt = parser.parse_args()
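# Example usage:
#   python evaluation.py -p path/to/predictions -o scores.json
# The prediction directory is expected to contain one plain-text file per task
# (e.g. tempo-wic.txt, tweet-emoji.txt, tweet-ner7.txt, ...), one prediction per line.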
task_names = ['tweet_topic', 'tweet_ner7', 'tweet_qa', 'tweet_qg',
'tweet_intimacy', 'tweet_similarity', 'tempo_wic',
'tweet_hate', 'tweet_nerd', 'tweet_emoji',
'tweet_sentiment', 'tweet_emotion']
scores = defaultdict(lambda: 0)  # default score of 0 for tasks without predictions
not_found = []
for task in task_names:
# load dataset
data = load_dataset("cardiffnlp/super_tweeteval", task, use_auth_token=True, split="test")
try:
if task == 'tempo_wic':
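            # binary accuracy: predictions other than "yes"/"no" are mapped to -1 and therefore scored as wrong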
label2id = {"no": 0, "yes": 1}
with open(f"{opt.prediction_path}/tempo-wic.txt") as f:
_predictions = []
output = f.read().split('\n')
for entry in output:
if entry in label2id:
_predictions.append(label2id[entry])
else:
_predictions.append(-1)
gold_labels = data["gold_label_binary"]
eval_metric = {"accuracy": np.mean([int(a == b) for a, b in zip(_predictions, gold_labels)])}
scores[task] = eval_metric["accuracy"]
elif task == "tweet_emoji":
# load label names
with open('../data/tweet_emoji/map.txt') as f:
label_classes = f.readlines()
label_names = [x.strip('\n') for x in label_classes]
label_names = [x.split(',')[1] for x in label_names]
with open(f"{opt.prediction_path}/tweet-emoji.txt") as f:
lines = f.readlines()
lines = [l.strip('\n') for l in lines]
predictions = []
for l in lines:
pred_instance = []
# consider only top 5 predictions
                    pred_labels = l.split(',') if ',' in l else l.split(' ')
                    for label in pred_labels[:5]:
label = label.strip(" ,")
if label in label_names:
pred_instance.append(label_names.index(label))
else:
pred_instance.append(-1) # emoji not in label_names
predictions.append(pred_instance)
# metric: accuracy at top 5
gold_labels = np.array(data["gold_label"][:40_000])
eval_metric = {"accuracy_top5": np.mean([1 if gold_labels[i] in predictions[i] else 0 for i in range(len(gold_labels))])}
scores[task] = eval_metric["accuracy_top5"]
elif task == "tweet_emotion":
label_names = data.features['gold_label_list'].feature.names
with open(f"{opt.prediction_path}/tweet-emotion.txt") as f:
lines = f.readlines()
lines = [l.strip('\n') for l in lines]
predictions = []
for l in lines:
pred_instance = [0] * len(label_names)
for label in l.split(','):
label = label.strip(' ')
if label in label_names:
pred_instance[label_names.index(label)] = 1
predictions.append(pred_instance)
# metric
gold_labels = data["gold_label_list"]
eval_metric = {"macro_f1": f1_score(gold_labels, predictions, average='macro')}
scores[task] = eval_metric["macro_f1"]
elif task == "tweet_ner7":
labels = [
'B-corporation', 'B-creative_work', 'B-event', 'B-group', 'B-location', 'B-person', 'B-product',
'I-corporation', 'I-creative_work', 'I-event', 'I-group', 'I-location', 'I-person', 'I-product', 'O'
]
id2label = {i: label for i, label in enumerate(labels)}
true_sequence = [[id2label[i] for i in ii] for ii in data['gold_label_sequence']]
# metric
metric = load("seqeval")
if opt.t2t_format:
# format prediction file in IOB sequence
with open(f"{opt.prediction_path}/tweet-ner7.txt") as f:
lines = f.read().split("\n")
output = [l.strip('\n') for l in lines]
output = [list(set(i.split(","))) for i in output]
prediction_sequence = []
for d, o in zip(data, output):
tag_seq = ['O'] * len(d['text_tokenized'])
for _o in o:
if len(_o.split(":")) != 2:
continue
entity, _type = _o.split(":")
entity_tokens = entity.split(" ")
try:
i = d['text_tokenized'].index(entity_tokens[0])
tag_seq[i] = f"B-{_type.strip()}"
if len(entity_tokens) > 1:
for j in range(1, len(entity_tokens)):
tag_seq[i + j] = f"I-{_type.strip()}"
                        except (ValueError, IndexError):
                            # first entity token not found in the tweet, or the span runs past the end of the sequence
                            continue
prediction_sequence.append(tag_seq)
else:
                # predictions given directly as tab-separated tag sequences, one line per tweet;
                # assumes the same per-task file name as the T2T branch
                with open(f"{opt.prediction_path}/tweet-ner7.txt") as f:
                    prediction_sequence = [[id2label[int(j)] if j.isdigit() and int(j) in id2label else j for j in i.split('\t')]
                                           for i in f.read().split("\n")]
eval_metric = metric.compute(predictions=prediction_sequence, references=true_sequence)
eval_metric = {'overall_f1': eval_metric['overall_f1']}
scores[task] = eval_metric['overall_f1']
elif task == "tweet_hate":
label_names = data.features['gold_label'].names
with open(f"{opt.prediction_path}/tweet-hate.txt") as f:
lines = f.readlines()
output = [i.strip('\n') for i in lines]
predictions = []
for x in output:
if x not in label_names:
predictions.append(-1)
else:
predictions.append(label_names.index(x))
gold_labels = data["gold_label"]
# do not consider not_hate class
f1_multi = f1_score(gold_labels, predictions, labels=list(range(7)), average='macro')
# consider all hate subclasses as one class
predictions_binary = [1 if x in list(range(7)) else 0 for x in predictions]
gold_labels_binary = [1 if x in list(range(7)) else 0 for x in gold_labels]
f1_binary = f1_score(gold_labels_binary, predictions_binary, average='micro')
eval_metric = {"combined_f1": (f1_multi+f1_binary)/2}
scores[task] = eval_metric["combined_f1"]
elif task == "tweet_intimacy":
gold_labels = data["gold_score"]
# mean_value to be used if model outputs a non-numeric value
mean_value = sum(gold_labels)/len(gold_labels)
# metric
metric = load("spearmanr")
with open(f"{opt.prediction_path}/tweet-intimacy.txt") as f:
                _predictions = []
                failed_predictions = 0  # count of non-numeric predictions replaced by the mean gold score
lines = f.readlines()
output = [l.strip('\n') for l in lines]
for i in output:
try:
_predictions.append(float(i))
except ValueError:
_predictions.append(mean_value)
failed_predictions += 1
corr_spear = metric.compute(predictions=_predictions, references=gold_labels)
eval_metric = {"spearmanr": corr_spear}
scores[task] = eval_metric["spearmanr"]['spearmanr']
elif task == "tweet_nerd":
# metric
label2id = {"no": 0, "yes": 1}
with open(f"{opt.prediction_path}/tweet-nerd.txt") as f:
_predictions = []
output = f.read().split('\n')
output = [x.lower().strip() for x in output]
for entry in output:
if entry in label2id:
_predictions.append(label2id[entry])
else:
_predictions.append(-1)
gold_labels = data["gold_label_binary"]
eval_metric = {"accuracy": np.mean([int(a == b) for a, b in zip(_predictions, gold_labels)])}
scores[task] = eval_metric["accuracy"]
elif task == "tweet_qa":
metric = load("squad")
with open(f"{opt.prediction_path}/tweet-qa.txt") as f:
lines = f.readlines()
output = [l.strip('\n') for l in lines]
_predictions = [{"prediction_text": p, "id": str(_n)} for _n, p in enumerate(output)]
_references = [{"answers": {"answer_start": [100], "text": [r["gold_label_str"]]}, "id": str(_n)} for _n, r in enumerate(data)]
eval_metric = metric.compute(predictions=_predictions, references=_references)
eval_metric.pop("exact_match")
eval_metric["f1"] = eval_metric["f1"]/100
scores[task] = eval_metric["f1"]
elif task == "tweet_qg":
metric = load("meteor")
with open(f"{opt.prediction_path}/tweet-qg.txt") as f:
lines = f.readlines()
_predictions = [l.strip('\n') for l in lines]
_references = data["gold_label_str"]
eval_metric = metric.compute(predictions=_predictions, references=_references)
scores[task] = eval_metric["meteor"]
elif task == "tweet_sentiment":
label_names = data.features['gold_label'].names
with open(f"{opt.prediction_path}/tweet-sentiment.txt") as f:
lines = f.readlines()
output = [l.strip('\n') for l in lines]
predictions = []
# if the model outputs a label that is not in the label set, we set the label to be "neutral or negative" (2)
for x in output:
x = x.strip(' ')
if x not in label_names:
predictions.append(2)
else:
predictions.append(label_names.index(x))
            # metric: 1 - macro-averaged MAE (higher is better)
gold_labels = data["gold_label"]
macro_mae = macro_averaged_mean_absolute_error(gold_labels, predictions)
macro_mae = 1 - macro_mae
# set a floor of -1 for worst model
macro_mae = max([-1, macro_mae])
eval_metric = {"macro_mae": macro_mae}
scores[task] = eval_metric["macro_mae"]
elif task == "tweet_similarity":
gold_labels = data["gold_score"]
# mean_value to be used if model outputs a non-numeric value
mean_value = sum(gold_labels)/len(gold_labels)
# metric
metric = load("spearmanr")
with open(f"{opt.prediction_path}/tweet-similarity.txt") as f:
_predictions = []
lines = f.readlines()
output = [l.strip('\n') for l in lines]
for i in output:
try:
_predictions.append(float(i))
except ValueError:
_predictions.append(mean_value)
corr_spear = metric.compute(predictions=_predictions, references=gold_labels)
eval_metric = {"spearmanr": corr_spear}
scores[task] = eval_metric["spearmanr"]['spearmanr']
elif task == "tweet_topic":
label_names = data.features['gold_label_list'].feature.names
with open(f"{opt.prediction_path}/tweet-topic.txt") as f:
lines = f.readlines()
lines = [l.strip('\n') for l in lines]
predictions = []
for l in lines:
pred_instance = [0] * len(label_names)
for label in l.split(','):
label = label.strip(' ')
if label in label_names:
pred_instance[label_names.index(label)] = 1
predictions.append(pred_instance)
# metric
gold_labels = data["gold_label_list"]
eval_metric = {"macro_f1": f1_score(gold_labels, predictions, average='macro')}
scores[task] = eval_metric["macro_f1"]
except FileNotFoundError:
not_found.append(task)
continue
# clusters/groups to evaluate
subgroups = {
"temporal": ["tweet_ner7", "tempo_wic", "tweet_topic", "tweet_nerd"],
"multi-label": ["tweet_topic", "tweet_emotion"],
"multi-class": ["tweet_sentiment", "tweet_hate"],
"regression": ["tweet_similarity", "tweet_intimacy", "tweet_sentiment"],
"target-based": ["tweet_sentiment", "tempo_wic", "tweet_nerd"],
"big-label": ["tweet_emoji", "tweet_topic"],
"disambiguation": ["tempo_wic", "tweet_nerd"],
"generation": ["tweet_qa", "tweet_qg"],
}
#scores = {k:round(v*100, 2) for k,v in scores.items()}
score_avg = np.mean(list(scores.values()))
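# cluster scores average only over the tasks in each cluster for which a prediction file was found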
subgroups_scores = {group: np.mean([scores[task] for task in subgroups[group] if task not in not_found]) for group in subgroups}
logging.info(f"Tasks not found: {not_found}\n")
logging.info("*** Scores ***")
for sc in scores:
logging.info(f"{sc}: {scores[sc]:.2f}")
logging.info("*** Clusters scores ***")
for group in subgroups:
logging.info(f"{group}: {subgroups_scores[group]:.2f}")
logging.info(f"*** Average score: {score_avg:.2f} ***")
# write scores to file
results = {"scores": scores, "clusters": subgroups_scores, "average": score_avg}
with open(opt.output_file, "w") as f:
json.dump(results, f, indent=4)