import pandas as pd
from datasets import load_dataset

# Short description of each SuperTweetEval task.
task_description = {
    "tweet_intimacy": "regression on a single text",
    "tweet_ner7": "sequence labeling",
    "tweet_qa": "generation",
    "tweet_similarity": "regression on two texts",
    "tweet_topic": "multi-label classification",
    "tempo_wic": "binary classification on two texts",
    "tweet_sentiment": "ABSA on a five-point scale",
    "tweet_hate": "multi-class classification",
    "tweet_emoji": "multi-class classification",
    "tweet_nerd": "binary classification",
}

# Load each task and record its train/validation/test sizes.
table = []
for task, description in task_description.items():
    data = load_dataset("cardiffnlp/super_tweet_eval", task)
    tmp_table = {"task": task, "description": description}
    tmp_table["number of instances"] = " / ".join(
        str(len(data[split])) for split in ["train", "validation", "test"]
    )
    table.append(tmp_table)

# Render the per-task summary as a markdown table.
df = pd.DataFrame(table)
print(df.to_markdown(index=False))