Datasets:

Languages:
English
Multilinguality:
monolingual
Size Categories:
1K<n<10K
ArXiv:
Tags:
License:
File size: 1,656 Bytes
e7a1529
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
01e0ecb
e7a1529
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
import json
import os
import requests

import pandas as pd


def download(filename, url):
    """Return the parsed JSON content of *filename*, downloading it from *url* first if needed.

    The file acts as an on-disk cache: if it is missing or does not contain
    valid JSON, a fresh copy is fetched and written before parsing.

    Args:
        filename: Local path of the cached JSON file.
        url: Remote URL to fetch when the cache is absent or corrupt.

    Returns:
        The deserialized JSON object.

    Raises:
        requests.HTTPError: If the download responds with an error status.
        json.JSONDecodeError: If the downloaded content is not valid JSON.
    """
    try:
        with open(filename) as f:
            # Cache hit: return the parsed content directly.
            return json.load(f)
    except (OSError, ValueError):
        # Cache miss (file absent/unreadable) or corrupt JSON: re-download.
        # "or '.'" guards against a bare filename with no directory part.
        os.makedirs(os.path.dirname(filename) or ".", exist_ok=True)
        r = requests.get(url, timeout=30)
        # Fail loudly on HTTP errors instead of caching an error page as JSON.
        r.raise_for_status()
        with open(filename, "wb") as f:
            f.write(r.content)
    with open(filename) as f:
        return json.load(f)



# Model checkpoints to summarize: five encoder variants, each fine-tuned
# either on the full data ("-all") or on the 2020 split only ("-2020").
models = [
    "cardiffnlp/roberta-large-tweet-topic-single-all",
    "cardiffnlp/roberta-base-tweet-topic-single-all",
    "cardiffnlp/twitter-roberta-base-2019-90m-tweet-topic-single-all",
    "cardiffnlp/twitter-roberta-base-dec2020-tweet-topic-single-all",
    "cardiffnlp/twitter-roberta-base-dec2021-tweet-topic-single-all",
    "cardiffnlp/roberta-large-tweet-topic-single-2020",
    "cardiffnlp/roberta-base-tweet-topic-single-2020",
    "cardiffnlp/twitter-roberta-base-2019-90m-tweet-topic-single-2020",
    "cardiffnlp/twitter-roberta-base-dec2020-tweet-topic-single-2020",
    "cardiffnlp/twitter-roberta-base-dec2021-tweet-topic-single-2020",
]

# Local cache directory for each model's metric_summary.json.
os.makedirs("metric_files", exist_ok=True)

metrics = []
for model_name in models:
    # The "-all" suffix marks models trained on both years of data.
    training_data = "all (2020 + 2021)" if model_name.endswith("all") else "2020 only"
    summary = download(
        f"metric_files/{os.path.basename(model_name)}.json",
        f"https://huggingface.co/{model_name}/raw/main/metric_summary.json",
    )
    metrics.append(
        {
            # Markdown link so the rendered table points at the model hub page.
            "model": f"[{model_name}](https://huggingface.co/{model_name})",
            "training data": training_data,
            "F1": summary["test/eval_f1"],
            "F1 (macro)": summary["test/eval_f1_macro"],
            "Accuracy": summary["test/eval_accuracy"],
        }
    )

# Emit the comparison table as GitHub-flavored markdown on stdout.
df = pd.DataFrame(metrics)
print(df.to_markdown(index=False))