import json
import os

import requests
import pandas as pd


def download(filename, url):
    """Return the parsed JSON at `filename`, downloading it from `url` first
    if the cached copy is missing or not valid JSON."""
    try:
        with open(filename) as f:
            json.load(f)
    except Exception:
        # Cache miss (or unreadable file): fetch the file and overwrite the cache.
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        with open(filename, "wb") as f:
            r = requests.get(url)
            f.write(r.content)
    with open(filename) as f:
        tmp = json.load(f)
    return tmp
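# Example (illustrative only): download() returns the parsed metric summary and
# re-fetches it only when the cached copy is missing or unreadable, e.g.
#   summary = download(
#       "metric_files/roberta-base-tweet-topic-multi-all.json",
#       "https://huggingface.co/cardiffnlp/roberta-base-tweet-topic-multi-all/raw/main/metric_summary.json",
#   )
#   summary["test/eval_f1"]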

models = [
    "cardiffnlp/roberta-large-tweet-topic-multi-all",
    "cardiffnlp/roberta-base-tweet-topic-multi-all",
    "cardiffnlp/twitter-roberta-base-2019-90m-tweet-topic-multi-all",
    "cardiffnlp/twitter-roberta-base-dec2020-tweet-topic-multi-all",
    "cardiffnlp/twitter-roberta-base-dec2021-tweet-topic-multi-all",
    "cardiffnlp/roberta-large-tweet-topic-multi-2020",
    "cardiffnlp/roberta-base-tweet-topic-multi-2020",
    "cardiffnlp/twitter-roberta-base-2019-90m-tweet-topic-multi-2020",
    "cardiffnlp/twitter-roberta-base-dec2020-tweet-topic-multi-2020",
    "cardiffnlp/twitter-roberta-base-dec2021-tweet-topic-multi-2020",
]

os.makedirs("metric_files", exist_ok=True)

metrics = []
for model in models:
    # Model names ending in "-all" were trained on 2020 + 2021 data; the rest on 2020 only.
    model_type = "all (2020 + 2021)" if model.endswith("all") else "2020 only"
    url = f"https://huggingface.co/{model}/raw/main/metric_summary.json"
    model_url = f"https://huggingface.co/{model}"
    metric = download(f"metric_files/{os.path.basename(model)}.json", url)
    metrics.append({
        "model": f"[{model}]({model_url})",
        "training data": model_type,
        "F1": metric["test/eval_f1"],
        "F1 (macro)": metric["test/eval_f1_macro"],
        "Accuracy": metric["test/eval_accuracy"],
    })

df = pd.DataFrame(metrics)
print(df.to_markdown(index=False))