Datasets:
tner
/

Modalities:
Text
Languages:
English
ArXiv:
Libraries:
Datasets
License:
File size: 2,652 Bytes
e83660f
 
 
 
 
 
3feb3b7
 
 
e83660f
3feb3b7
 
 
 
 
 
 
 
 
 
 
e83660f
 
3feb3b7
e83660f
3feb3b7
 
 
 
 
 
 
 
 
 
 
 
 
e83660f
 
 
3feb3b7
 
 
 
 
 
 
 
 
 
 
e83660f
3feb3b7
 
 
 
 
 
 
 
 
 
 
e83660f
 
 
3feb3b7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e83660f
3feb3b7
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
import json
import os
import requests

import pandas as pd

# Markdown link to the tweetner7 dataset card on the Hugging Face Hub,
# reused in every generated results table.
dataset_link = "[`tweetner7`](https://huggingface.co/datasets/tner/tweetner7)"
# Local cache directory for downloaded per-model metric JSON files.
metric_dir = 'metric_files'
os.makedirs(metric_dir, exist_ok=True)


def lm_link(_model):
    """Return a markdown link to *_model*'s page on the Hugging Face Hub."""
    return f"[`{_model}`](https://huggingface.co/{_model})"


def model_link(_model, _type):
    """Return a markdown link to the fine-tuned ``tner`` model on the Hub."""
    hub_id = f"tner/{_model}-tweetner7-{_type}"
    return f"[`{hub_id}`](https://huggingface.co/{hub_id})"


def download(_model, _type):
    """Fetch evaluation metrics for ``tner/{_model}-tweetner7-{_type}``.

    Results are cached on disk under ``metric_dir``; the Hub is only queried
    when no valid cached file exists.  Returns a flat dict mapping
    human-readable metric names to percentages rounded to two decimals.
    """
    url = f"https://huggingface.co/tner/{_model}-tweetner7-{_type}/raw/main/eval"
    filename = f"{metric_dir}/{_model}-{_type}.json"
    print(url, filename)
    try:
        # Cache hit: reuse the previously downloaded metrics.
        with open(filename) as f:
            return json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        # Cache miss (or corrupt cache file): rebuild from the Hub.
        tmp = {}
    for metric in ["metric.test_2021", "metric.test_2020", "metric_span.test_2021", "metric_span.test_2020"]:
        year = metric[-4:]
        response = requests.get(f"{url}/{metric}.json")
        # Fail loudly on HTTP errors instead of json-decoding an error page.
        response.raise_for_status()
        _metric = json.loads(response.content)
        if '_span' in metric:
            tmp[f"Entity-Span F1 ({year})"] = round(100 * _metric["micro/f1"], 2)
        else:
            tmp[f"Micro F1 ({year})"] = round(100 * _metric["micro/f1"], 2)
            tmp[f"Macro F1 ({year})"] = round(100 * _metric["macro/f1"], 2)
            tmp.update({f"F1 ({year})/{k}": round(100 * v['f1'], 2) for k, v in _metric["per_entity_metric"].items()})
    with open(filename, "w") as f:
        json.dump(tmp, f)
    return tmp


# Language models fine-tuned on tweetner7 (Hub model ids).
lms = [
    "roberta-large",
    "roberta-base",
    "cardiffnlp/twitter-roberta-base-2019-90m",
    "cardiffnlp/twitter-roberta-base-dec2020",
    # BUG FIX: a missing trailing comma here implicitly concatenated this
    # entry with "vinai/bertweet-large", silently dropping two models.
    "cardiffnlp/twitter-roberta-base-dec2021",
    "vinai/bertweet-large",
    "vinai/bertweet-base",
    "bert-large",
    "bert-base",
]

# Fine-tuning configurations, grouped so that each inner list is rendered
# as one comparison table.
types = [
    # temporal splits of the labelled data
    ["all", "continuous", "2021", "2020"],
    # random-split baseline
    ["random"],
    # self-labelling variants (only run with roberta-large)
    [
        "selflabel2020", "selflabel2021",
        "2020-selflabel2020-all", "2020-selflabel2021-all",
        "selflabel2020-continuous", "selflabel2021-continuous",
    ],
]


# Build one markdown results table per model-type group.
for tt in types:
    metrics = []
    for t in tt:
        for lm in lms:
            # Self-labelling experiments were only run with roberta-large.
            if 'selflabel' in t and lm != "roberta-large":
                continue
            _lm_link = lm_link(lm)
            # Hub tner model ids drop the org prefix (e.g. "cardiffnlp/").
            lm = os.path.basename(lm)
            # NOTE: original code computed model_link(lm, t) twice and left
            # the first result unused; call it once here instead.
            __metric = {
                "Model (link)": model_link(lm, t),
                "Data": dataset_link,
                "Language Model": _lm_link
            }
            __metric.update(download(lm, t))
            metrics.append(__metric)

    df = pd.DataFrame(metrics)
    print(tt)
    print(df.to_markdown(index=False))
    print()