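"""Attach metadata (model type, license, likes, parameter count) to leaderboard
entries, flag models with open discussions, and drop models whose authors asked
for them not to be submitted."""
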
import glob
import json
import os
from typing import List

from huggingface_hub import HfApi
from tqdm import tqdm

from src.get_model_info.hardocded_metadata.flags import DO_NOT_SUBMIT_MODELS, FLAGGED_MODELS
from src.get_model_info.hardocded_metadata.types import MODEL_TYPE_METADATA, ModelType, model_type_from_str
from src.get_model_info.utils import AutoEvalColumn, model_hyperlink

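# Shared Hugging Face API client, authenticated via the H4_TOKEN env variable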
api = HfApi(token=os.environ.get("H4_TOKEN", None))


def get_model_metadata(leaderboard_data: List[dict]):
    """Fill in model type, license, likes and params for each entry, reading
    the matching eval request file when available and falling back to the
    hardcoded metadata tables otherwise."""
    for model_data in tqdm(leaderboard_data):
        # All request files saved for this model, regardless of precision
        request_file_pattern = os.path.join(
            "eval-queue",
            model_data["model_name_for_query"] + "_eval_request_*" + ".json",
        )
        request_files = glob.glob(request_file_pattern)

        # Select the request file matching this entry's precision; when several
        # match, the last one in reverse-sorted order wins
        request_file = ""
        if len(request_files) == 1:
            request_file = request_files[0]
        elif len(request_files) > 1:
            request_files = sorted(request_files, reverse=True)
            for tmp_request_file in request_files:
                with open(tmp_request_file, "r") as f:
                    req_content = json.load(f)
                    if (
                        req_content["status"] in ["FINISHED", "PENDING_NEW_EVAL"]
                        and req_content["precision"] == model_data["Precision"].split(".")[-1]
                    ):
                        request_file = tmp_request_file

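        # request_file may still be "" here; open() then raises and we fall
        # back to the hardcoded metadata below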
        try:
            with open(request_file, "r") as f:
                request = json.load(f)
            model_type = model_type_from_str(request.get("model_type", ""))
            model_data[AutoEvalColumn.model_type.name] = model_type.value.name
            model_data[AutoEvalColumn.model_type_symbol.name] = model_type.value.symbol  # + ("🔺" if is_delta else "")
            model_data[AutoEvalColumn.license.name] = request.get("license", "?")
            model_data[AutoEvalColumn.likes.name] = request.get("likes", 0)
            model_data[AutoEvalColumn.params.name] = request.get("params", 0)
        except Exception:
            print(f"Could not find request file for {model_data['model_name_for_query']}")

            if model_data["model_name_for_query"] in MODEL_TYPE_METADATA:
                model_data[AutoEvalColumn.model_type.name] = MODEL_TYPE_METADATA[
                    model_data["model_name_for_query"]
                ].value.name
                model_data[AutoEvalColumn.model_type_symbol.name] = MODEL_TYPE_METADATA[
                    model_data["model_name_for_query"]
                ].value.symbol  # + ("🔺" if is_delta else "")
            else:
                model_data[AutoEvalColumn.model_type.name] = ModelType.Unknown.value.name
                model_data[AutoEvalColumn.model_type_symbol.name] = ModelType.Unknown.value.symbol

            # If we cannot find a request file, fall back to unknown license,
            # zero likes and zero params
            model_data[AutoEvalColumn.license.name] = "?"
            model_data[AutoEvalColumn.likes.name] = 0
            model_data[AutoEvalColumn.params.name] = 0


def flag_models(leaderboard_data: List[dict]):
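    """Append a flag notice and a link to the relevant discussion to the model
    name of every flagged model."""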
    for model_data in leaderboard_data:
        if model_data["model_name_for_query"] in FLAGGED_MODELS:
            issue_num = FLAGGED_MODELS[model_data["model_name_for_query"]].split("/")[-1]
            issue_link = model_hyperlink(
                FLAGGED_MODELS[model_data["model_name_for_query"]],
                f"See discussion #{issue_num}",
            )
            model_data[
                AutoEvalColumn.model.name
            ] = f"{model_data[AutoEvalColumn.model.name]} has been flagged! {issue_link}"


def remove_forbidden_models(leaderboard_data: List[dict]):
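    """Drop models whose authors asked for them not to be submitted. Mutates
    leaderboard_data in place and returns it."""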
    indices_to_remove = []
    for ix, model in enumerate(leaderboard_data):
        if model["model_name_for_query"] in DO_NOT_SUBMIT_MODELS:
            indices_to_remove.append(ix)

    for ix in reversed(indices_to_remove):
        leaderboard_data.pop(ix)
    return leaderboard_data


def apply_metadata(leaderboard_data: List[dict]):
    """Run the full pipeline: drop forbidden models, attach metadata, then flag
    models with open discussions. Returns the filtered, annotated list."""
    leaderboard_data = remove_forbidden_models(leaderboard_data)
    get_model_metadata(leaderboard_data)
    flag_models(leaderboard_data)
    return leaderboard_data
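

# Minimal usage sketch (not part of the original module): the entry shape below
# is an assumption based on the keys this file reads, and the model id is
# hypothetical. With no matching file under eval-queue/, the hardcoded
# fallbacks are exercised.
if __name__ == "__main__":
    demo_entry = {
        "model_name_for_query": "some-org/some-model",  # hypothetical model id
        "Precision": "Precision.float16",
        AutoEvalColumn.model.name: "some-org/some-model",
    }
    leaderboard = apply_metadata([demo_entry])
    print(json.dumps(leaderboard, indent=2))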