File size: 1,855 Bytes
af2acd4
 
c671908
af2acd4
 
 
 
 
 
 
 
 
 
 
1edd506
af2acd4
 
 
326ac2a
 
 
1edd506
 
 
 
326ac2a
 
 
 
af2acd4
 
 
 
 
 
c671908
 
1edd506
c671908
 
 
 
 
 
 
 
 
1edd506
c671908
 
 
1edd506
 
 
 
c671908
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
from dataclasses import dataclass
from typing import Optional

import streamlit as st
from huggingface_hub import DatasetFilter, HfApi
from huggingface_hub.hf_api import DatasetInfo


@dataclass(frozen=True, eq=True)
class EvaluationInfo:
    """Immutable identity of a single evaluation configuration.

    ``frozen=True`` together with ``eq=True`` makes instances hashable, so an
    evaluation can be identified by ``hash(EvaluationInfo(...))`` (see
    ``compute_evaluation_id`` / ``filter_evaluated_models`` in this module).
    """

    # Evaluation task identifier (e.g. a pipeline task name — TODO confirm format).
    task: str
    # Model repo id under evaluation.
    model: str
    # Dataset coordinates the model is evaluated on.
    dataset_name: str
    dataset_config: str
    dataset_split: str
    # Must be a frozenset: a mutable set would make the frozen dataclass unhashable.
    metrics: frozenset


def compute_evaluation_id(dataset_info: DatasetInfo) -> Optional[int]:
    """Return a stable hash identifying the evaluation described by a dataset card.

    Args:
        dataset_info: Metadata for an evaluation dataset. When ``cardData`` is
            present it is expected to contain an ``eval_info`` mapping — TODO
            confirm this invariant holds for all `autoevaluate` datasets.

    Returns:
        ``hash`` of the corresponding :class:`EvaluationInfo`, or ``None``
        when the dataset has no card data.
    """
    # Original annotation claimed ``int`` but the no-card branch returns None;
    # the signature now reflects that.
    if dataset_info.cardData is None:
        return None
    metadata = dataset_info.cardData["eval_info"]
    # The column mapping is not part of an evaluation's identity, so drop it.
    # NOTE: this mutates the cardData dict in place, matching prior behavior.
    metadata.pop("col_mapping", None)
    # TODO(lewtun): populate dataset cards with metric info
    # Normalise metrics to a hashable frozenset; a missing key means "no metrics".
    metadata["metrics"] = frozenset(metadata.get("metrics", ()))
    return hash(EvaluationInfo(**metadata))


def get_evaluation_ids():
    """Fetch all `autoevaluate` datasets from the Hub and hash each one.

    Returns:
        A list with one entry per dataset: the evaluation id from
        ``compute_evaluation_id`` (which may be ``None`` for datasets
        lacking card data).
    """
    author_filter = DatasetFilter(author="autoevaluate")
    datasets = HfApi().list_datasets(filter=author_filter, full=True)
    return [compute_evaluation_id(info) for info in datasets]


def filter_evaluated_models(models, task, dataset_name, dataset_config, dataset_split, metrics):
    """Drop models that have already been evaluated on this exact configuration.

    For each already-evaluated model a Streamlit info box is shown explaining
    the exclusion.

    Args:
        models: List of model ids; filtered **in place** and also returned.
        task: Evaluation task name.
        dataset_name: Dataset the models are evaluated on.
        dataset_config: Dataset configuration name.
        dataset_split: Dataset split name.
        metrics: Iterable of metric names for this evaluation.

    Returns:
        The same ``models`` list object, with evaluated entries removed.
    """
    evaluation_ids = get_evaluation_ids()
    # Loop-invariant: compute the hashable metrics key once, not per model.
    metrics_key = frozenset(metrics)

    kept = []
    for model in models:
        candidate_id = hash(
            EvaluationInfo(
                task=task,
                model=model,
                dataset_name=dataset_name,
                dataset_config=dataset_config,
                dataset_split=dataset_split,
                metrics=metrics_key,
            )
        )
        if candidate_id in evaluation_ids:
            st.info(
                f"Model `{model}` has already been evaluated on this configuration. \
                    This model will be excluded from the evaluation job..."
            )
        else:
            kept.append(model)

    # BUG FIX: the original called ``models.pop(idx)`` while enumerating
    # ``models``, which skips the element right after each removal (so two
    # consecutive already-evaluated models would leave the second one in).
    # Assigning via a slice preserves the original in-place mutation semantics
    # for callers that hold a reference to ``models``.
    models[:] = kept
    return models