from dataclasses import dataclass, make_dataclass

from src.benchmarks import BenchmarksQA, BenchmarksLongDoc


def fields(raw_class):
    """Return the ColumnContent defaults stored on a generated column class,
    skipping dunder attributes (unlike dataclasses.fields, which returns Field objects)."""
    return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
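# For example, fields(AutoEvalColumnQA) (generated below) yields the ColumnContent
# defaults, such as ColumnContent(name="Retrieval Model", type="markdown", ...)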


# This class describes a user-facing column, so display names only need to be
# changed in one place instead of throughout the code
# frozen=True keeps ColumnContent hashable; on Python 3.11+, make_dataclass
# rejects unhashable instances used as field defaults
@dataclass(frozen=True)
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False


COL_NAME_AVG = "Average ⬆️"
COL_NAME_RETRIEVAL_MODEL = "Retrieval Model"
COL_NAME_RERANKING_MODEL = "Reranking Model"
COL_NAME_RANK = "Rank 🏆"

def make_autoevalcolumn(cls_name="AutoEvalColumnQA", benchmarks=BenchmarksQA):
    # Leaderboard columns
    auto_eval_column_dict = []
    # Fixed columns, present on every leaderboard
    auto_eval_column_dict.append(
        ["retrieval_model", ColumnContent, ColumnContent(COL_NAME_RETRIEVAL_MODEL, "markdown", True, never_hidden=True)]
    )
    auto_eval_column_dict.append(
        ["reranking_model", ColumnContent, ColumnContent(COL_NAME_RERANKING_MODEL, "markdown", True, never_hidden=True)]
    )
    auto_eval_column_dict.append(
        ["average", ColumnContent, ColumnContent(COL_NAME_AVG, "number", True)]
    )
    auto_eval_column_dict.append(
        ["rank", ColumnContent, ColumnContent(COL_NAME_RANK, "number", True)]
    )
    for benchmark in benchmarks:
        auto_eval_column_dict.append(
            [benchmark.name, ColumnContent, ColumnContent(benchmark.value.col_name, "number", True)]
        )

    # Use make_dataclass to build the column class dynamically, with one field per benchmark
    return make_dataclass(cls_name, auto_eval_column_dict, frozen=True)
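
# A sketch of what a generated class exposes, assuming the defaults above
# (`Cols` is a hypothetical name used only for illustration):
#
#   Cols = make_autoevalcolumn("Cols", BenchmarksQA)
#   Cols.average.name                  # "Average ⬆️"
#   Cols.retrieval_model.never_hidden  # True
#   [c.name for c in fields(Cols)]     # all user-facing column headers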


AutoEvalColumnQA = make_autoevalcolumn("AutoEvalColumnQA", BenchmarksQA)
AutoEvalColumnLongDoc = make_autoevalcolumn("AutoEvalColumnLongDoc", BenchmarksLongDoc)


# Column selection
COLS_QA = [c.name for c in fields(AutoEvalColumnQA) if not c.hidden]
COLS_LONG_DOC = [c.name for c in fields(AutoEvalColumnLongDoc) if not c.hidden]
TYPES = [c.type for c in fields(AutoEvalColumnQA) if not c.hidden]
COLS_LITE = [c.name for c in fields(AutoEvalColumnQA) if c.displayed_by_default and not c.hidden]

QA_BENCHMARK_COLS = [t.value.col_name for t in BenchmarksQA]
LONG_DOC_BENCHMARK_COLS = [t.value.col_name for t in BenchmarksLongDoc]
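

# Minimal sanity check (a sketch, not part of the leaderboard app): run this
# module directly to inspect the computed column lists, assuming src.benchmarks
# resolves on the import path.
if __name__ == "__main__":
    print("QA columns:", COLS_QA)
    print("QA column types:", TYPES)
    print("Default (lite) columns:", COLS_LITE)
    print("QA benchmark columns:", QA_BENCHMARK_COLS)
    print("Long-doc benchmark columns:", LONG_DOC_BENCHMARK_COLS)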