import copy
import json
import os

import pandas as pd
from tqdm import tqdm

from src.display.formatting import has_no_nan_values, make_clickable_model
from src.display.utils import AutoEvalColumn, EvalQueueColumn
from src.leaderboard.filter_models import filter_models
from src.leaderboard.read_evals import get_raw_eval_results, EvalResult, update_model_type_with_open_llm_request_file

from src.backend.envs import Tasks as BackendTasks
from src.display.utils import Tasks

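# Display column names ("<benchmark>/<metric>") used below to compute the aggregate
# Factuality and Faithfulness scores in get_leaderboard_df.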
factuality_tasks = [
    "NQ Open/EM",
    "TriviaQA/EM",
    "PopQA/EM",
    "FEVER/Acc",
    "TrueFalse/Acc",
    "TruthQA MC2/Acc",
]
faithfulness_tasks = [
    "MemoTrap/Acc",
    "IFEval/Acc",
    "NQ-Swap/EM",
    "RACE/Acc",
    "SQuADv2/EM",
    "CNN-DM/ROUGE",
    "XSum/ROUGE",
    "HaluQA/Acc",
    "FaithDial/Acc",
]

def get_leaderboard_df(results_path: str,
                       requests_path: str,
                       requests_path_open_llm: str,
                       cols: list,
                       benchmark_cols: list,
                       is_backend: bool = False) -> tuple[list[EvalResult], pd.DataFrame]:
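    """Build the leaderboard table.

    Returns the raw EvalResult objects together with a DataFrame containing one row
    per completed model, restricted to `cols`, with aggregate 'Factuality' and
    'Faithfulness' columns inserted.
    """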
    # Collect the raw evaluation results as a list of EvalResult objects
    raw_data: list[EvalResult] = get_raw_eval_results(results_path, requests_path, requests_path_open_llm)
    if requests_path_open_llm != "":
        for result_idx in tqdm(range(len(raw_data)), desc="updating model type with open llm leaderboard"):
            raw_data[result_idx] = update_model_type_with_open_llm_request_file(raw_data[result_idx], requests_path_open_llm)

    all_data_json_ = [v.to_dict() for v in raw_data if v.is_complete()]

    # Map each display column name to its (benchmark, metric) pair
    name_to_bm_map = {}

    task_iterator = BackendTasks if is_backend else Tasks

    for task in task_iterator:
        task_value = task.value
        name_to_bm_map[task_value.col_name] = (task_value.benchmark, task_value.metric)

    # bm_to_name_map = {bm: name for name, bm in name_to_bm_map.items()}

    # Flatten each result: replace the per-task score dict with the single metric value
    all_data_json = []
    for entry in all_data_json_:
        new_entry = copy.deepcopy(entry)

        for k, v in entry.items():
            if k in name_to_bm_map:
                _benchmark, metric = name_to_bm_map[k]
                new_entry[k] = v[metric]

        all_data_json.append(new_entry)

    # all_data_json.append(baseline_row)
    filter_models(all_data_json)

    df = pd.DataFrame.from_records(all_data_json)

    # if AutoEvalColumn.average.name in df:
    #     df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)

    # 'Faithfulness' and 'Factuality' are aggregate columns computed below, so drop them here
    cols_mod = copy.deepcopy(cols)
    cols_mod.remove("Faithfulness")
    cols_mod.remove("Factuality")
    df = df[cols_mod]  # .round(decimals=2)

    # Filter out rows for which any benchmark result has not been produced yet
    df = df[has_no_nan_values(df, benchmark_cols)]

    # Aggregate scores: unweighted mean over each task group
    factuality_score = df[factuality_tasks].mean(axis=1)
    faithfulness_score = df[faithfulness_tasks].mean(axis=1)
    df.insert(2, "Factuality", factuality_score)
    df.insert(2, "Faithfulness", faithfulness_score)
    df = df.round(decimals=2)

    return raw_data, df


def get_evaluation_queue_df(save_path: str, cols: list) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
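    """Read every eval request file under `save_path` (top level and one folder deep)
    and return three DataFrames: finished, running, and pending requests."""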
    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
    all_evals = []

    for entry in entries:
        if entry.endswith(".json"):
            file_path = os.path.join(save_path, entry)
            with open(file_path) as fp:
                data = json.load(fp)

            data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
            data[EvalQueueColumn.revision.name] = data.get("revision", "main")

            all_evals.append(data)
        elif not entry.endswith(".md"):
            # This entry is a folder: collect every request file inside it
            sub_entries = [e for e in os.listdir(os.path.join(save_path, entry)) if not e.startswith(".")]
            for sub_entry in sub_entries:
                file_path = os.path.join(save_path, entry, sub_entry)
                with open(file_path) as fp:
                    data = json.load(fp)

                data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
                data[EvalQueueColumn.revision.name] = data.get("revision", "main")
                all_evals.append(data)

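    # Split the collected requests into the three status buckets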
    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
    finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
    df_running = pd.DataFrame.from_records(running_list, columns=cols)
    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
    return df_finished[cols], df_running[cols], df_pending[cols]
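

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module. The paths and column
    # lists below are hypothetical placeholders chosen for illustration only; real
    # values come from this repository's configuration.
    example_benchmark_cols = factuality_tasks + faithfulness_tasks
    example_cols = ["Model", "Factuality", "Faithfulness"] + example_benchmark_cols

    raw_results, leaderboard_df = get_leaderboard_df(
        results_path="eval-results",   # hypothetical local results directory
        requests_path="eval-queue",    # hypothetical local requests directory
        requests_path_open_llm="",     # empty string skips the Open LLM request-file merge
        cols=example_cols,
        benchmark_cols=example_benchmark_cols,
    )
    print(leaderboard_df.head())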