import json
import os

import numpy as np
import pandas as pd

from src.display.formatting import has_no_nan_values, make_clickable_model
from src.display.utils import AutoEvalColumn, EvalQueueColumn
from src.leaderboard.read_evals import get_raw_eval_results


def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> tuple[list, pd.DataFrame]:
    # Builds the leaderboard dataframe from the individual eval results and
    # returns both the raw records and the display-ready dataframe.
    raw_data = get_raw_eval_results(results_path, requests_path)
    all_data_json = [v.to_dict() for v in raw_data]

    df = pd.DataFrame.from_records(all_data_json)
    # df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
    # df = df.sort_values(by=[AutoEvalColumn.task5.name], ascending=True)

    # Each per-task cell arrives as a nested array; stack the column and squeeze
    # it down to a flat numeric Series so ranking and rounding behave normally.
    for task_col in (AutoEvalColumn.task2.name, AutoEvalColumn.task3.name):
        df[task_col] = pd.Series(np.stack(np.array(df[task_col].values)).squeeze())
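    # Hypothetical example: if each cell holds np.array([[3.87]]), stacking the
    # column gives shape (n, 1, 1) and .squeeze() collapses it to (n,), i.e.
    # one scalar score per model.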

    # Combined leaderboard position: average of the MOS rank (task2, higher is
    # better) and the bitrate rank (task3, lower is better).
    mos_rank = df[AutoEvalColumn.task2.name].rank(method="min", numeric_only=True, ascending=False)
    bitrate_rank = df[AutoEvalColumn.task3.name].rank(method="min", numeric_only=True, ascending=True)
    df["Ranking"] = (mos_rank + bitrate_rank) / 2
    # the negated MOS acts as a tie-breaker so the higher-MOS entry sorts first
    df["revert_task2"] = -df[AutoEvalColumn.task2.name]
    df = df.sort_values(by=["Ranking", "revert_task2"], ascending=True)
    df["Rank"] = df.groupby("Precision").cumcount() + 1
    # drop the helper columns once the ordering is fixed
    df.pop("Ranking")
    df.pop("revert_task2")

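    # Illustrative example with hypothetical scores:
    #   model  MOS (task2)  bitrate (task3)  mos_rank  bitrate_rank  Ranking
    #   A      4.5          3.0              1         2             1.5
    #   B      4.2          1.5              2         1             1.5
    # A and B tie on "Ranking"; the negated-MOS tie-breaker sorts A above B.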
    
    df = df[cols].round(decimals=2)

    # filter out rows for which any benchmark value is missing
    df = df[has_no_nan_values(df, benchmark_cols)]
    return raw_data, df

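# Usage sketch (illustrative): the paths and column lists below are
# assumptions, normally supplied by the app's configuration module.
#
#   raw_data, leaderboard_df = get_leaderboard_df(
#       results_path="eval-results",
#       requests_path="eval-queue",
#       cols=COLS,
#       benchmark_cols=BENCHMARK_COLS,
#   )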

def get_evaluation_queue_df(save_path: str, cols: list) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    # Request files live either at the top level of save_path or one level deep
    # in per-model subfolders; collect them all, then split by status.
    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
    all_evals = []

    for entry in entries:
        if entry.endswith(".json"):
            file_path = os.path.join(save_path, entry)
            with open(file_path) as fp:
                data = json.load(fp)

            data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
            data[EvalQueueColumn.revision.name] = data.get("revision", "main")

            all_evals.append(data)
        elif os.path.isdir(os.path.join(save_path, entry)):
            # per-model subfolder: read every request file inside it
            sub_entries = [
                e for e in os.listdir(os.path.join(save_path, entry))
                if not e.startswith(".") and e.endswith(".json")
            ]
            for sub_entry in sub_entries:
                file_path = os.path.join(save_path, entry, sub_entry)
                with open(file_path) as fp:
                    data = json.load(fp)

                data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
                data[EvalQueueColumn.revision.name] = data.get("revision", "main")
                all_evals.append(data)

    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
    # "PENDING_NEW_EVAL" marks a finished eval that was flagged for a fresh run
    finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
    # columns=cols already restricts each frame to the requested columns
    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
    df_running = pd.DataFrame.from_records(running_list, columns=cols)
    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
    return df_finished, df_running, df_pending
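
# Usage sketch (illustrative; "eval-queue" and EVAL_COLS are assumptions):
#
#   finished_df, running_df, pending_df = get_evaluation_queue_df(
#       save_path="eval-queue",
#       cols=EVAL_COLS,
#   )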