"""
Data service provider: fetches the models/datasets/results JSON published by
the ebr benchmark and assembles the leaderboard DataFrame.
"""

import pandas as pd

from utils.cache_decorator import cache_df_with_custom_key, cache_dict_with_custom_key
from utils.http_utils import get

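# Display columns for the leaderboard table; COLUMNS_TYPES lists the type used
# to render each entry, in the same order.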
COLUMNS = [
    'model_name',
    'embd_dtype', 'embd_dim', 'num_params', 'max_tokens', 'similarity',
    'query_instruct', 'corpus_instruct',
]
COLUMNS_TYPES = [
    'markdown',
    'str', 'str', 'number', 'number', 'str',
    'str', 'str',
]

GIT_URL = "https://raw.githubusercontent.com/embedding-benchmark/ebr/refs/heads/main/results/"
DATASET_URL = f"{GIT_URL}datasets.json"
MODEL_URL = f"{GIT_URL}models.json"
RESULT_URL = f"{GIT_URL}results.json"


class DataEngine:
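    """
    Load the benchmark JSON artifacts (models, datasets, results) from GitHub
    and assemble them into leaderboard DataFrames.
    """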

    def __init__(self):
        self.df = self.init_dataframe()

    @property
    @cache_dict_with_custom_key("models")
    def models(self):
        """
        Fetch model metadata from models.json; returns {} on HTTP failure
        """
        res = get(MODEL_URL)
        if res.status_code == 200:
            return res.json()
        return {}

    @property
    @cache_dict_with_custom_key("datasets")
    def datasets(self):
        """
        Fetch dataset group definitions from datasets.json; returns {} on HTTP failure
        """
        res = get(DATASET_URL)
        if res.status_code == 200:
            return res.json()
        return {}

    @property
    @cache_dict_with_custom_key("results")
    def results(self):
        """
        Fetch per-dataset evaluation results from results.json; returns {} on HTTP failure
        """
        res = get(RESULT_URL)
        if res.status_code == 200:
            return res.json()
        return {}

    def init_dataframe(self):
        """
        Initialize a small placeholder DataFrame
        """
        d = {"hello": [123], "world": [456]}
        return pd.DataFrame(d)

    @cache_df_with_custom_key("json_result")
    def jsons_to_df(self):
        """
        Join the results, datasets and models JSON into one leaderboard DataFrame
        """
        results_list = self.results
        # ``results`` is {} when the fetch fails; bail out before pd.concat,
        # which raises when given an empty list of frames.
        if not results_list:
            return pd.DataFrame(columns=COLUMNS + ["reference"])
        df_results_list = []
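        # Flatten results.json: one row per (model, dataset) with its metrics.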
        for result_dict in results_list:
            dataset_name = result_dict["dataset_name"]
            df_result_row = pd.DataFrame(result_dict["results"])
            df_result_row["dataset_name"] = dataset_name
            df_results_list.append(df_result_row)
        df_result = pd.concat(df_results_list)

        df_result = df_result[["model_name", "dataset_name", "ndcg_at_10"]]

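        # Expand datasets.json: each group contributes one
        # (group_name, dataset_name, leaderboard) row per dataset it contains.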
        df_datasets_list = []
        for item in self.datasets:
            dataset_names = item["datasets"]
            df_dataset_row = pd.DataFrame(
                {
                    "group_name": item["name"],  # scalars broadcast over rows
                    "dataset_name": dataset_names,
                    "leaderboard": item["leaderboard"],
                }
            )
            df_datasets_list.append(df_dataset_row)
        df_dataset = pd.concat(df_datasets_list).drop_duplicates()

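        # models.json is a list of per-model records, so it loads directly.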
        models_list = self.models

        df_model = pd.DataFrame(models_list)

        df = pd.merge(df_result, df_dataset, on=["dataset_name"], how="inner")
        # df = pd.merge(df, df_model, on=["model_name"], how="inner")

        # Unique-dataset count per group, used in the column headers below.
        dataset_num_map = df.groupby("group_name")["dataset_name"].nunique().to_dict()

        # Create a list of open datasets
        open_datasets = []
        for result in results_list:
            if not result.get("is_closed", False):
                open_datasets.append(result["dataset_name"])

        # Count open datasets
        open_dataset_count = len(open_datasets)

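        # Mean nDCG@10 per (model, group), then pivot the groups into columns.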
        grouped_model = df.groupby(["model_name", "group_name"]).agg({
            "ndcg_at_10": "mean",
        }).reset_index()

        pivot = grouped_model.pivot(index="model_name", columns="group_name", values=["ndcg_at_10"])

        # Rename the pivoted columns to "<Group> Average (<n> datasets)"; the
        # "text" group becomes the overall "Average (<n> datasets)" column.
        pivot.columns = [
            f"Average ({dataset_num_map[group]} datasets)" if group == "text"
            else f"{group.capitalize()} Average ({dataset_num_map[group]} datasets)"
            for _, group in pivot.columns
        ]

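        # One column per individual dataset, one row per model.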
        pivot_dataset = df_result.pivot(index="model_name", columns="dataset_name", values="ndcg_at_10")

        # Calculate open average
        open_df = df_result[df_result["dataset_name"].isin(open_datasets)]
        open_avg = open_df.groupby("model_name")["ndcg_at_10"].mean().reset_index()
        open_avg = open_avg.rename(columns={"ndcg_at_10": f"Open average ({open_dataset_count} datasets)"})

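        # Stitch together model metadata, group averages, the open-dataset
        # average, and the per-dataset scores into the final leaderboard.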
        df = pd.merge(df_model, pivot, on="model_name")
        df = pd.merge(df, open_avg, on="model_name")
        df = pd.merge(df, pivot_dataset, on="model_name")

        if df.empty:
            return pd.DataFrame(columns=COLUMNS + ["reference"])
        return df

    def filter_df(self, group_name: str):
        """
        Return the display columns for models evaluated in ``group_name``
        """
        df = self.jsons_to_df()
        # jsons_to_df pivots group_name away into the average-column names, so
        # match this group's average column and keep models with a score in it.
        prefix = "Average (" if group_name == "text" else f"{group_name.capitalize()} Average ("
        group_cols = [col for col in df.columns if isinstance(col, str) and col.startswith(prefix)]
        if not group_cols:
            return pd.DataFrame(columns=COLUMNS)
        return df[df[group_cols[0]].notna()][COLUMNS]
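

if __name__ == "__main__":
    # Minimal usage sketch: assumes network access to the raw GitHub URLs above
    # and that the local ``utils`` package (the cache decorators and ``get``)
    # is on the import path, as in this repo.
    engine = DataEngine()
    leaderboard = engine.jsons_to_df()
    print(leaderboard.head())
    print(engine.filter_df("text").head())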