"""
Data service provider
"""

import pandas as pd

from utils.cache_decorator import cache_df_with_custom_key, cache_dict_with_custom_key
from utils.http_utils import get

# Columns shown in the leaderboard table.
COLUMNS = ['model_name',
           'embd_dtype', 'embd_dim', 'num_params', 'max_tokens', 'similarity',
           'query_instruct', 'corpus_instruct',
           'ndcg_at_10',
           ]
# Display type of each column above, kept positionally in sync with COLUMNS.
COLUMNS_TYPES = ["markdown",
                 'str', 'str', 'number', 'number', 'str',
                 'str', 'str',
                 'number',
                 ]

GIT_URL = "https://raw.githubusercontent.com/embedding-benchmark/ebr/refs/heads/main/results/"
DATASET_URL = f"{GIT_URL}datasets.json"
MODEL_URL = f"{GIT_URL}models.json"
RESULT_URL = f"{GIT_URL}results.json"
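
# Rough shape of the fetched payloads, inferred from the parsing in
# DataEngine.jsons_to_df below (not from a documented schema):
#   results.json:  [{"dataset_name": ..., "results": [{"model_name": ..., "ndcg_at_10": ..., ...}, ...]}, ...]
#   datasets.json: [{"name": ..., "leaderboard": ..., "datasets": [...]}, ...]
#   models.json:   [{"model_name": ..., "embd_dtype": ..., ..., "reference": ...}, ...]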


class DataEngine:

    def __init__(self):
        self.df = self.init_dataframe()

    @property
    @cache_dict_with_custom_key("models")
    def models(self):
        """
        Get models data
        """
        res = get(MODEL_URL)
        if res.status_code == 200:
            return res.json()
        return {}

    @property
    @cache_dict_with_custom_key("datasets")
    def datasets(self):
        """
        Get datasets data
        """
        res = get(DATASET_URL)
        if res.status_code == 200:
            return res.json()
        return {}

    @property
    @cache_dict_with_custom_key("results")
    def results(self):
        """
        Get results data
        """
        res = get(RESULT_URL)
        if res.status_code == 200:
            return res.json()
        return {}

    def init_dataframe(self):
        """
        Initialize DataFrame
        """
        d = {"hello": [123], "world": [456]}
        return pd.DataFrame(d)

    @cache_df_with_custom_key("json_result")
    def jsons_to_df(self):
        """
        Build the leaderboard DataFrame by joining the results, datasets and
        models payloads.
        """
        results_list = self.results
        if not results_list or not self.datasets or not self.models:
            # An upstream fetch failed; return an empty leaderboard frame.
            return pd.DataFrame(columns=COLUMNS + ["group_name", "reference"])

        # Flatten each per-dataset result block, tagging rows with their dataset name.
        df_results_list = []
        for result_dict in results_list:
            dataset_name = result_dict["dataset_name"]
            df_result_row = pd.DataFrame(result_dict["results"])
            df_result_row["dataset_name"] = dataset_name
            df_results_list.append(df_result_row)
        df_result = pd.concat(df_results_list)

        # Expand each dataset group into one row per (group_name, dataset_name, leaderboard).
        df_datasets_list = []
        for item in self.datasets:
            dataset_names = item["datasets"]
            df_dataset_row = pd.DataFrame(
                {
                    "group_name": [item["name"] for _ in range(len(dataset_names))],
                    "dataset_name": dataset_names,
                    "leaderboard": [item["leaderboard"] for _ in range(len(dataset_names))]
                }
            )
            df_datasets_list.append(df_dataset_row)
        df_dataset = pd.concat(df_datasets_list).drop_duplicates()

        # Model metadata, one row per model (embedding dtype/dimension, params, instructions, reference).
        models_list = self.models

        df_model = pd.DataFrame(models_list)

        # Attach each per-dataset score to its group, average ndcg_at_10 per (model, group),
        # then join in the model metadata.
        df = pd.merge(df_result, df_dataset, on=["dataset_name"], how="inner")
        df = df.groupby(["model_name", "group_name"], as_index=False)["ndcg_at_10"].mean()
        df = pd.merge(df, df_model, on=["model_name"], how="inner")

        if df.empty:
            return pd.DataFrame(columns=COLUMNS + ["group_name", "reference"])
        return df[COLUMNS + ["group_name", "reference"]]

    def filter_df(self, group_name: str):
        """
        Filter the leaderboard rows to a single dataset group, keeping only the
        display columns.
        """
        df = self.jsons_to_df()

        return df[df["group_name"] == group_name][COLUMNS][:]
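

if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): build the engine and print the
    # leaderboard slice for one dataset group. "text" is a hypothetical group
    # name; real group names come from the "name" field in datasets.json.
    engine = DataEngine()
    print(engine.filter_df("text").head())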