#!/usr/bin/env python

import gradio as gr
import pandas as pd

from apscheduler.schedulers.background import BackgroundScheduler

from huggingface_hub import snapshot_download

from src.display.about import (
    CITATION_BUTTON_LABEL,
    CITATION_BUTTON_TEXT,
    EVALUATION_QUEUE_TEXT,
    INTRODUCTION_TEXT,
    LLM_BENCHMARKS_TEXT,
    LLM_BENCHMARKS_DETAILS,
    FAQ_TEXT,
    TITLE
)

from src.display.css_html_js import custom_css

from src.display.utils import (
    BENCHMARK_COLS,
    COLS,
    EVAL_COLS,
    EVAL_TYPES,
    NUMERIC_INTERVALS,
    TYPES,
    AutoEvalColumn,
    ModelType,
    fields,
    WeightType,
    Precision
)

from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, H4_TOKEN, IS_PUBLIC, QUEUE_REPO, REPO_ID, RESULTS_REPO
from src.populate import get_evaluation_queue_df, get_leaderboard_df
from src.submission.submit import add_new_eval
from src.utils import get_dataset_summary_table


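# Wrapper around snapshot_download that restarts the Space if the download fails.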
def ui_snapshot_download(repo_id, local_dir, repo_type, tqdm_class, etag_timeout):
    try:
        print(f"Downloading snapshot of {repo_id} to {local_dir}")
        snapshot_download(repo_id=repo_id, local_dir=local_dir, repo_type=repo_type, tqdm_class=tqdm_class, etag_timeout=etag_timeout)
    except Exception:
        # If the download fails (e.g. network or auth issue), restart the Space so it retries cleanly.
        restart_space()


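# Restart this Space via the Hub API (also used to recover from failed downloads).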
def restart_space():
    API.restart_space(repo_id=REPO_ID, token=H4_TOKEN)


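# Load the dataset summary table, sync the evaluation queue/result repos from the Hub,
# and build the initial leaderboard and queue dataframes.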
def init_space():
    dataset_df = get_dataset_summary_table(file_path='blog/Hallucination-Leaderboard-Summary.csv')

    # Skip the Hub downloads when running on the local development host.
    import socket
    if socket.gethostname() not in {'neuromancer'}:
        ui_snapshot_download(repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30)
        ui_snapshot_download(repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30)

    raw_data, original_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)

    finished_eval_queue_df, running_eval_queue_df, pending_eval_queue_df = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
    return dataset_df, original_df, finished_eval_queue_df, running_eval_queue_df, pending_eval_queue_df


dataset_df, original_df, finished_eval_queue_df, running_eval_queue_df, pending_eval_queue_df = init_space()
leaderboard_df = original_df.copy()


# Searching and filtering
def update_table(hidden_df: pd.DataFrame,
                 columns: list,
                 type_query: list,
                 precision_query: list,
                 size_query: list,
                 query: str):
    filtered_df = filter_models(hidden_df, type_query, size_query, precision_query)
    filtered_df = filter_queries(query, filtered_df)
    df = select_columns(filtered_df, columns)
    return df


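# Case-insensitive substring search over the hidden dummy column used for text search.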
def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:
    return df[(df[AutoEvalColumn.dummy.name].str.contains(query, case=False))]


def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
    # Columns that are always shown, plus the hidden dummy column used for search.
    always_here_cols = [c.name for c in fields(AutoEvalColumn) if c.never_hidden]
    dummy_col = [AutoEvalColumn.dummy.name]

    # Iterate over COLS (not `columns`) to preserve the canonical column ordering.
    filtered_df = df[
        always_here_cols + [c for c in COLS if c in df.columns and c in columns] + dummy_col
    ]
    return filtered_df


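# Split the search box on ";" so several queries can be combined, then drop duplicate rows.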
def filter_queries(query: str, filtered_df: pd.DataFrame):
    final_df = []
    if query != "":
        queries = [q.strip() for q in query.split(";")]
        for _q in queries:
            _q = _q.strip()
            if _q != "":
                temp_filtered_df = search_table(filtered_df, _q)
                if len(temp_filtered_df) > 0:
                    final_df.append(temp_filtered_df)
        if len(final_df) > 0:
            filtered_df = pd.concat(final_df)
            subset = [AutoEvalColumn.model.name, AutoEvalColumn.precision.name, AutoEvalColumn.revision.name]
            filtered_df = filtered_df.drop_duplicates(subset=subset)
    return filtered_df


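# Filter the table by model type, parameter-count interval, and precision.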
def filter_models(df: pd.DataFrame,
                  type_query: list,
                  size_query: list,
                  precision_query: list) -> pd.DataFrame:
    # Start from the full table and progressively narrow it down.
    filtered_df = df

    # Model types are matched by their leading emoji symbol.
    type_emoji = [t[0] for t in type_query]
    filtered_df = filtered_df.loc[filtered_df[AutoEvalColumn.model_type_symbol.name].isin(type_emoji)]
    filtered_df = filtered_df.loc[filtered_df[AutoEvalColumn.precision.name].isin(precision_query + ["None"])]

    # Keep only models whose parameter count falls in one of the selected size intervals.
    numeric_interval = pd.IntervalIndex(sorted([NUMERIC_INTERVALS[s] for s in size_query]))
    params_column = pd.to_numeric(filtered_df[AutoEvalColumn.params.name], errors="coerce")
    mask = params_column.apply(lambda x: any(numeric_interval.contains(x)))
    filtered_df = filtered_df.loc[mask]

    return filtered_df


# triggered only once at startup => read query parameter if it exists
def load_query(request: gr.Request):
    query = request.query_params.get("query") or ""
    return query

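# Pre-filter once at startup with every type, size, and precision selected,
# so the table initially shows all models.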
leaderboard_df = filter_models(
    df=leaderboard_df,
    type_query=[t.to_str(" : ") for t in ModelType],
    size_query=list(NUMERIC_INTERVALS.keys()),
    precision_query=[i.value.name for i in Precision],
)

import unicodedata

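# Characters without a Unicode name (unassigned code points) can break serialization
# of the table, so they are stripped before the data is returned.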
def is_valid_unicode(char):
    try:
        unicodedata.name(char)
        return True  # Valid Unicode character
    except ValueError:
        return False  # Invalid Unicode character

def remove_invalid_unicode(input_string):
    if isinstance(input_string, str):
        valid_chars = [char for char in input_string if is_valid_unicode(char)]
        return ''.join(valid_chars)
    else:
        return input_string  # Return non-string values as is

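# Hidden components that define the input/output signature of the /predict endpoint.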
dummy1 = gr.Textbox(visible=False)

hidden_leaderboard_table_for_search = gr.components.Dataframe(
    headers=COLS,
    datatype=TYPES,
    visible=False,
    line_breaks=False,
    interactive=False
)

def display(x, y):
    # The two inputs are unused; they only exist to satisfy the Interface signature.
    # Sanitize all string columns so the payload returned by the API is valid.
    for column in leaderboard_df.columns:
        if leaderboard_df[column].dtype == 'object':
            leaderboard_df[column] = leaderboard_df[column].apply(remove_invalid_unicode)

    subset_df = leaderboard_df[COLS]
    return subset_df

INTRODUCTION_TEXT = """
This Space is a copy of the LLM Trustworthy Leaderboard. Instead of displaying
the results as a table, it has been modified to simply provide a Gradio API interface.
Using the Python script below, users can easily access the full leaderboard data:
```python
# Import dependencies
from gradio_client import Client
# Initialize the Gradio client with the API URL
client = Client("https://rodrigomasini-data-only-hallucination-leaderboard.hf.space/")
try:
    # Perform the API call
    response = client.predict("", "", api_name='/predict')
    # Check whether the response is directly accessible
    if len(response) > 0:
        print("Response received!")
        headers = response.get('headers', [])
        data = response.get('data', [])
        print(headers)
        # Uncomment the block below to save the dataset in CSV format
        # (requires `import csv` at the top of the script):
        #csv_file_path = 'llm-trustworthy-benchmark.csv'
        # Open the CSV file for writing
        #with open(csv_file_path, mode='w', newline='', encoding='utf-8') as file:
        #    writer = csv.writer(file)
            # Write the headers
        #    writer.writerow(headers)
            # Write the data
        #    for row in data:
        #        writer.writerow(row)
        #print(f"Results saved to {csv_file_path}")
    # If the response prints as a JSON-like string, you can parse it with json.loads(response);
    # otherwise, adjust the handling to the actual structure of `response`.
except Exception as e:
    print(f"An error occurred: {e}")
```
"""

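# A minimal Interface: the Markdown input renders the usage notes, and the hidden
# dataframe output carries the full leaderboard through the API.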
interface = gr.Interface(
    fn=display,
    inputs=[gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text"), dummy1],
    outputs=[hidden_leaderboard_table_for_search]
)

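# Restart the Space every 30 minutes so it picks up freshly published results.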
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)

scheduler.start()

interface.launch()