import json
import os

import pandas as pd

from src.display.formatting import has_no_nan_values, make_clickable_model
from src.display.utils import AutoEvalColumn, EvalQueueColumn
from src.leaderboard.read_evals import get_raw_eval_results


def get_leaderboard_df(eval_results_path, eval_requests_path, cols, benchmark_cols):
    """Build the leaderboard DataFrame from stored evaluation results.

    Args:
        eval_results_path: Path to the stored evaluation results.
        eval_requests_path: Path to the evaluation requests
            (not used by the loading step visible here).
        cols: Column names used for the empty placeholder frame.
        benchmark_cols: Benchmark column names (not used here).

    Returns:
        A DataFrame sorted by the 'average' column in descending order,
        or an empty DataFrame with `cols` as columns when no results exist.
    """
    # NOTE(review): `load_evaluation_results` is neither imported nor defined
    # in this chunk — presumably defined elsewhere in this file; confirm.
    df = load_evaluation_results(eval_results_path)

    if df.empty:
        print("No evaluation results found. The leaderboard is currently empty.")
        # Return a frame with the expected schema so callers can still render it.
        return pd.DataFrame(columns=cols)

    # Highest average score first.
    return df.sort_values(by=["average"], ascending=False)


def get_evaluation_queue_df(
    save_path: str, cols: list
) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Create the (finished, running, pending) evaluation-queue DataFrames.

    Evaluations are performed immediately, so all three queues are always
    empty. Three *distinct* empty DataFrames are returned (the original
    aliased one object three times) so that a caller mutating one frame
    does not silently affect the others.

    Args:
        save_path: Path where queue requests would be stored (unused).
        cols: Column names for each empty queue DataFrame.

    Returns:
        A tuple of three independent empty DataFrames with `cols` columns.
    """
    return (
        pd.DataFrame(columns=cols),
        pd.DataFrame(columns=cols),
        pd.DataFrame(columns=cols),
    )