import glob
import json
import math
import os
from dataclasses import dataclass
import dateutil
import numpy as np
from src.display.formatting import make_clickable_model
from src.display.utils import AutoEvalColumn, Tasks, Groups
@dataclass
class EvalResult:
eval_name: str # org_model_date (uid)
full_model: str # org/model (path on hub)
org: str
model: str
results: dict
date: str = "" # submission date of request file
@classmethod
    def init_from_json_file(cls, json_filepath):
        """Initializes the result from a single model result file."""
with open(json_filepath) as fp:
data = json.load(fp)
config = data.get("config")
# Get model and org
org_and_model = config.get("model_name", None)
org_and_model = org_and_model.split("/", 1)
org = org_and_model[0]
model = org_and_model[1]
date = config.get("submitted_time", None)
result_key = f"{org}_{model}_{date}"
full_model = "/".join(org_and_model)
# Extract results available in this file (some results are split in several files)
results = {}
for task in Tasks:
# We average all scores of a given metric (not all metrics are present in all files)
accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
if accs.size == 0 or any([acc is None for acc in accs]):
continue
mean_acc = np.mean(accs) * 100.0
results[task.benchmark] = mean_acc
        return cls(
eval_name=result_key,
full_model=full_model,
org=org,
model=model,
results=results,
date=date
)
def to_dict(self):
"""Converts the Eval Result to a dict compatible with our dataframe display"""
average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
data_dict = {
"eval_name": self.eval_name, # not a column, just a save name,
AutoEvalColumn.model_submission_date.name: self.date,
AutoEvalColumn.model.name: make_clickable_model(self.full_model),
AutoEvalColumn.dummy.name: self.full_model,
AutoEvalColumn.average.name: average,
}
for task in Tasks:
data_dict[task.col_name] = self.results[task.benchmark]
return data_dict
@dataclass
class EvalResultGroup:
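    """Same shape as EvalResult, but aggregated over benchmark Groups instead of individual Tasks."""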
eval_name: str # org_model_date (uid)
full_model: str # org/model (path on hub)
org: str
model: str
results: dict
date: str = "" # submission date of request file
@classmethod
    def init_from_json_file(cls, json_filepath):
        """Initializes the result from a single model result file."""
with open(json_filepath) as fp:
data = json.load(fp)
config = data.get("config")
# Get model and org
org_and_model = config.get("model_name", None)
org_and_model = org_and_model.split("/", 1)
org = org_and_model[0]
model = org_and_model[1]
date = config.get("submitted_time", None)
result_key = f"{org}_{model}_{date}"
full_model = "/".join(org_and_model)
# Extract results available in this file (some results are split in several files)
results = {}
for task in Groups:
# We average all scores of a given metric (not all metrics are present in all files)
accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
if accs.size == 0 or any([acc is None for acc in accs]):
continue
mean_acc = np.mean(accs) * 100.0
results[task.benchmark] = mean_acc
        return cls(
eval_name=result_key,
full_model=full_model,
org=org,
model=model,
results=results,
date=date
)
def to_dict(self):
"""Converts the Eval Result to a dict compatible with our dataframe display"""
average = sum([v for v in self.results.values() if v is not None]) / len(Groups)
data_dict = {
"eval_name": self.eval_name, # not a column, just a save name,
AutoEvalColumn.model_submission_date.name: self.date,
AutoEvalColumn.model.name: make_clickable_model(self.full_model),
AutoEvalColumn.dummy.name: self.full_model,
AutoEvalColumn.average.name: average,
}
for task in Groups:
data_dict[task.col_name] = self.results[task.benchmark]
return data_dict
def get_raw_eval_results(results_path: str) -> list[EvalResult]:
"""From the path of the results folder root, extract all needed info for results"""
model_result_filepaths = []
for root, _, files in os.walk(results_path):
# We should only have json files in model results
if len(files) == 0 or any([not f.endswith(".json") for f in files]):
continue
# Sort the files by date
files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7])
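        # (Ascending order means that, if several files later map to the same
        # eval_name, the most recent one is processed last and wins in eval_results.)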
for file in files:
model_result_filepaths.append(os.path.join(root, file))
eval_results = {}
for model_result_filepath in model_result_filepaths:
# Creation of result
eval_result = EvalResult.init_from_json_file(model_result_filepath)
# Store results of same eval together
eval_name = eval_result.eval_name
eval_results[eval_name] = eval_result
results = []
for v in eval_results.values():
try:
v.to_dict() # we test if the dict version is complete
results.append(v)
except KeyError: # not all eval values present
continue
return results
def get_group_eval_results(results_path: str) -> list[EvalResultGroup]:
"""From the path of the results folder root, extract all needed info for results"""
model_result_filepaths = []
for root, _, files in os.walk(results_path):
# We should only have json files in model results
if len(files) == 0 or any([not f.endswith(".json") for f in files]):
continue
# Sort the files by date
files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7])
for file in files:
model_result_filepaths.append(os.path.join(root, file))
eval_results = {}
for model_result_filepath in model_result_filepaths:
# Creation of result
eval_result = EvalResultGroup.init_from_json_file(model_result_filepath)
# Store results of same eval together
eval_name = eval_result.eval_name
eval_results[eval_name] = eval_result
results = []
for v in eval_results.values():
try:
v.to_dict() # we test if the dict version is complete
results.append(v)
except KeyError: # not all eval values present
print("key error")
continue
return results
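

# Usage sketch (illustration only, not part of the production module): the results
# directory name and the pandas conversion below are assumptions about how the
# leaderboard app consumes these helpers, not something this file defines.
if __name__ == "__main__":
    import pandas as pd  # assumed to be available in the leaderboard environment

    raw_results = get_raw_eval_results("eval-results")      # hypothetical results root
    group_results = get_group_eval_results("eval-results")

    # Each result object becomes one row of the display dataframe.
    task_df = pd.DataFrame([r.to_dict() for r in raw_results])
    group_df = pd.DataFrame([g.to_dict() for g in group_results])
    print(task_df.head())
    print(group_df.head())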