meg-huggingface committed
Commit 72d2b05 · Parent(s): c9946e0
Switching to normalized task name.

app.py CHANGED
```diff
@@ -9,7 +9,7 @@ from huggingface_hub import HfApi, snapshot_download, ModelInfo, list_models
 from enum import Enum
 
 
-OWNER = "
+OWNER = "AIEnergyScore"
 COMPUTE_SPACE = f"{OWNER}/launch-computation-example"
 
 
```
```diff
@@ -22,7 +22,6 @@ task_mappings = {'automatic speech recognition':'automatic-speech-recognition',
     'Image to Text':'image-to-text', 'Question Answering':'question-answering', 'Text Generation': 'text-generation',
     'Image Classification':'image-classification', 'Sentence Similarity': 'sentence-similarity',
     'Image Generation':'image-generation', 'Summarization':'summarization'}
-
 @dataclass
 class ModelDetails:
     name: str
```
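The hunk above only drops a blank line, but it sits inside the `task_mappings` dict that gives this commit its name: human-readable labels are mapped to canonical Hub task IDs, and the normalized ID is what now gets stored and used for filtering. A minimal sketch with an abbreviated, illustrative subset of the mapping:

```python
# Illustrative subset of task_mappings from app.py: UI labels map to
# the canonical Hub task IDs used for filtering and stored in requests.
task_mappings = {'Text Generation': 'text-generation',
                 'Summarization': 'summarization'}

print(task_mappings['Text Generation'])  # -> 'text-generation'
```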
```diff
@@ -47,29 +46,25 @@ def add_docker_eval(zip_file):
     new_fid = new_fid_list[-1]
     if new_fid.endswith('.zip'):
         API.upload_file(
-            path_or_fileobj=zip_file
-            repo_id="
+            path_or_fileobj=zip_file,
+            repo_id="AIEnergyScore/tested_proprietary_models",
             path_in_repo='submitted_models/'+new_fid,
             repo_type="dataset",
             commit_message="Adding logs via submission Space.",
-            token=
+            token=TOKEN
         )
         gr.Info('Uploaded logs to dataset! We will validate their validity and add them to the next version of the leaderboard.')
     else:
         gr.Info('You can only upload .zip files here!')
 
 
-def add_new_eval(
-    repo_id: str,
-    task: str,
-):
+def add_new_eval(repo_id: str, task: str):
     model_owner = repo_id.split("/")[0]
-    model_name = repo_id.split("/")[1]
-    model_list=[]
+    model_name = repo_id.split("/")[1]
     current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
-    requests= load_dataset("
+    requests = load_dataset("AIEnergyScore/requests_debug", split="test", token=TOKEN)
     requests_dset = requests.to_pandas()
-    model_list= requests_dset[requests_dset['status'] == 'COMPLETED']['model'].tolist()
+    model_list = requests_dset[requests_dset['status'] == 'COMPLETED']['model'].tolist()
     task_models = list(API.list_models(filter=task_mappings[task]))
     task_model_names = [m.id for m in task_models]
     if repo_id in model_list:
```
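For context, the refactored `add_new_eval` prologue follows a common pattern: load the requests dataset, collect the already-benchmarked models, then list Hub models for the normalized task. A self-contained sketch, assuming the dataset is readable with your own credentials (the Space itself passes its `TOKEN`):

```python
from datasets import load_dataset
from huggingface_hub import HfApi

api = HfApi()

# Load the submission queue and keep only finished runs.
requests = load_dataset("AIEnergyScore/requests_debug", split="test")
completed = requests.to_pandas()
completed = completed[completed['status'] == 'COMPLETED']['model'].tolist()

# filter= takes the canonical task ID, e.g. 'text-generation';
# limit=5 just keeps this sketch quick.
task_model_names = [m.id for m in api.list_models(filter='text-generation', limit=5)]
```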
```diff
@@ -80,20 +75,21 @@ def add_new_eval(
     # Is the model info correctly filled?
     try:
         model_info = API.model_info(repo_id=repo_id)
+        model_size = get_model_size(model_info=model_info)
+        likes = model_info.likes
     except Exception:
-        gr.Info("Could not find information for model %s" % (
-
-
-
+        gr.Info("Could not find information for model %s" % (model_name))
+        model_size = None
+        likes = None
+
     gr.Info("Adding request")
-
-
+
     request_dict = {
         "model": repo_id,
         "status": "PENDING",
         "submitted_time": pd.to_datetime(current_time),
-        "task": task,
-        "likes":
+        "task": task_mappings[task],
+        "likes": likes,
         "params": model_size,
         "leaderboard_version": "v0",}
         #"license": license,
```
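The new `except` branch matters because `request_dict` below references `model_size` and `likes`; without the `None` defaults, a failed `model_info` lookup would raise a `NameError` instead of still producing a request row. A hedged sketch of the pattern (`get_model_size` is a helper defined elsewhere in app.py, so this only shows the `likes` half; the repo name is illustrative):

```python
from huggingface_hub import HfApi

api = HfApi()
try:
    info = api.model_info(repo_id="openai-community/gpt2")  # illustrative repo
    likes = info.likes
except Exception:
    likes = None  # the request row is still built, just with a null field

print(likes)
```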
```diff
@@ -104,17 +100,17 @@ def add_new_eval(
     df_request_dict = pd.DataFrame([request_dict])
     print(df_request_dict)
     df_final = pd.concat([requests_dset, df_request_dict], ignore_index=True)
-    updated_dset =Dataset.from_pandas(df_final)
-    updated_dset.push_to_hub("
+    updated_dset = Dataset.from_pandas(df_final)
+    updated_dset.push_to_hub("AIEnergyScore/requests_debug", split="test", token=TOKEN)
 
     gr.Info("Starting compute space at %s " % COMPUTE_SPACE)
     return start_compute_space()
 
 
 def print_existing_models():
-    requests= load_dataset("
+    requests= load_dataset("AIEnergyScore/requests_debug", split="test", token=TOKEN)
     requests_dset = requests.to_pandas()
-    model_df= requests_dset[['model','status']]
+    model_df= requests_dset[['model', 'status']]
     model_df = model_df[model_df['status'] == 'COMPLETED']
     return model_df
 
```
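Both functions above share a read-modify-write cycle on the same dataset split: pull it down as pandas, append or filter, and (for submissions) push the whole split back. A minimal sketch with stand-in data, the push left commented out since it needs write access; note the cycle is not atomic, so two simultaneous submissions could race:

```python
import pandas as pd
from datasets import Dataset

# Stand-ins for the live queue and the newly built request.
requests_dset = pd.DataFrame([{"model": "org/model-a", "status": "COMPLETED"}])
request_dict = {"model": "org/model-b", "status": "PENDING"}

df_final = pd.concat([requests_dset, pd.DataFrame([request_dict])],
                     ignore_index=True)
updated_dset = Dataset.from_pandas(df_final)
# updated_dset.push_to_hub("AIEnergyScore/requests_debug", split="test", token=TOKEN)
```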
```diff
@@ -127,7 +123,7 @@ def highlight_cols(x):
 
 # Applying the style function
 existing_models = print_existing_models()
-formatted_df = existing_models.style.apply(highlight_cols, axis
+formatted_df = existing_models.style.apply(highlight_cols, axis=None)
 
 def get_leaderboard_models():
     path = r'leaderboard_v0_data/energy'
```
```diff
@@ -148,9 +144,9 @@ with gr.Blocks() as demo:
     with gr.Row():
         with gr.Column():
             task = gr.Dropdown(
-                choices=task_mappings.keys(),
+                choices=list(task_mappings.keys()),
                 label="Choose a benchmark task",
-                value
+                value='Text Generation',
                 multiselect=False,
                 interactive=True,
             )
```
```diff
@@ -171,15 +167,15 @@
     )
     with gr.Row():
         with gr.Column():
-            with gr.Accordion("Submit log files from a Docker run:", open
+            with gr.Accordion("Submit log files from a Docker run:", open=False):
                 gr.Markdown("If you've already benchmarked your model using the [Docker file](https://github.com/huggingface/EnergyStarAI/) provided, please upload the **entire run log directory** (in .zip format) below:")
                 file_output = gr.File(visible=False)
                 u = gr.UploadButton("Upload a zip file with logs", file_count="single")
-                u.upload(add_docker_eval,u, file_output)
+                u.upload(add_docker_eval, u, file_output)
     with gr.Row():
         with gr.Column():
-            with gr.Accordion("Models that are in the latest leaderboard version:", open
+            with gr.Accordion("Models that are in the latest leaderboard version:", open=False):
                 gr.Dataframe(get_leaderboard_models())
-            with gr.Accordion("Models that have been benchmarked recently:", open
+            with gr.Accordion("Models that have been benchmarked recently:", open=False):
                 gr.Dataframe(formatted_df)
 demo.launch()
```
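Finally, the corrected `u.upload(add_docker_eval, u, file_output)` call shows Gradio's event wiring: `.upload(fn, inputs, outputs)` runs the handler with the uploaded file and routes its return value to the output component. A self-contained sketch with a placeholder handler (the real one, shown earlier in this diff, validates the .zip and pushes it to a dataset):

```python
import gradio as gr

def add_docker_eval(zip_file):
    # Placeholder: the real handler validates the .zip and uploads it.
    return zip_file

with gr.Blocks() as demo:
    file_output = gr.File(visible=False)
    u = gr.UploadButton("Upload a zip file with logs", file_count="single")
    u.upload(add_docker_eval, u, file_output)

demo.launch()
```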