Commit bfd434f ("changes")
Parent: 6c813f8

app.py CHANGED
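In summary: the diff below drops the `build_demo()` wrapper and builds the UI at module level, removes the `backends` checkbox group (and its visibility-toggling `backends.change` handler), makes the login guard in `run_benchmark` yield placeholder outputs before raising `gr.Error`, wraps the benchmark runs in an explicit `for backend in BACKENDS:` loop with logging setup and teardown, adds an `update_task` helper for task inference, and launches the app with a bounded queue.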
@@ -38,10 +38,9 @@ TASKS = set(TASKS_TO_OVMODEL.keys()) & set(TASKS_TO_MODEL_LOADERS.keys())
 
 def run_benchmark(kwargs, oauth_token: Optional[gr.OAuthToken] = None):
     if oauth_token.token is None or oauth_token.token == "":
-        return tuple(None for _ in BACKENDS)
+        yield tuple(None for _ in BACKENDS)
 
+        raise gr.Error("Please login to be able to run the benchmark.")
 
     timestamp = time.strftime("%Y-%m-%d-%H-%M-%S")
     name = whoami(oauth_token.token)["name"]
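One thing worth noting about this hunk: `run_benchmark` is a generator (its results are emitted with `yield` further down), so the old `return tuple(...)` never actually delivered a value to the UI. The new code yields the placeholder outputs first and then raises `gr.Error`, which Gradio displays to the user. A minimal sketch of the pattern, with hypothetical names:

```python
import gradio as gr

def guarded_handler(token):
    # In a Gradio generator handler, outputs are emitted with `yield`;
    # a plain `return value` never reaches the UI. So: first yield a
    # placeholder to clear the outputs, then raise gr.Error, which
    # Gradio shows to the user as an error message.
    if token is None or token == "":
        yield None
        raise gr.Error("Please login to be able to run the benchmark.")
    yield "result"
```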
@@ -63,8 +62,6 @@ def run_benchmark(kwargs, oauth_token: Optional[gr.OAuthToken] = None):
             model = value
         elif key.label == "task":
             task = value
-        elif key.label == "backends":
-            backends = value
         elif "." in key.label:
             backend, argument = key.label.split(".")
             configs[backend][argument] = value
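For orientation: `run_benchmark` receives its Gradio inputs as a single dict (`kwargs`) because the click event in the last hunk passes `inputs` as a set of components, so values are dispatched by component label; with the `backends` checkbox gone, only `model`, `task`, and dotted `backend.argument` labels remain. A condensed, hypothetical version of that dispatch:

```python
# Trimmed-down sketch of the dispatch above. When `inputs` is a *set*
# of components, Gradio passes the handler one dict mapping each
# component to its value; components are told apart by their labels.
def dispatch(kwargs: dict):
    configs = {"pytorch": {}, "openvino": {}}
    model = task = None
    for key, value in kwargs.items():
        if key.label == "model":
            model = value
        elif key.label == "task":
            task = value
        elif "." in key.label:  # e.g. "pytorch.torch_dtype"
            backend, argument = key.label.split(".")
            configs[backend][argument] = value
    return model, task, configs
```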
@@ -98,7 +95,9 @@ def run_benchmark(kwargs, oauth_token: Optional[gr.OAuthToken] = None):
 
     yield tuple(outputs[b] for b in BACKENDS)
 
+    setup_logging(level="INFO", prefix="MAIN-PROCESS")
+
+    for backend in BACKENDS:
         try:
             benchmark_name = f"{timestamp}/{backend}"
             benchmark_config = BenchmarkConfig(
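The `setup_logging(level="INFO", prefix="MAIN-PROCESS")` call added here, paired with the `logging.getLogger().setLevel(logging.NOTSET)` reset added in the next hunk, brackets the per-backend loop; `setup_logging` appears to be optimum-benchmark's logging helper. A stdlib-only sketch of the same bracket (`basicConfig` standing in for `setup_logging`):

```python
import logging

logging.basicConfig(level=logging.INFO)       # stand-in for setup_logging(...)
for backend in ("pytorch", "openvino"):
    pass                                      # run and report one benchmark
logging.getLogger().setLevel(logging.NOTSET)  # undo the level override afterwards
```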
@@ -118,121 +117,113 @@ def run_benchmark(kwargs, oauth_token: Optional[gr.OAuthToken] = None):
             benchmark.push_to_hub(
                 repo_id=repo_id, subfolder=benchmark_name, token=oauth_token.token
             )
-            gr.Info(f"Pushed benchmark to {repo_id}/{benchmark_name}")
 
             outputs[backend] = f"\n{benchmark_report.to_markdown_text()}"
 
             yield tuple(outputs[b] for b in BACKENDS)
 
+            gr.Info(f"Pushed benchmark to {repo_id}/{benchmark_name}")
 
-            gr.Error(f"Error while running benchmark for {backend}")
+        except Exception:
 
             outputs[backend] = f"\n```python\n{traceback.format_exc()}```"
 
             yield tuple(outputs[b] for b in BACKENDS)
 
+            raise gr.Error(f"Error while running benchmark for {backend}")
 
+    logging.getLogger().setLevel(logging.NOTSET)
 
 
-def build_demo():
-    with gr.Blocks() as demo:
-        # add login button
-        gr.LoginButton()
+def update_task(model_id):
+    try:
+        inferred_task = infer_task_from_model_name_or_path(model_id)
+    except Exception:
+        raise gr.Error(
+            f"Error while inferring task for {model_id}, please select a task manually."
+        )
 
+    if inferred_task not in TASKS:
+        raise gr.Error(
+            f"Task {inferred_task} is not supported by OpenVINO, please select a task manually."
+        )
 
+    return inferred_task
 
 
-            "
-            "<br>The results (config and report) will be pushed under your namespace in a benchmark repository on the Hub."
-            "</p>"
-        )
+with gr.Blocks() as demo:
+    # add login button
+    gr.LoginButton()
 
+    # add image
+    gr.HTML(
+        """<img src="https://huggingface.co/spaces/optimum/optimum-benchmark-ui/resolve/main/huggy_bench.png" style="display: block; margin-left: auto; margin-right: auto; width: 30%;">"""
+        "<h1 style='text-align: center'>🤗 Optimum-Benchmark Interface 🏋️</h1>"
+        "<p style='text-align: center'>"
+        "This Space uses <a href='https://github.com/huggingface/optimum-benchmark.git'>Optimum-Benchmark</a> to automatically benchmark a model from the Hub on different backends."
+        "<br>The results (config and report) will be pushed under your namespace in a benchmark repository on the Hub."
+        "</p>"
+    )
 
-            value="openai-community/gpt2",
-            placeholder="Search for a model",
-            sumbit_on_select=True,
-        )
-        task = gr.Dropdown(
-            label="task",
-            choices=TASKS,
-            value="text-generation",
-            info="Task to run the benchmark on.",
-        )
-        backends = gr.CheckboxGroup(
-            interactive=True,
-            label="backends",
-            choices=BACKENDS,
-            value=BACKENDS,
-            info="Backends to run the benchmark on.",
-        )
+    with gr.Column(variant="panel"):
+        model = HuggingfaceHubSearch(
+            placeholder="Search for a model",
+            sumbit_on_select=True,
+            search_type="model",
+            label="model",
+        )
 
+        with gr.Row():
+            task = gr.Dropdown(
+                info="Task to run the benchmark on.",
+                elem_id="task-dropdown",
+                choices=TASKS,
+                label="task",
+            )
 
-        with gr.Accordion(label="Process Config", open=False, visible=True):
-            process_config = get_process_config()
-        with gr.Row():
-            with gr.Accordion(label="Inference Config", open=False, visible=True):
-                inference_config = get_inference_config()
+    with gr.Column(variant="panel"):
+        with gr.Accordion(label="Process Config", open=False, visible=True):
+            process_config = get_process_config()
+        with gr.Accordion(label="Inference Config", open=False, visible=True):
+            inference_config = get_inference_config()
 
-        with gr.Row() as backend_configs:
-            with gr.Accordion(label="PyTorch Config", open=False, visible=True):
-                pytorch_config = get_pytorch_config()
-            with gr.Accordion(label="OpenVINO Config", open=False, visible=True):
-                openvino_config = get_openvino_config()
+        with gr.Row() as backend_configs:
+            with gr.Accordion(label="PyTorch Config", open=False, visible=True):
+                pytorch_config = get_pytorch_config()
+            with gr.Accordion(label="OpenVINO Config", open=False, visible=True):
+                openvino_config = get_openvino_config()
+
+        with gr.Row():
+            button = gr.Button(value="Run Benchmark", variant="primary")
 
-        with gr.Row() as markdown_outputs:
-            with gr.Accordion(label="PyTorch Output", open=True, visible=True):
-                pytorch_output = gr.Markdown()
-            with gr.Accordion(label="OpenVINO Output", open=True, visible=True):
-                openvino_output = gr.Markdown()
+    with gr.Row() as markdown_outputs:
+        with gr.Accordion(label="PyTorch Output", open=True, visible=True):
+            pytorch_output = gr.Markdown()
+        with gr.Accordion(label="OpenVINO Output", open=True, visible=True):
+            openvino_output = gr.Markdown()
 
-        model.submit(inputs=model, outputs=task, fn=infer_task_from_model_name_or_path)
+    model.submit(inputs=model, outputs=task, fn=update_task)
 
-        backends.change(
-            inputs=backends,
-            outputs=backend_configs.children,
-            fn=lambda values: [
-                gr.update(visible=value in values) for value in BACKENDS
-            ],
-        )
-
-            *process_config.values(),
-            *inference_config.values(),
-            *pytorch_config.values(),
-            *openvino_config.values(),
-        },
-        outputs={
-            pytorch_output,
-            openvino_output,
-        },
-        concurrency_limit=1,
-    )
+    button.click(
+        fn=run_benchmark,
+        inputs={
+            task,
+            model,
+            # backends,
+            *process_config.values(),
+            *inference_config.values(),
+            *pytorch_config.values(),
+            *openvino_config.values(),
+        },
+        outputs={
+            pytorch_output,
+            openvino_output,
+        },
+        concurrency_limit=1,
+    )
 
 
 if __name__ == "__main__":
     os.environ["LOG_TO_FILE"] = "0"
     os.environ["LOG_LEVEL"] = "INFO"
 
+    demo.queue(max_size=10).launch()
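About the two queueing knobs introduced by this commit: `concurrency_limit=1` on the click event means at most one benchmark runs at a time, and `demo.queue(max_size=10)` bounds how many submissions may wait for their turn. A self-contained sketch with a hypothetical button and handler:

```python
import gradio as gr

with gr.Blocks() as demo:
    btn = gr.Button("Run")
    out = gr.Markdown()
    # concurrency_limit=1 serializes the runs triggered by this event
    btn.click(fn=lambda: "done", inputs=None, outputs=out, concurrency_limit=1)

# max_size=10 caps how many submissions may wait in the queue;
# further requests are turned away until a slot frees up.
demo.queue(max_size=10).launch()
```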