import os
import time
import traceback
from typing import Optional

from config_store import (
    get_process_config,
    get_inference_config,
    get_openvino_config,
    get_pytorch_config,
)

import gradio as gr
from huggingface_hub import create_repo, whoami
from gradio_huggingfacehub_search import HuggingfaceHubSearch
from optimum_benchmark.launchers.device_isolation_utils import * # noqa
from optimum_benchmark.backends.openvino.utils import TASKS_TO_OVMODEL
from optimum_benchmark.backends.transformers_utils import TASKS_TO_MODEL_LOADERS
from optimum_benchmark import (
    Benchmark,
    BenchmarkConfig,
    ProcessConfig,
    InferenceConfig,
    PyTorchConfig,
    OVConfig,
)
from optimum_benchmark.logging_utils import setup_logging
from optimum_benchmark.task_utils import infer_task_from_model_name_or_path
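
# Fixed setup for this Space: CPU-only, a process launcher, the inference
# scenario, and two backends compared side by side. Only tasks supported by
# both the OpenVINO and Transformers model loaders are offered.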
DEVICE = "cpu"
LAUNCHER = "process"
SCENARIO = "inference"
BACKENDS = ["pytorch", "openvino"]
TASKS = set(TASKS_TO_OVMODEL.keys()) & set(TASKS_TO_MODEL_LOADERS.keys())


def run_benchmark(inputs, oauth_token: Optional[gr.OAuthToken] = None):
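    """Benchmark the selected model/task on every backend and stream each
    backend's report (or traceback) back to the UI as it finishes."""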
    if oauth_token is None or not oauth_token.token:
        raise gr.Error("Please login to be able to run the benchmark.")

    timestamp = time.strftime("%Y-%m-%d-%H-%M-%S")
    name = whoami(oauth_token.token)["name"]
    repo_id = f"{name}/benchmarks"
    token = oauth_token.token

    try:
        create_repo(repo_id, token=token, repo_type="dataset", exist_ok=True)
        gr.Info(f"🎉 Created dataset repository {repo_id} on the Hub.")
    except Exception:
        raise gr.Error(f"Error while creating dataset repository {repo_id} on the Hub.")
configs = {"process": {}, "inference": {}, "pytorch": {}, "openvino": {}}
for key, value in inputs.items():
if key.label == "model":
model = value
elif key.label == "task":
task = value
elif "." in key.label:
backend, argument = key.label.split(".")
configs[backend][argument] = value
else:
continue
for key in configs.keys():
for k, v in configs[key].items():
if k in ["input_shapes", "generate_kwargs", "numactl_kwargs"]:
configs[key][k] = eval(v)
configs["process"] = ProcessConfig(**configs.pop("process"))
configs["inference"] = InferenceConfig(**configs.pop("inference"))
configs["pytorch"] = PyTorchConfig(
task=task, model=model, device=DEVICE, **configs["pytorch"]
)
configs["openvino"] = OVConfig(
task=task, model=model, device=DEVICE, **configs["openvino"]
)
outputs = {
"pytorch": "Running benchmark for PyTorch backend",
"openvino": "Running benchmark for OpenVINO backend",
}

    yield tuple(outputs[b] for b in BACKENDS)

    for backend in BACKENDS:
        try:
            benchmark_name = f"{timestamp}/{backend}"
            benchmark_config = BenchmarkConfig(
                name=benchmark_name,
                backend=configs[backend],
                launcher=configs[LAUNCHER],
                scenario=configs[SCENARIO],
            )
            # Push the config first, then launch and push the report and the
            # combined benchmark artifact to the same subfolder.
            benchmark_config.push_to_hub(
                repo_id=repo_id, subfolder=benchmark_name, token=oauth_token.token
            )
            benchmark_report = Benchmark.launch(benchmark_config)
            benchmark_report.push_to_hub(
                repo_id=repo_id, subfolder=benchmark_name, token=oauth_token.token
            )
            benchmark = Benchmark(config=benchmark_config, report=benchmark_report)
            benchmark.push_to_hub(
                repo_id=repo_id, subfolder=benchmark_name, token=oauth_token.token
            )
        except Exception:
            outputs[backend] = f"\n```python-traceback\n{traceback.format_exc()}```\n"
            yield tuple(outputs[b] for b in BACKENDS)
            gr.Info(f"❌ Error while running benchmark for {backend} backend.")
        else:
            outputs[backend] = f"\n{benchmark_report.to_markdown_text()}\n"
            yield tuple(outputs[b] for b in BACKENDS)
            gr.Info(f"✅ Benchmark for {backend} backend ran successfully.")


def update_task(model_id):
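    """Infer the default task for the selected model and pre-fill the task
    dropdown, erroring out if no supported task can be inferred."""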
    try:
        inferred_task = infer_task_from_model_name_or_path(model_id)
    except Exception:
        raise gr.Error(
            f"Error while inferring task for {model_id}, please select a task manually."
        )

    if inferred_task not in TASKS:
        raise gr.Error(
            f"Task {inferred_task} is not supported by OpenVINO, please select a task manually."
        )

    return inferred_task
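

# Gradio UI: login button, model search with task auto-inference, config
# accordions for each backend, and one report panel per backend.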
with gr.Blocks() as demo:
    # add login button
    gr.LoginButton()
    # add image
    gr.HTML(
        """<img src="https://huggingface.co/spaces/optimum/optimum-benchmark-ui/resolve/main/huggy_bench.png" style="display: block; margin-left: auto; margin-right: auto; width: 30%;">"""
        "<h1 style='text-align: center'>🤗 Optimum-Benchmark Interface 🏋️</h1>"
        "<p style='text-align: center'>"
        "This Space uses <a href='https://github.com/huggingface/optimum-benchmark.git'>Optimum-Benchmark</a> to automatically benchmark a model from the Hub on different backends."
        "<br>The results (config and report) will be pushed under your namespace in a benchmark repository on the Hub."
        "</p>"
    )
with gr.Column(variant="panel"):
model = HuggingfaceHubSearch(
placeholder="Search for a model",
sumbit_on_select=True,
search_type="model",
label="model",
)
with gr.Row():
task = gr.Dropdown(
info="Task to run the benchmark on.",
elem_id="task-dropdown",
choices=TASKS,
label="task",
)
with gr.Column(variant="panel"):
with gr.Accordion(label="Process Config", open=False, visible=True):
process_config = get_process_config()
with gr.Accordion(label="Inference Config", open=False, visible=True):
inference_config = get_inference_config()
with gr.Row() as backend_configs:
with gr.Accordion(label="PyTorch Config", open=False, visible=True):
pytorch_config = get_pytorch_config()
with gr.Accordion(label="OpenVINO Config", open=False, visible=True):
openvino_config = get_openvino_config()
with gr.Row():
button = gr.Button(value="Run Benchmark", variant="primary")
with gr.Row():
with gr.Accordion(label="PyTorch Report", open=True, visible=True):
pytorch_report = gr.Markdown()
with gr.Accordion(label="OpenVINO Report", open=True, visible=True):
openvino_report = gr.Markdown()

    model.submit(inputs=model, outputs=task, fn=update_task)
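
    # Passing `inputs` as a set makes Gradio call `run_benchmark` with a single
    # dict of {component: value}, which its label-based routing loop expects.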
    button.click(
        fn=run_benchmark,
        inputs={
            task,
            model,
            # backends,
            *process_config.values(),
            *inference_config.values(),
            *pytorch_config.values(),
            *openvino_config.values(),
        },
        outputs={
            pytorch_report,
            openvino_report,
        },
        concurrency_limit=1,
    )


if __name__ == "__main__":
    os.environ["LOG_TO_FILE"] = "0"
    os.environ["LOG_LEVEL"] = "INFO"
    setup_logging(level="INFO", prefix="MAIN-PROCESS")

    demo.queue(max_size=10).launch()