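# Gradio app for the Optimum-Benchmark Space: benchmarks a Hub model on the
# selected backends (PyTorch, OpenVINO) and pushes the config and report to a
# dataset repository under the logged-in user's namespace.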
import os
import time
import traceback
from config_store import (
    get_process_config,
    get_inference_config,
    get_openvino_config,
    get_pytorch_config,
)
import gradio as gr
from huggingface_hub import create_repo, whoami
from optimum_benchmark.launchers.device_isolation_utils import * # noqa
from optimum_benchmark.backends.openvino.utils import TASKS_TO_OVMODEL
from optimum_benchmark.backends.transformers_utils import TASKS_TO_MODEL_LOADERS
from optimum_benchmark import (
    Benchmark,
    BenchmarkConfig,
    ProcessConfig,
    InferenceConfig,
    PyTorchConfig,
    OVConfig,
)
from optimum_benchmark.logging_utils import setup_logging
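
# Benchmark dimensions fixed by this Space: CPU device, process launcher, inference scenario.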
DEVICE = "cpu"
LAUNCHER = "process"
SCENARIO = "inference"
BACKENDS = ["pytorch", "openvino"]
MODELS = [
    "openai-community/gpt2",
    "google-bert/bert-base-uncased",
    "hf-internal-testing/tiny-random-LlamaForCausalLM",
    "hf-internal-testing/tiny-random-BertForSequenceClassification",
]
MODELS_TO_TASKS = {
    "openai-community/gpt2": "text-generation",
    "google-bert/bert-base-uncased": "text-classification",
    "hf-internal-testing/tiny-random-LlamaForCausalLM": "text-generation",
    "hf-internal-testing/tiny-random-BertForSequenceClassification": "text-classification",
}
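# Offer only the tasks that both the OpenVINO and the Transformers model loaders support.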
TASKS = set(TASKS_TO_OVMODEL.keys()) & set(TASKS_TO_MODEL_LOADERS.keys())
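

# Because button.click() below passes its inputs as a *set* of components,
# `kwargs` arrives as a dict mapping each Gradio component to its value;
# values are routed by the component's label.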
def run_benchmark(kwargs, oauth_token: gr.OAuthToken):
    # gr.Error must be raised (not just instantiated) to be shown in the UI;
    # oauth_token is None when no user is logged in.
    if oauth_token is None or oauth_token.token is None:
        raise gr.Error("Please login to be able to run the benchmark.")
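
    # All of a user's results live in one "<username>/benchmarks" dataset repo;
    # each run is grouped under a timestamped subfolder.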
    timestamp = time.strftime("%Y-%m-%d-%H-%M-%S")
    username = whoami(oauth_token.token)["name"]
    repo_id = f"{username}/benchmarks"
    token = oauth_token.token

    create_repo(repo_id, token=token, repo_type="dataset", exist_ok=True)
    gr.Info(f'Created repository "{repo_id}" where results will be pushed.')

    configs = {
        "process": {},
        "inference": {},
        "pytorch": {},
        "openvino": {},
    }
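
    # Sort every UI value into place: top-level fields (model, task, backends)
    # are kept directly, while components labelled "<section>.<argument>" fill
    # the corresponding launcher/scenario/backend config dict.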
    for key, value in kwargs.items():
        if key.label == "model":
            model = value
        elif key.label == "task":
            task = value
        elif key.label == "backends":
            backends = value
        elif "." in key.label:
            backend, argument = key.label.split(".")
            configs[backend][argument] = value
        else:
            continue
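
    # Dict-valued arguments are entered as strings in the UI and parsed here.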
    for key in configs.keys():
        for k, v in configs[key].items():
            if k in ["input_shapes", "generate_kwargs", "numactl_kwargs"]:
                configs[key][k] = eval(v)
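
    # Build the optimum-benchmark config objects; backend configs also carry
    # the chosen model, task and (CPU) device.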
configs["process"] = ProcessConfig(**configs.pop("process"))
configs["inference"] = InferenceConfig(**configs.pop("inference"))
configs["pytorch"] = PyTorchConfig(
task=task,
model=model,
device=DEVICE,
**configs["pytorch"],
)
configs["openvino"] = OVConfig(
task=task,
model=model,
device=DEVICE,
**configs["openvino"],
)
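
    # Placeholder text streamed to both output panels while the benchmarks run.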
    outputs = {
        "pytorch": "Running benchmark for PyTorch backend",
        "openvino": "Running benchmark for OpenVINO backend",
    }

    yield tuple(outputs[b] for b in BACKENDS)
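
    # Run the selected backends one at a time: push the config, launch the
    # benchmark, push the report and the combined benchmark artifact, then
    # stream the report (or the traceback on failure) to the matching panel.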
    for backend in backends:
        try:
            benchmark_name = f"{timestamp}/{backend}"
            benchmark_config = BenchmarkConfig(
                name=benchmark_name,
                backend=configs[backend],
                launcher=configs[LAUNCHER],
                scenario=configs[SCENARIO],
            )
            benchmark_config.push_to_hub(
                repo_id=repo_id, subfolder=benchmark_name, token=oauth_token.token
            )
            benchmark_report = Benchmark.launch(benchmark_config)
            benchmark_report.push_to_hub(
                repo_id=repo_id, subfolder=benchmark_name, token=oauth_token.token
            )
            benchmark = Benchmark(config=benchmark_config, report=benchmark_report)
            benchmark.push_to_hub(
                repo_id=repo_id, subfolder=benchmark_name, token=oauth_token.token
            )
            gr.Info(f"Pushed benchmark to {username}/benchmarks/{benchmark_name}")
            outputs[backend] = f"\n{benchmark_report.to_markdown_text()}"
            yield tuple(outputs[b] for b in BACKENDS)
        except Exception:
            gr.Error(f"Error while running benchmark for {backend}")
            outputs[backend] = f"\n```python\n{traceback.format_exc()}```"
            yield tuple(outputs[b] for b in BACKENDS)
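

# Assemble the Gradio UI: login button, header, model/task/backend selectors,
# per-section config accordions, and one markdown output panel per backend.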
def build_demo():
    with gr.Blocks() as demo:
        # add login button
        gr.LoginButton(min_width=250)

        # add image
        gr.HTML(
            """<img src="https://huggingface.co/spaces/optimum/optimum-benchmark-ui/resolve/main/huggy_bench.png" style="display: block; margin-left: auto; margin-right: auto; width: 30%;">"""
            "<h1 style='text-align: center'>🤗 Optimum-Benchmark Interface 🏋️</h1>"
            "<p style='text-align: center'>"
            "This Space uses <a href='https://github.com/huggingface/optimum-benchmark.git'>Optimum-Benchmark</a> to automatically benchmark a model from the Hub on different backends."
            "<br>The results (config and report) will be pushed under your namespace in a benchmark repository on the Hub."
            "</p>"
        )
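
        # Benchmark inputs; the task dropdown is kept in sync with the selected model.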
        model = gr.Dropdown(
            label="model",
            choices=MODELS,
            value=MODELS[0],
            info="Model to run the benchmark on.",
        )
        task = gr.Dropdown(
            label="task",
            choices=TASKS,
            value="feature-extraction",
            info="Task to run the benchmark on.",
        )
        backends = gr.CheckboxGroup(
            interactive=True,
            label="backends",
            choices=BACKENDS,
            value=BACKENDS,
            info="Backends to run the benchmark on.",
        )
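
        # Each get_*_config() helper (from config_store) returns a dict of Gradio
        # components whose labels follow the "<section>.<argument>" convention
        # parsed in run_benchmark.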
        with gr.Row():
            with gr.Accordion(label="Process Config", open=False, visible=True):
                process_config = get_process_config()
        with gr.Row():
            with gr.Accordion(label="Inference Config", open=False, visible=True):
                inference_config = get_inference_config()
        with gr.Row() as backend_configs:
            with gr.Accordion(label="PyTorch Config", open=False, visible=True):
                pytorch_config = get_pytorch_config()
            with gr.Accordion(label="OpenVINO Config", open=False, visible=True):
                openvino_config = get_openvino_config()

        with gr.Row():
            button = gr.Button(value="Run Benchmark", variant="primary")

        with gr.Row() as markdown_outputs:
            with gr.Accordion(label="PyTorch Output", open=True, visible=True):
                pytorch_output = gr.Markdown()
            with gr.Accordion(label="OpenVINO Output", open=True, visible=True):
                openvino_output = gr.Markdown()
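
        # Interactivity: picking a model sets its default task, and toggling a backend
        # shows/hides the matching config and output accordions.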
        model.change(
            inputs=model, outputs=task, fn=lambda value: MODELS_TO_TASKS[value]
        )
        backends.change(
            inputs=backends,
            outputs=backend_configs.children,
            fn=lambda values: [
                gr.update(visible=value in values) for value in BACKENDS
            ],
        )
        backends.change(
            inputs=backends,
            outputs=markdown_outputs.children,
            fn=lambda values: [
                gr.update(visible=value in values) for value in BACKENDS
            ],
        )
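
        # Inputs and outputs are passed as *sets* of components, so run_benchmark
        # receives a single dict mapping each component to its value (see the
        # kwargs loop above).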
        button.click(
            fn=run_benchmark,
            inputs={
                task,
                model,
                backends,
                *process_config.values(),
                *inference_config.values(),
                *pytorch_config.values(),
                *openvino_config.values(),
            },
            outputs={
                pytorch_output,
                openvino_output,
            },
            concurrency_limit=1,
        )

    return demo

demo = build_demo()
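
# Configure logging for the main process before serving the app.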
if __name__ == "__main__":
    os.environ["LOG_TO_FILE"] = "0"
    os.environ["LOG_LEVEL"] = "INFO"
    setup_logging(level="INFO", prefix="MAIN-PROCESS")
    demo.queue(max_size=10).launch()