import os
import time
import traceback

from config_store import (
    get_process_config,
    get_inference_config,
    get_openvino_config,
    get_pytorch_config,
)

import gradio as gr
from huggingface_hub import create_repo, whoami
from optimum_benchmark.launchers.device_isolation_utils import *  # noqa
from optimum_benchmark.backends.openvino.utils import TASKS_TO_OVMODEL
from optimum_benchmark.backends.transformers_utils import TASKS_TO_MODEL_LOADERS
from optimum_benchmark import (
    Benchmark,
    BenchmarkConfig,
    ProcessConfig,
    InferenceConfig,
    PyTorchConfig,
    OVConfig,
)
from optimum_benchmark.logging_utils import setup_logging

DEVICE = "cpu"
LAUNCHER = "process"
SCENARIO = "inference"
BACKENDS = ["pytorch", "openvino"]

MODELS = [
    "openai-community/gpt2",
    "google-bert/bert-base-uncased",
    "hf-internal-testing/tiny-random-LlamaForCausalLM",
    "hf-internal-testing/tiny-random-BertForSequenceClassification",
]

MODELS_TO_TASKS = {
    "openai-community/gpt2": "text-generation",
    "google-bert/bert-base-uncased": "text-classification",
    "hf-internal-testing/tiny-random-LlamaForCausalLM": "text-generation",
    "hf-internal-testing/tiny-random-BertForSequenceClassification": "text-classification",
}

TASKS = set(TASKS_TO_OVMODEL.keys()) & set(TASKS_TO_MODEL_LOADERS.keys())


def run_benchmark(kwargs, oauth_token: gr.OAuthToken):
    if oauth_token.token is None:
        gr.Error("Please login to be able to run the benchmark.")
        return tuple(None for _ in BACKENDS)

    timestamp = time.strftime("%Y-%m-%d-%H-%M-%S")
    username = whoami(oauth_token.token)["name"]
    repo_id = f"{username}/benchmarks"
    token = oauth_token.token

    create_repo(repo_id, token=token, repo_type="dataset", exist_ok=True)
    gr.Info(f'Created repository "{repo_id}" where results will be pushed.')

    configs = {
        "process": {},
        "inference": {},
        "pytorch": {},
        "openvino": {},
    }

    # route UI values: top-level fields by label, "backend.argument" entries into per-backend configs
    for key, value in kwargs.items():
        if key.label == "model":
            model = value
        elif key.label == "task":
            task = value
        elif key.label == "backends":
            backends = value
        elif "." in key.label:
            backend, argument = key.label.split(".")
            configs[backend][argument] = value
        else:
            continue

    # dict-valued arguments arrive from the UI as strings; parse them
    for key in configs.keys():
        for k, v in configs[key].items():
            if k in ["input_shapes", "generate_kwargs", "numactl_kwargs"]:
                configs[key][k] = eval(v)

    configs["process"] = ProcessConfig(**configs.pop("process"))
    configs["inference"] = InferenceConfig(**configs.pop("inference"))
    configs["pytorch"] = PyTorchConfig(
        task=task,
        model=model,
        device=DEVICE,
        **configs["pytorch"],
    )
    configs["openvino"] = OVConfig(
        task=task,
        model=model,
        device=DEVICE,
        **configs["openvino"],
    )

    outputs = {
        "pytorch": "Running benchmark for PyTorch backend",
        "openvino": "Running benchmark for OpenVINO backend",
    }

    yield tuple(outputs[b] for b in BACKENDS)

    for backend in backends:
        try:
            benchmark_name = f"{timestamp}/{backend}"
            benchmark_config = BenchmarkConfig(
                name=benchmark_name,
                backend=configs[backend],
                launcher=configs[LAUNCHER],
                scenario=configs[SCENARIO],
            )
            benchmark_config.push_to_hub(
                repo_id=repo_id, subfolder=benchmark_name, token=oauth_token.token
            )
            benchmark_report = Benchmark.launch(benchmark_config)
            benchmark_report.push_to_hub(
                repo_id=repo_id, subfolder=benchmark_name, token=oauth_token.token
            )
            benchmark = Benchmark(config=benchmark_config, report=benchmark_report)
            benchmark.push_to_hub(
                repo_id=repo_id, subfolder=benchmark_name, token=oauth_token.token
            )
            gr.Info(f"Pushed benchmark to {username}/benchmarks/{benchmark_name}")
            outputs[backend] = f"\n{benchmark_report.to_markdown_text()}"
            yield tuple(outputs[b] for b in BACKENDS)
        except Exception:
            gr.Error(f"Error while running benchmark for {backend}")
            outputs[backend] = f"\n```python\n{traceback.format_exc()}```"
            yield tuple(outputs[b] for b in BACKENDS)


def build_demo():
    with gr.Blocks() as demo:
        # add login button
        gr.LoginButton(min_width=250)

        # add title and description
        gr.HTML(
            "<h1 style='text-align: center'>🤗 Optimum-Benchmark Interface 🏋️</h1>"
            "<p style='text-align: center'>"
            "This Space uses Optimum-Benchmark to automatically benchmark a model from the Hub on different backends."
            "<br>The results (config and report) will be pushed under your namespace in a benchmark repository on the Hub."
            "</p>"
        )
" ) model = gr.Dropdown( label="model", choices=MODELS, value=MODELS[0], info="Model to run the benchmark on.", ) task = gr.Dropdown( label="task", choices=TASKS, value="feature-extraction", info="Task to run the benchmark on.", ) backends = gr.CheckboxGroup( interactive=True, label="backends", choices=BACKENDS, value=BACKENDS, info="Backends to run the benchmark on.", ) with gr.Row(): with gr.Accordion(label="Process Config", open=False, visible=True): process_config = get_process_config() with gr.Row(): with gr.Accordion(label="Inference Config", open=False, visible=True): inference_config = get_inference_config() with gr.Row() as backend_configs: with gr.Accordion(label="PyTorch Config", open=False, visible=True): pytorch_config = get_pytorch_config() with gr.Accordion(label="OpenVINO Config", open=False, visible=True): openvino_config = get_openvino_config() with gr.Row(): button = gr.Button(value="Run Benchmark", variant="primary") with gr.Row() as markdown_outputs: with gr.Accordion(label="PyTorch Output", open=True, visible=True): pytorch_output = gr.Markdown() with gr.Accordion(label="OpenVINO Output", open=True, visible=True): openvino_output = gr.Markdown() model.change( inputs=model, outputs=task, fn=lambda value: MODELS_TO_TASKS[value] ) backends.change( inputs=backends, outputs=backend_configs.children, fn=lambda values: [ gr.update(visible=value in values) for value in BACKENDS ], ) backends.change( inputs=backends, outputs=markdown_outputs.children, fn=lambda values: [ gr.update(visible=value in values) for value in BACKENDS ], ) button.click( fn=run_benchmark, inputs={ task, model, backends, *process_config.values(), *inference_config.values(), *pytorch_config.values(), *openvino_config.values(), }, outputs={ pytorch_output, openvino_output, }, concurrency_limit=1, ) return demo demo = build_demo() if __name__ == "__main__": os.environ["LOG_TO_FILE"] = "0" os.environ["LOG_LEVEL"] = "INFO" setup_logging(level="INFO", prefix="MAIN-PROCESS") demo.queue(max_size=10).launch()