import os
import time
import traceback

import gradio as gr
from huggingface_hub import create_repo, whoami

from config_store import (
    get_process_config,
    get_inference_config,
    get_openvino_config,
    get_pytorch_config,
    get_ipex_config,
)
from optimum_benchmark.launchers.base import Launcher  # noqa
from optimum_benchmark.backends.openvino.utils import TASKS_TO_OVMODEL
from optimum_benchmark.backends.transformers_utils import TASKS_TO_MODEL_LOADERS
from optimum_benchmark.backends.ipex.utils import TASKS_TO_IPEXMODEL
from optimum_benchmark import (
    BenchmarkConfig,
    PyTorchConfig,
    OVConfig,
    IPEXConfig,
    ProcessConfig,
    InferenceConfig,
    Benchmark,
)
from optimum_benchmark.logging_utils import setup_logging
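
# Fixed benchmark settings: everything runs on CPU, each run is isolated in its
# own process, and the scenario is inference. TASKS is the intersection of the
# tasks supported by all three backends.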
DEVICE = "cpu"
LAUNCHER = "process"
SCENARIO = "inference"
# one entry per backend config section and output panel defined below
BACKENDS = ["openvino", "pytorch", "ipex"]
MODELS = [
    "google-bert/bert-base-uncased",
    "openai-community/gpt2",
]
TASKS = (
    set(TASKS_TO_OVMODEL.keys())
    & set(TASKS_TO_IPEXMODEL.keys())
    & set(TASKS_TO_MODEL_LOADERS.keys())
)
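

# Because the button's `inputs` is a set, Gradio calls this function with a
# single dict mapping each input component to its value; component labels route
# the values to the right config section. The gr.OAuthToken parameter is
# injected automatically by Gradio on Spaces that have a LoginButton.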
def run_benchmark(kwargs, oauth_token: gr.OAuthToken):
    if oauth_token is None or oauth_token.token is None:
        # gr.Error must be raised (not merely instantiated) to show up in the
        # UI; raising also ends the event, so no placeholder outputs are needed.
        raise gr.Error("Please login to be able to run the benchmark.")
    timestamp = time.strftime("%Y-%m-%d-%H-%M-%S")
    username = whoami(oauth_token.token)["name"]
    repo_id = f"{username}/benchmarks"
    token = oauth_token.token

    create_repo(repo_id, token=token, repo_type="dataset", exist_ok=True)
    gr.Info(f'Benchmark will be pushed to "{username}/benchmarks" on the Hub')
    configs = {
        "process": {},
        "inference": {},
        "openvino": {},
        "pytorch": {},
        "ipex": {},
    }
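
    # Route each UI value by its component label: "model", "task", and
    # "backends" are top-level controls, while dotted labels (e.g. a
    # hypothetical "pytorch.torch_dtype") go into the matching backend section.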
    for key, value in kwargs.items():
        if key.label == "model":
            model = value
        elif key.label == "task":
            task = value
        elif key.label == "backends":
            backends = value
        elif "." in key.label:
            backend, argument = key.label.split(".")
            configs[backend][argument] = value
        else:
            continue
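
    # Fields whose name contains "kwargs" are free-form dict strings typed into
    # the UI, so they are eval'ed into Python objects. Note that eval executes
    # user-provided input; this is only tolerable in a sandboxed Space.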
    for key in configs.keys():
        for k, v in configs[key].items():
            if "kwargs" in k:
                configs[key][k] = eval(v)
| configs["process"] = ProcessConfig(**configs.pop("process")) | |
| configs["inference"] = InferenceConfig(**configs.pop("inference")) | |
| configs["openvino"] = OVConfig( | |
| task=task, | |
| model=model, | |
| device=DEVICE, | |
| **configs["openvino"], | |
| ) | |
| configs["pytorch"] = PyTorchConfig( | |
| task=task, | |
| model=model, | |
| device=DEVICE, | |
| **configs["pytorch"], | |
| ) | |
| configs["ipex"] = IPEXConfig( | |
| task=task, | |
| model=model, | |
| device=DEVICE, | |
| **configs["ipex"], | |
| ) | |
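
    # Seed each backend's panel with a status message; run_benchmark is a
    # generator, so every yield below refreshes the Markdown outputs in place.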
    outputs = {
        "openvino": "Running benchmark for OpenVINO backend",
        "pytorch": "Running benchmark for PyTorch backend",
        "ipex": "Running benchmark for IPEX backend",
    }
    yield tuple(outputs[b] for b in BACKENDS)
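
    # For each selected backend: compose the full BenchmarkConfig, push it to
    # the dataset repo, launch the run, then push the report and the combined
    # artifact, all under the {timestamp}/{backend} subfolder.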
    for backend in backends:
        try:
            benchmark_name = f"{timestamp}/{backend}"
            benchmark_config = BenchmarkConfig(
                name=benchmark_name,
                backend=configs[backend],
                launcher=configs[LAUNCHER],
                scenario=configs[SCENARIO],
            )
            benchmark_config.push_to_hub(
                repo_id=repo_id, subfolder=benchmark_name, token=oauth_token.token
            )
            benchmark_report = Benchmark.launch(benchmark_config)
            benchmark_report.push_to_hub(
                repo_id=repo_id, subfolder=benchmark_name, token=oauth_token.token
            )
            benchmark = Benchmark(config=benchmark_config, report=benchmark_report)
            benchmark.push_to_hub(
                repo_id=repo_id, subfolder=benchmark_name, token=oauth_token.token
            )
            gr.Info(f"Pushed benchmark to {username}/benchmarks/{benchmark_name}")
            outputs[backend] = f"\n{benchmark_report.to_markdown_text()}"
            yield tuple(outputs[b] for b in BACKENDS)
        except Exception:
            # gr.Warning shows a toast without aborting the event, so the
            # remaining backends still run; the traceback is surfaced in the
            # failing backend's output panel.
            gr.Warning(f"Error while running benchmark for {backend}")
            outputs[backend] = f"\n{traceback.format_exc()}"
            yield tuple(outputs[b] for b in BACKENDS)
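

# UI definition: a login button, model/task/backend selectors, collapsible
# config sections per backend, and one Markdown output panel per backend.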
def build_demo():
    with gr.Blocks() as demo:
        # add login button
        gr.LoginButton(min_width=250)

        # add image
        gr.Markdown(
            """<img src="https://huggingface.co/spaces/optimum/optimum-benchmark-ui/resolve/main/huggy_bench.png" style="display: block; margin-left: auto; margin-right: auto; width: 30%;">"""
        )
        # title text
        gr.Markdown(
            "<h1 style='text-align: center'>🤗 Optimum-Benchmark Interface 🏋️</h1>"
        )

        # explanation text
        gr.HTML(
            "<h3 style='text-align: center'>"
            "Zero-code Gradio interface of "
            "<a href='https://github.com/huggingface/optimum-benchmark.git'>"
            "Optimum-Benchmark"
            "</a>"
            "<br>"
            "</h3>"
            "<p style='text-align: center'>"
            "This Space uses Optimum-Benchmark to automatically benchmark a model from the Hub on different backends."
            "<br>"
            "The results (config and report) will be pushed under your namespace in a benchmark repository on the Hub."
            "</p>"
        )
        model = gr.Dropdown(
            label="model",
            choices=MODELS,
            value=MODELS[0],
            info="Model to run the benchmark on.",
        )
        task = gr.Dropdown(
            label="task",
            # sort the task set so the dropdown order is deterministic
            choices=sorted(TASKS),
            value="feature-extraction",
            info="Task to run the benchmark on.",
        )
        backends = gr.CheckboxGroup(
            interactive=True,
            label="backends",
            choices=BACKENDS,
            value=BACKENDS,
            info="Backends to run the benchmark on.",
        )
        with gr.Row():
            with gr.Accordion(label="Process Config", open=False, visible=True):
                process_config = get_process_config()

        with gr.Row():
            with gr.Accordion(label="Scenario Config", open=False, visible=True):
                inference_config = get_inference_config()

        with gr.Row() as backend_configs:
            with gr.Accordion(label="OpenVINO Config", open=False, visible=True):
                openvino_config = get_openvino_config()
            with gr.Accordion(label="PyTorch Config", open=False, visible=True):
                pytorch_config = get_pytorch_config()
            with gr.Accordion(label="IPEX Config", open=False, visible=True):
                ipex_config = get_ipex_config()
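
        # Show each backend's config accordion only when that backend is
        # checked; the children of backend_configs are ordered like BACKENDS.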
        backends.change(
            inputs=backends,
            outputs=backend_configs.children,
            fn=lambda values: [
                gr.update(visible=value in values) for value in BACKENDS
            ],
        )
        with gr.Row():
            button = gr.Button(value="Run Benchmark", variant="primary")

        with gr.Row() as md_output:
            with gr.Accordion(label="OpenVINO Output", open=True, visible=True):
                openvino_output = gr.Markdown()
            with gr.Accordion(label="PyTorch Output", open=True, visible=True):
                pytorch_output = gr.Markdown()
            with gr.Accordion(label="IPEX Output", open=True, visible=True):
                ipex_output = gr.Markdown()

        backends.change(
            inputs=backends,
            outputs=md_output.children,
            fn=lambda values: [
                gr.update(visible=value in values) for value in BACKENDS
            ],
        )
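
        # Wire the run button: passing `inputs` as a set makes Gradio hand
        # run_benchmark one {component: value} dict, while `outputs` is a list
        # so the tuples yielded by run_benchmark map onto the panels
        # positionally, in BACKENDS order.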
        button.click(
            fn=run_benchmark,
            inputs={
                task,
                model,
                backends,
                *process_config.values(),
                *inference_config.values(),
                *openvino_config.values(),
                *pytorch_config.values(),
                *ipex_config.values(),
            },
            outputs=[
                openvino_output,
                pytorch_output,
                ipex_output,
            ],
            concurrency_limit=1,
        )

    return demo

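
# Disable file logging, log at INFO in the main process, and serve with a small
# queue (concurrency_limit=1 on the click handler serializes benchmark runs).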
if __name__ == "__main__":
    os.environ["LOG_TO_FILE"] = "0"
    os.environ["LOG_LEVEL"] = "INFO"
    setup_logging(level="INFO", prefix="MAIN-PROCESS")

    demo = build_demo()
    demo.queue(max_size=10).launch()