IlyasMoutawwakil committed
Commit 7f9a235
1 Parent(s): 74cbfec

interactive backend and benchmark

Files changed (7)
  1. .gitignore +2 -0
  2. app.py +82 -164
  3. base_config.yaml +2 -2
  4. configs.py +254 -0
  5. pyproject.toml +3 -0
  6. requirements.txt +1 -1
  7. run.py +79 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
+ __pycache__
+ runs
app.py CHANGED
@@ -1,136 +1,28 @@
  import random
- import subprocess
  import gradio as gr
- from ansi2html import Ansi2HTMLConverter
  from optimum_benchmark.task_utils import (
      TASKS_TO_AUTOMODELS,
      infer_task_from_model_name_or_path,
  )
 
- 
- def get_backend_config():
-     return [
-         # seed
-         gr.Textbox(label="backend.seed", value=42),
-         # inter_op_num_threads
-         gr.Textbox(
-             label="backend.inter_op_num_threads",
-             value=None,
-             placeholder=None,
-         ),
-         # intra_op_num_threads
-         gr.Textbox(
-             label="backend.intra_op_num_threads",
-             value=None,
-             placeholder=None,
-         ),
-         # initial_isolation_check
-         gr.Checkbox(label="backend.initial_isolation_check", value=True),
-         # continous_isolation_check
-         gr.Checkbox(label="backend.continous_isolation_check", value=True),
-         # delete_cache
-         gr.Checkbox(label="backend.delete_cache", value=False),
-     ]
- 
- 
- def get_inference_config():
-     return [
-         # duration
-         gr.Textbox(label="benchmark.duration", value=10),
-         # warmup runs
-         gr.Textbox(label="benchmark.warmup_runs", value=1),
-     ]
- 
- 
- def get_pytorch_config():
-     return [
-         # no_weights
-         gr.Checkbox(label="backend.no_weights"),
-         # device_map
-         gr.Dropdown(["auto", "sequential"], label="backend.device_map"),
-         # torch_dtype
-         gr.Dropdown(
-             ["bfloat16", "float16", "float32", "auto"],
-             label="backend.torch_dtype",
-         ),
-         # disable_grad
-         gr.Checkbox(label="backend.disable_grad"),
-         # eval_mode
-         gr.Checkbox(label="backend.eval_mode"),
-         # amp_autocast
-         gr.Checkbox(label="backend.amp_autocast"),
-         # amp_dtype
-         gr.Dropdown(["bfloat16", "float16"], label="backend.amp_dtype"),
-         # torch_compile
-         gr.Checkbox(label="backend.torch_compile"),
-         # bettertransformer
-         gr.Checkbox(label="backend.bettertransformer"),
-         # quantization_scheme
-         gr.Dropdown(["gptq", "bnb"], label="backend.quantization_scheme"),
-         # use_ddp
-         gr.Checkbox(label="backend.use_ddp"),
-         # peft_strategy
-         gr.Textbox(label="backend.peft_strategy"),
-     ]
- 
- 
- conv = Ansi2HTMLConverter()
- 
- 
- def run_experiment(kwargs):
-     arguments = [
-         "optimum-benchmark",
-         "--config-dir",
-         "./",
-         "--config-name",
-         "base_config",
-     ]
-     for key, value in kwargs.items():
-         arguments.append(f"{key.label}={value if value != '' else 'null'}")
- 
-     # stream subprocess output
-     process = subprocess.Popen(
-         arguments,
-         stdout=subprocess.PIPE,
-         stderr=subprocess.STDOUT,
-         universal_newlines=True,
-     )
- 
-     ansi_text = ""
-     for ansi_line in iter(process.stdout.readline, ""):
-         # stream process output
-         print(ansi_line, end="")
-         # append line to ansi text
-         ansi_text += ansi_line
-         # convert ansi to html
-         html_text = conv.convert(ansi_text)
-         # extract style from html
-         style = html_text.split('<style type="text/css">')[1].split("</style>")[0]
-         # parse style into dict
-         style_dict = {}
-         for line in style.split("\n"):
-             if line:
-                 key, value = line.split("{")
-                 key = key.replace(".", "").strip()
-                 value = value.split("}")[0].strip()
-                 style_dict[key] = value
- 
-         # replace style in html
-         for key, value in style_dict.items():
-             html_text = html_text.replace(f'class="{key}"', f'style="{value}"')
- 
-         yield html_text
- 
-     return html_text
 
 
  with gr.Blocks() as demo:
      # title text
-     gr.HTML("<h1 style='text-align: center'>🤗 Optimum Benchmark 🏋️</h1>")
      # explanation text
-     gr.Markdown(
-         "This is a demo space of [Optimum-Benchmark](https://github.com/huggingface/optimum-benchmark.git)."
-     )
 
      model = gr.Textbox(
          label="model",
@@ -143,64 +35,90 @@ with gr.Blocks() as demo:
      )
      device = gr.Dropdown(
          value="cpu",
-         choices=["cpu", "cuda"],
          label="device",
      )
-     expetiment_name = gr.Textbox(
          label="experiment_name",
          value=f"experiment_{random.getrandbits(16)}",
      )
- 
-     model.submit(fn=infer_task_from_model_name_or_path, inputs=[model], outputs=[task])
 
      with gr.Row():
-         with gr.Column(variant="panel"):
-             backend = gr.Dropdown(
-                 ["pytorch", "onnxruntime", "openvino", "neural-compressor"],
-                 label="backend",
-                 value="pytorch",
-                 container=True,
-             )
- 
-         with gr.Column(variant="panel"):
-             with gr.Accordion(label="Backend Config", open=False):
-                 backend_config = get_backend_config() + get_pytorch_config()
- 
-     with gr.Row():
-         with gr.Column(variant="panel"):
-             benchmark = gr.Dropdown(
-                 choices=["inference", "training"],
-                 label="benchmark",
-                 value="inference",
-                 container=True,
-             )
- 
-         with gr.Column(variant="panel"):
-             with gr.Accordion(label="Benchmark Config", open=False):
-                 benchmark_config = get_inference_config()
- 
-     # run benchmark button
-     run_benchmark = gr.Button(value="Run Benchmark", variant="primary")
-     # accordion with output logs
-     with gr.Accordion(label="Logs:", open=True):
-         logs = gr.HTML()
- 
-     run_benchmark.click(
-         fn=run_experiment,
          inputs={
-             expetiment_name,
              model,
              task,
              device,
              backend,
              benchmark,
-             *backend_config,
-             *benchmark_config,
          },
-         outputs=[logs],
          queue=True,
      )
 
 
- if __name__ == "__main__":
-     demo.queue().launch()
 
  import random
  import gradio as gr
  from optimum_benchmark.task_utils import (
      TASKS_TO_AUTOMODELS,
      infer_task_from_model_name_or_path,
  )
+ from run import run_benchmark
+ from configs import (
+     get_training_config,
+     get_inference_config,
+     get_neural_compressor_config,
+     get_onnxruntime_config,
+     get_openvino_config,
+     get_pytorch_config,
+ )
 
+ BACKENDS = ["pytorch", "onnxruntime", "openvino", "neural-compressor"]
+ BENCHMARKS = ["inference", "training"]
 
 
  with gr.Blocks() as demo:
      # title text
+     gr.HTML("<h1 style='text-align: center'>🤗 Optimum Benchmark UI 🏋️</h1>")
      # explanation text
+     gr.Markdown("This is a demo space of [Optimum-Benchmark](https://github.com/huggingface/optimum-benchmark.git).")
 
      model = gr.Textbox(
          label="model",
      )
      device = gr.Dropdown(
          value="cpu",
          label="device",
+         choices=["cpu", "cuda"],
      )
+     experiment = gr.Textbox(
          label="experiment_name",
          value=f"experiment_{random.getrandbits(16)}",
      )
+     model.submit(fn=infer_task_from_model_name_or_path, inputs=model, outputs=task)
 
      with gr.Row():
+         with gr.Column():
+             with gr.Row():
+                 backend = gr.Dropdown(
+                     label="backend",
+                     choices=BACKENDS,
+                     value=BACKENDS[0],
+                 )
+ 
+             with gr.Row() as backend_configs:
+                 with gr.Accordion(label="Pytorch Config", open=False, visible=True):
+                     pytorch_config = get_pytorch_config()
+                 with gr.Accordion(label="OnnxRunTime Config", open=False, visible=False):
+                     onnxruntime_config = get_onnxruntime_config()
+                 with gr.Accordion(label="OpenVINO Config", open=False, visible=False):
+                     openvino_config = get_openvino_config()
+                 with gr.Accordion(label="Neural Compressor Config", open=False, visible=False):
+                     neural_compressor_config = get_neural_compressor_config()
+ 
+             # hide backend configs based on backend
+             backend.change(
+                 inputs=backend,
+                 outputs=backend_configs.children,
+                 fn=lambda value: [gr.update(visible=value == key) for key in BACKENDS],
+             )
+ 
+         with gr.Column():
+             with gr.Row():
+                 benchmark = gr.Dropdown(
+                     label="benchmark",
+                     choices=BENCHMARKS,
+                     value=BENCHMARKS[0],
+                 )
+ 
+             with gr.Row() as benchmark_configs:
+                 with gr.Accordion(label="Inference Config", open=False, visible=True):
+                     inference_config = get_inference_config()
+                 with gr.Accordion(label="Training Config", open=False, visible=False):
+                     training_config = get_training_config()
+ 
+             # hide benchmark configs based on benchmark
+             benchmark.change(
+                 inputs=benchmark,
+                 outputs=benchmark_configs.children,
+                 fn=lambda value: [gr.update(visible=value == key) for key in BENCHMARKS],
+             )
+ 
+     button = gr.Button(value="Run Benchmark", variant="primary")
+     with gr.Accordion(label="LOGS", open=True, visible=False):
+         output = gr.HTML()
+ 
+     button.click(
+         fn=run_benchmark,
          inputs={
+             experiment,
              model,
              task,
              device,
              backend,
              benchmark,
+             *pytorch_config,
+             *openvino_config,
+             *onnxruntime_config,
+             *neural_compressor_config,
+             *inference_config,
+             *training_config,
          },
+         outputs=output,
          queue=True,
      )
+     button.click(
+         inputs=[],
+         outputs=output.parent,
+         fn=lambda: gr.update(visible=True),
+     )
 
 
+ demo.queue().launch()
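
The reworked `app.py` shows or hides one accordion per backend (and per benchmark) by returning a list of `gr.update(visible=...)` values from the dropdown's `change` handler, ordered the same way as the row's children. A minimal, self-contained sketch of that pattern, with hypothetical choices and labels:

```python
import gradio as gr

CHOICES = ["first", "second"]  # hypothetical choices

with gr.Blocks() as sketch:
    selector = gr.Dropdown(choices=CHOICES, value=CHOICES[0], label="selector")
    with gr.Row() as panels:
        # one panel per choice, declared in the same order as CHOICES
        with gr.Accordion(label="First Config", open=False, visible=True):
            gr.Textbox(label="first.option")
        with gr.Accordion(label="Second Config", open=False, visible=False):
            gr.Textbox(label="second.option")
    # show only the panel whose key matches the current selection
    selector.change(
        inputs=selector,
        outputs=panels.children,
        fn=lambda value: [gr.update(visible=value == key) for key in CHOICES],
    )

sketch.launch()
```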
 
base_config.yaml CHANGED
@@ -1,6 +1,6 @@
  defaults:
-   - backend: pytorch # default backend
-   - benchmark: inference # default benchmark
+   - backend: null # default backend
+   - benchmark: null # default benchmark
    - experiment # inheriting experiment schema
    - _self_ # for hydra 1.1 compatibility
    - override hydra/job_logging: colorlog # colorful logging
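
A `null` default leaves the `backend` and `benchmark` config groups unset until an override selects them at launch time, which is exactly what `run.py` (below) appends to the command line. A sketch of the equivalent invocation, with hypothetical model, task, and experiment values:

```python
import subprocess

# every key=value token is a Hydra override applied on top of base_config.yaml
subprocess.run(
    [
        "optimum-benchmark",
        "--config-dir", "./",
        "--config-name", "base_config",
        "model=bert-base-uncased",  # hypothetical model
        "task=text-classification",  # hypothetical task
        "device=cpu",
        "backend=pytorch",  # selects the group left as `backend: null`
        "benchmark=inference",  # selects the group left as `benchmark: null`
        "experiment_name=demo_run",  # hypothetical experiment name
    ],
    check=True,
)
```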
configs.py ADDED
@@ -0,0 +1,254 @@
+ import gradio as gr
+ 
+ 
+ def get_base_backend_config(backend_name="pytorch"):
+     return [
+         # seed
+         gr.Textbox(
+             value=42,
+             label=f"{backend_name}.seed",
+             info="Sets seed for reproducibility",
+         ),
+         # inter_op_num_threads
+         gr.Textbox(
+             value="null",
+             label=f"{backend_name}.inter_op_num_threads",
+             info="Use null for default and -1 for cpu_count()",
+         ),
+         # intra_op_num_threads
+         gr.Textbox(
+             value="null",
+             label=f"{backend_name}.intra_op_num_threads",
+             info="Use null for default and -1 for cpu_count()",
+         ),
+         # initial_isolation_check
+         gr.Checkbox(
+             value=True,
+             label=f"{backend_name}.initial_isolation_check",
+             info="Makes sure that initially, no other process is running on the target device",
+         ),
+         # continous_isolation_check
+         gr.Checkbox(
+             value=True,
+             label=f"{backend_name}.continous_isolation_check",
+             info="Makes sure that throughout the benchmark, no other process is running on the target device",
+         ),
+         # delete_cache
+         gr.Checkbox(
+             value=False,
+             label=f"{backend_name}.delete_cache",
+             info="Deletes model cache (weights & configs) after benchmark is done",
+         ),
+     ]
+ 
+ 
+ def get_pytorch_config():
+     return get_base_backend_config(backend_name="pytorch") + [
+         # no_weights
+         gr.Checkbox(
+             value=False,
+             label="pytorch.no_weights",
+             info="Generates random weights instead of downloading pretrained ones",
+         ),
+         # # device_map
+         # gr.Dropdown(
+         #     value="null",
+         #     label="pytorch.device_map",
+         #     choices=["null", "auto", "sequential"],
+         #     info="Use null for default and `auto` or `sequential` the same way as in `from_pretrained`",
+         # ),
+         # torch_dtype
+         gr.Dropdown(
+             value="null",
+             label="pytorch.torch_dtype",
+             choices=["null", "bfloat16", "float16", "float32", "auto"],
+             info="Use null for default and `auto` for automatic dtype selection",
+         ),
+         # amp_autocast
+         gr.Checkbox(
+             value=False,
+             label="pytorch.amp_autocast",
+             info="Enables Pytorch's native Automatic Mixed Precision",
+         ),
+         # amp_dtype
+         gr.Dropdown(
+             value="null",
+             label="pytorch.amp_dtype",
+             info="Use null for default",
+             choices=["null", "bfloat16", "float16"],
+         ),
+         # torch_compile
+         gr.Checkbox(
+             value=False,
+             label="pytorch.torch_compile",
+             info="Compiles the model with torch.compile",
+         ),
+         # bettertransformer
+         gr.Checkbox(
+             value=False,
+             label="pytorch.bettertransformer",
+             info="Applies optimum.BetterTransformer for fastpath and optimized attention",
+         ),
+         # quantization_scheme
+         gr.Dropdown(
+             value="null",
+             choices=["null", "gptq", "bnb"],
+             label="pytorch.quantization_scheme",
+             info="Use null for no quantization",
+         ),
+         # # use_ddp
+         # gr.Checkbox(
+         #     value=False,
+         #     label="pytorch.use_ddp",
+         #     info="Uses DistributedDataParallel for multi-gpu training",
+         # ),
+         # peft_strategy
+         gr.Textbox(
+             value="null",
+             label="pytorch.peft_strategy",
+         ),
+     ]
+ 
+ 
+ def get_onnxruntime_config():
+     return get_base_backend_config(backend_name="onnxruntime")
+ 
+     # no_weights: bool = False
+ 
+     # # export options
+     # export: bool = True
+     # use_cache: bool = True
+     # use_merged: bool = False
+     # torch_dtype: Optional[str] = None
+ 
+     # # provider options
+     # provider: str = "${infer_provider:${device}}"
+     # device_id: Optional[int] = "${oc.deprecated:backend.provider_options.device_id}"
+     # provider_options: Dict[str, Any] = field(default_factory=lambda: {"device_id": "${infer_device_id:${device}}"})
+ 
+     # # inference options
+     # use_io_binding: bool = "${is_gpu:${device}}"
+     # enable_profiling: bool = "${oc.deprecated:backend.session_options.enable_profiling}"
+     # session_options: Dict[str, Any] = field(
+     #     default_factory=lambda: {"enable_profiling": "${is_profiling:${benchmark.name}}"}
+     # )
+ 
+     # # optimization options
+     # optimization: bool = False
+     # optimization_config: Dict[str, Any] = field(default_factory=dict)
+ 
+     # # quantization options
+     # quantization: bool = False
+     # quantization_config: Dict[str, Any] = field(default_factory=dict)
+ 
+     # # calibration options
+     # calibration: bool = False
+     # calibration_config: Dict[str, Any] = field(default_factory=dict)
+ 
+     # # null, O1, O2, O3, O4
+     # auto_optimization: Optional[str] = None
+     # auto_optimization_config: Dict[str, Any] = field(default_factory=dict)
+ 
+     # # null, arm64, avx2, avx512, avx512_vnni, tensorrt
+     # auto_quantization: Optional[str] = None
+     # auto_quantization_config: Dict[str, Any] = field(default_factory=dict)
+ 
+     # # ort-training is basically a different package so we might need to separate these two backends in the future
+     # use_inference_session: bool = "${is_inference:${benchmark.name}}"
+ 
+     # # training options
+     # use_ddp: bool = False
+     # ddp_config: Dict[str, Any] = field(default_factory=dict)
+ 
+     # # peft options
+     # peft_strategy: Optional[str] = None
+     # peft_config: Dict[str, Any] = field(default_factory=dict)
+ 
+ 
+ def get_openvino_config():
+     return get_base_backend_config(backend_name="openvino")
+ 
+ 
+ def get_neural_compressor_config():
+     return get_base_backend_config(backend_name="neural_compressor")
+ 
+ 
+ def get_inference_config():
+     return [
+         # duration
+         gr.Textbox(
+             value=10,
+             label="inference.duration",
+             info="Minimum duration of benchmark in seconds",
+         ),
+         # warmup runs
+         gr.Textbox(
+             value=10,
+             label="inference.warmup_runs",
+             info="Number of warmup runs before measurements",
+         ),
+         # memory
+         gr.Checkbox(
+             value=False,
+             label="inference.memory",
+             info="Measures the peak memory footprint",
+         ),
+         # energy
+         gr.Checkbox(
+             value=False,
+             label="inference.energy",
+             info="Measures energy consumption and carbon emissions",
+         ),
+         # input_shapes
+         gr.Dataframe(
+             type="array",
+             value=[[2, 16]],
+             row_count=(1, "static"),
+             col_count=(2, "dynamic"),
+             label="inference.input_shapes",
+             headers=["batch_size", "sequence_length"],
+             info="Controllable input shapes, add more columns for more inputs",
+         ),
+         # forward kwargs
+         gr.Dataframe(
+             type="array",
+             value=[[False]],
+             headers=["return_dict"],
+             row_count=(1, "static"),
+             col_count=(1, "dynamic"),
+             label="inference.forward_kwargs",
+             info="Keyword arguments for the forward pass, add more columns for more arguments",
+         ),
+     ]
+ 
+ 
+ def get_training_config():
+     return [
+         # warmup steps
+         gr.Textbox(
+             value=40,
+             label="training.warmup_steps",
+         ),
+         # dataset_shapes
+         gr.Dataframe(
+             type="array",
+             value=[[500, 16]],
+             headers=["dataset_size", "sequence_length"],
+             row_count=(1, "static"),
+             col_count=(2, "dynamic"),
+             label="training.dataset_shapes",
+         ),
+         # training_arguments
+         gr.Dataframe(
+             value=[[2]],
+             type="array",
+             row_count=(1, "static"),
+             col_count=(1, "dynamic"),
+             label="training.training_arguments",
+             headers=["per_device_train_batch_size"],
+         ),
+     ]
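
Each component's label doubles as its config key: `run.py` keeps only the components whose label is prefixed by the selected backend or benchmark, rewrites that prefix to the generic `backend.`/`benchmark.` group, and expands `gr.Dataframe` values column-wise into `++` (append) overrides. A small illustration with hypothetical values:

```python
# hypothetical selection: backend="pytorch", one scalar and one dataframe component
backend = "pytorch"

label = "pytorch.torch_dtype".replace(f"{backend}.", "backend.")
print(f"{label}=float16")  # -> backend.torch_dtype=float16

headers, row = ["batch_size", "sequence_length"], [2, 16]
print([f"++benchmark.input_shapes.{key}={value}" for key, value in zip(headers, row)])
# -> ['++benchmark.input_shapes.batch_size=2', '++benchmark.input_shapes.sequence_length=16']
```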
pyproject.toml ADDED
@@ -0,0 +1,3 @@
+ [tool.black]
+ line-length = 119
+ target-version = ['py37']
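
The `[tool.black]` table pins the formatter to 119-character lines and a py37 target; Black picks it up automatically from the project root. A quick check, assuming Black is installed locally:

```python
import subprocess

# --check/--diff only report what would change; nothing is rewritten
subprocess.run(["black", "--check", "--diff", "."], check=False)
```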
requirements.txt CHANGED
@@ -1,3 +1,3 @@
  gradio
  ansi2html
- optimum-benchmark[onnxruntime,openvino,neural-compressor]@git+https://github.com/huggingface/optimum-benchmark.git
+ optimum-benchmark[onnxruntime,openvino,neural-compressor,diffusers,peft]@git+https://github.com/huggingface/optimum-benchmark.git
run.py ADDED
@@ -0,0 +1,79 @@
+ import pprint
+ import subprocess
+ import gradio as gr
+ from ansi2html import Ansi2HTMLConverter
+ 
+ ansi2html_converter = Ansi2HTMLConverter(inline=True)
+ 
+ 
+ def run_benchmark(kwargs):
+     for key, value in kwargs.copy().items():
+         if key.label == "experiment_name":
+             experiment_name = value
+             kwargs.pop(key)
+         elif key.label == "model":
+             model = value
+             kwargs.pop(key)
+         elif key.label == "task":
+             task = value
+             kwargs.pop(key)
+         elif key.label == "device":
+             device = value
+             kwargs.pop(key)
+         elif key.label == "backend":
+             backend = value
+             kwargs.pop(key)
+         elif key.label == "benchmark":
+             benchmark = value
+             kwargs.pop(key)
+         else:
+             continue
+ 
+     arguments = [
+         "optimum-benchmark",
+         "--config-dir",
+         "./",
+         "--config-name",
+         "base_config",
+         f"task={task}",
+         f"model={model}",
+         f"device={device}",
+         f"backend={backend}",
+         f"benchmark={benchmark}",
+         f"experiment_name={experiment_name}",
+     ]
+ 
+     for component, value in kwargs.items():
+         if f"{backend}." in component.label or f"{benchmark}." in component.label:
+             label = component.label.replace(f"{backend}.", "backend.").replace(f"{benchmark}.", "benchmark.")
+ 
+             if isinstance(component, gr.Dataframe):
+                 for sub_key, sub_value in zip(component.headers, value[0]):
+                     arguments.append(f"++{label}.{sub_key}={sub_value}")
+             else:
+                 arguments.append(f"{label}={value}")
+ 
+     pprint.pprint(arguments)
+ 
+     # stream subprocess output
+     process = subprocess.Popen(
+         arguments,
+         stdout=subprocess.PIPE,
+         stderr=subprocess.STDOUT,
+         universal_newlines=True,
+     )
+ 
+     ansi_text = ""
+     for ansi_line in iter(process.stdout.readline, ""):
+         if "torch.distributed.nn.jit.instantiator" in ansi_line:
+             continue
+         # stream process output
+         print(ansi_line, end="")
+         # append line to ansi text
+         ansi_text += ansi_line
+         # convert ansi to html
+         html_text = ansi2html_converter.convert(ansi_text)
+         # stream html output
+         yield html_text
+ 
+     return html_text
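
`run_benchmark` is a generator, so Gradio re-renders the `gr.HTML` log panel on every `yield`; `Ansi2HTMLConverter(inline=True)` emits inline `style` attributes, replacing the manual class-to-inline-style rewriting this commit deletes from `app.py`. A toy sketch of the same streaming loop, with a hypothetical function name and command:

```python
import subprocess
from ansi2html import Ansi2HTMLConverter

converter = Ansi2HTMLConverter(inline=True)  # inline styles survive inside gr.HTML

def stream_logs_as_html(command):
    # read the child's stdout line by line, accumulate the raw ANSI text,
    # and yield a fresh HTML rendering after each line
    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
    )
    ansi_text = ""
    for line in iter(process.stdout.readline, ""):
        ansi_text += line
        yield converter.convert(ansi_text)

# hypothetical usage
for html in stream_logs_as_html(["echo", "hello world"]):
    print(html[:120])
```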