IlyasMoutawwakil committed
Commit 47984ee
1 Parent(s): bfd434f
Files changed (1)
  1. app.py (+24 -37)
app.py CHANGED
@@ -1,7 +1,6 @@
 import os
 import time
 import traceback
-import logging
 from typing import Optional

 from config_store import (
@@ -36,10 +35,8 @@ BACKENDS = ["pytorch", "openvino"]
 TASKS = set(TASKS_TO_OVMODEL.keys()) & set(TASKS_TO_MODEL_LOADERS.keys())


-def run_benchmark(kwargs, oauth_token: Optional[gr.OAuthToken] = None):
+def run_benchmark(inputs, oauth_token: Optional[gr.OAuthToken] = None):
     if oauth_token.token is None or oauth_token.token == "":
-        yield tuple(None for _ in BACKENDS)
-
         raise gr.Error("Please login to be able to run the benchmark.")

     timestamp = time.strftime("%Y-%m-%d-%H-%M-%S")
@@ -47,17 +44,15 @@ def run_benchmark(kwargs, oauth_token: Optional[gr.OAuthToken] = None):
     repo_id = f"{name}/benchmarks"
     token = oauth_token.token

-    create_repo(repo_id, token=token, repo_type="dataset", exist_ok=True)
-    gr.Info(f'Created repository "{repo_id}" where results will be pushed.')
+    try:
+        create_repo(repo_id, token=token, repo_type="dataset", exist_ok=True)
+        gr.Info(f"📂 Created dataset repository {repo_id} on the Hub.")
+    except Exception:
+        raise gr.Error(f"Error while creating dataset repository {repo_id} on the Hub.")

-    configs = {
-        "process": {},
-        "inference": {},
-        "pytorch": {},
-        "openvino": {},
-    }
+    configs = {"process": {}, "inference": {}, "pytorch": {}, "openvino": {}}

-    for key, value in kwargs.items():
+    for key, value in inputs.items():
         if key.label == "model":
             model = value
         elif key.label == "task":
@@ -76,16 +71,10 @@ def run_benchmark(kwargs, oauth_token: Optional[gr.OAuthToken] = None):
     configs["process"] = ProcessConfig(**configs.pop("process"))
     configs["inference"] = InferenceConfig(**configs.pop("inference"))
     configs["pytorch"] = PyTorchConfig(
-        task=task,
-        model=model,
-        device=DEVICE,
-        **configs["pytorch"],
+        task=task, model=model, device=DEVICE, **configs["pytorch"]
     )
     configs["openvino"] = OVConfig(
-        task=task,
-        model=model,
-        device=DEVICE,
-        **configs["openvino"],
+        task=task, model=model, device=DEVICE, **configs["openvino"]
     )

     outputs = {
@@ -95,8 +84,6 @@ def run_benchmark(kwargs, oauth_token: Optional[gr.OAuthToken] = None):

     yield tuple(outputs[b] for b in BACKENDS)

-    setup_logging(level="INFO", prefix="MAIN-PROCESS")
-
     for backend in BACKENDS:
         try:
             benchmark_name = f"{timestamp}/{backend}"
@@ -118,20 +105,19 @@ def run_benchmark(kwargs, oauth_token: Optional[gr.OAuthToken] = None):
                 repo_id=repo_id, subfolder=benchmark_name, token=oauth_token.token
             )

-            outputs[backend] = f"\n{benchmark_report.to_markdown_text()}"
+        except Exception:
+            outputs[backend] = f"\n```python-traceback\n{traceback.format_exc()}```\n"

             yield tuple(outputs[b] for b in BACKENDS)

-            gr.Info(f"Pushed benchmark to {repo_id}/{benchmark_name}")
+            gr.Info(f"❌ Error while running benchmark for {backend} backend.")

-        except Exception:
-            outputs[backend] = f"\n```python\n{traceback.format_exc()}```"
+        else:
+            outputs[backend] = f"\n{benchmark_report.to_markdown_text()}\n"

             yield tuple(outputs[b] for b in BACKENDS)

-            raise gr.Error(f"Error while running benchmark for {backend}")
-
-    logging.getLogger().setLevel(logging.NOTSET)
+            gr.Info(f"✅ Benchmark for {backend} backend ran successfully.")


 def update_task(model_id):
@@ -195,11 +181,11 @@ with gr.Blocks() as demo:
     with gr.Row():
         button = gr.Button(value="Run Benchmark", variant="primary")

-    with gr.Row() as markdown_outputs:
-        with gr.Accordion(label="PyTorch Output", open=True, visible=True):
-            pytorch_output = gr.Markdown()
-        with gr.Accordion(label="OpenVINO Output", open=True, visible=True):
-            openvino_output = gr.Markdown()
+    with gr.Row():
+        with gr.Accordion(label="PyTorch Report", open=True, visible=True):
+            pytorch_report = gr.Markdown()
+        with gr.Accordion(label="OpenVINO Report", open=True, visible=True):
+            openvino_report = gr.Markdown()

     model.submit(inputs=model, outputs=task, fn=update_task)

@@ -215,8 +201,8 @@ with gr.Blocks() as demo:
             *openvino_config.values(),
         },
         outputs={
-            pytorch_output,
-            openvino_output,
+            pytorch_report,
+            openvino_report,
         },
         concurrency_limit=1,
     )
@@ -225,5 +211,6 @@ with gr.Blocks() as demo:
 if __name__ == "__main__":
     os.environ["LOG_TO_FILE"] = "0"
     os.environ["LOG_LEVEL"] = "INFO"
+    setup_logging(level="INFO", prefix="MAIN-PROCESS")

     demo.queue(max_size=10).launch()
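For context, below is a minimal, self-contained sketch (not part of this commit) of the streaming pattern the reworked `run_benchmark` follows: a Gradio generator handler that yields one Markdown string per backend and uses `try`/`except`/`else` so a failing backend reports its traceback inline while the other backend still runs. The `run_one_backend` helper and the placeholder report text are illustrative assumptions, not code from app.py.

```python
# A minimal sketch (assumptions marked in comments) of the per-backend
# try/except/else reporting pattern adopted by this commit.
import traceback

import gradio as gr

BACKENDS = ["pytorch", "openvino"]


def run_one_backend(backend: str) -> str:
    # Hypothetical stand-in for building the configs, launching the benchmark
    # and pushing the report to the Hub.
    return f"### {backend} report\n\n(benchmark results would go here)"


def run_benchmarks():
    # Start by showing a placeholder in every output component.
    outputs = {backend: "Running benchmark..." for backend in BACKENDS}
    yield tuple(outputs[b] for b in BACKENDS)

    for backend in BACKENDS:
        try:
            report = run_one_backend(backend)
        except Exception:
            # Surface the failure as a fenced traceback in the UI and keep going.
            outputs[backend] = f"\n```python-traceback\n{traceback.format_exc()}```\n"
            yield tuple(outputs[b] for b in BACKENDS)
            gr.Info(f"❌ Error while running benchmark for {backend} backend.")
        else:
            outputs[backend] = f"\n{report}\n"
            yield tuple(outputs[b] for b in BACKENDS)
            gr.Info(f"✅ Benchmark for {backend} backend ran successfully.")


with gr.Blocks() as demo:
    button = gr.Button("Run Benchmark", variant="primary")
    with gr.Row():
        pytorch_report = gr.Markdown()
        openvino_report = gr.Markdown()
    button.click(fn=run_benchmarks, outputs=[pytorch_report, openvino_report])

if __name__ == "__main__":
    demo.launch()
```

The `else` branch keeps the success path out of the `try` block, so only failures from the benchmark itself become an inline traceback, and the `gr.Info` toasts mirror the per-backend outcome without aborting the loop or raising a hard error.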