IlyasMoutawwakil (HF staff) committed
Commit c57954b • 1 parent: a61e83a
Files changed (1):
  1. app.py (+15 -8)
app.py CHANGED
@@ -11,7 +11,8 @@ from config_store import (
 )
 
 import gradio as gr
-from huggingface_hub import whoami, login, logout
+from huggingface_hub import whoami
+from huggingface_hub.errors import GatedRepoError
 from gradio_huggingfacehub_search import HuggingfaceHubSearch
 from optimum_benchmark.launchers.device_isolation_utils import *  # noqa
 from optimum_benchmark.backends.openvino.utils import TASKS_TO_OVMODEL
@@ -37,7 +38,7 @@ BENCHMARKS_REPO_ID = "optimum-benchmark/OpenVINO-Benchmarks"
 TASKS = set(TASKS_TO_OVMODEL.keys()) & set(TASKS_TO_MODEL_LOADERS.keys())
 
 
-def parse_configs(inputs, token: str):
+def parse_configs(inputs):
     configs = {"process": {}, "inference": {}, "pytorch": {}, "openvino": {}}
 
     for key, value in inputs.items():
@@ -63,14 +64,12 @@ def parse_configs(inputs, token: str):
         model=model,
         device=DEVICE,
         **configs["pytorch"],
-        model_kwargs={"token": token},
     )
     configs["openvino"] = OVConfig(
         task=task,
         model=model,
         device=DEVICE,
         **configs["openvino"],
-        model_kwargs={"token": token},
     )
 
     return configs
@@ -87,7 +86,7 @@ def run_benchmark(inputs, oauth_token: Optional[gr.OAuthToken]):
     gr.Info(f"📩 Benchmark will be saved under {BENCHMARKS_REPO_ID}/{folder}")
 
     outputs = {backend: "Running..." for backend in BACKENDS}
-    configs = parse_configs(inputs, token=oauth_token.token)
+    configs = parse_configs(inputs)
     yield tuple(outputs[b] for b in BACKENDS)
 
     for backend in BACKENDS:
@@ -101,9 +100,6 @@ def run_benchmark(inputs, oauth_token: Optional[gr.OAuthToken]):
             )
             benchmark_report = Benchmark.launch(benchmark_config)
 
-            # make sure the token is never pushed to the hub
-            del benchmark_config.backend.model_kwargs["token"]
-
             benchmark_config.push_to_hub(
                 repo_id=BENCHMARKS_REPO_ID,
                 subfolder=benchmark_name,
@@ -115,6 +111,11 @@ def run_benchmark(inputs, oauth_token: Optional[gr.OAuthToken]):
                 token=BENCHMARKS_HF_TOKEN,
             )
 
+        except GatedRepoError:
+            outputs[backend] = f"🔒 Model {configs[backend].model} is gated."
+            yield tuple(outputs[b] for b in BACKENDS)
+            gr.Info("🔒 Gated Repo Error while trying to access the model.")
+
         except Exception:
             outputs[backend] = f"\n```python-traceback\n{traceback.format_exc()}```\n"
             yield tuple(outputs[b] for b in BACKENDS)
@@ -129,6 +130,12 @@ def run_benchmark(inputs, oauth_token: Optional[gr.OAuthToken]):
 def update_task(model_id):
     try:
         inferred_task = infer_task_from_model_name_or_path(model_id)
+
+    except GatedRepoError:
+        raise gr.Error(
+            f"Model {model_id} is gated, please use optimum-benchmark locally to benchmark it."
+        )
+
     except Exception:
         raise gr.Error(
             f"Error while inferring task for {model_id}, please select a task manually."
 
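Note the ordering in `run_benchmark`: the new `except GatedRepoError` clause must come before `except Exception`, since `GatedRepoError` ultimately derives from `Exception` and would otherwise be swallowed by the generic traceback handler. A self-contained illustration (illustrative code, not from app.py):

```python
from huggingface_hub.errors import GatedRepoError

# GatedRepoError subclasses Exception (via HfHubHTTPError), so the more
# specific clause has to come first to ever be reached.
assert issubclass(GatedRepoError, Exception)

def classify(exc: BaseException) -> str:
    try:
        raise exc
    except GatedRepoError:
        return "gated"   # specific handler, checked first
    except Exception:
        return "other"   # generic fallback

print(classify(GatedRepoError("Cannot access gated repo")))  # -> gated
print(classify(RuntimeError("boom")))                        # -> other
```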
 
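Dropping `model_kwargs={"token": token}` is also what makes the old scrub (`del benchmark_config.backend.model_kwargs["token"]`) unnecessary: `push_to_hub` serializes the whole config, so a token stored in it would land on the Hub. A toy dataclass illustrating the hazard (illustrative names, not the real optimum-benchmark classes):

```python
from dataclasses import asdict, dataclass, field

@dataclass
class ToyBackendConfig:  # stand-in for the real backend config
    model: str
    model_kwargs: dict = field(default_factory=dict)

# Old flow: the user's token rides along in the config and would be
# serialized on push unless it is deleted first.
cfg = ToyBackendConfig(model="gpt2", model_kwargs={"token": "hf_SECRET"})
assert "hf_SECRET" in str(asdict(cfg))

# New flow: no token ever enters the config, so there is nothing to scrub.
cfg = ToyBackendConfig(model="gpt2")
assert "hf_SECRET" not in str(asdict(cfg))
```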
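The import change follows the same logic: `login`/`logout` mutate the local credential store and are no longer needed, while `whoami` is kept to identify the signed-in user. A sketch of the remaining use, assuming the Space's `gr.OAuthToken` flow:

```python
from typing import Optional

import gradio as gr
from huggingface_hub import whoami

def get_username(oauth_token: Optional[gr.OAuthToken]) -> str:
    # With Gradio OAuth the app only identifies the caller; it never logs
    # in or out of a shared local credential store.
    if oauth_token is None:
        raise gr.Error("Please sign in with Hugging Face first.")
    return whoami(token=oauth_token.token)["name"]
```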