IlyasMoutawwakil (HF staff) committed
Commit 6c813f8
Parent: 976aad2

use search

Files changed (2):
  1. app.py (+9 −21)
  2. requirements.txt (+1 −0)
app.py CHANGED
@@ -13,6 +13,7 @@ from config_store import (
 
 import gradio as gr
 from huggingface_hub import create_repo, whoami
+from gradio_huggingfacehub_search import HuggingfaceHubSearch
 from optimum_benchmark.launchers.device_isolation_utils import *  # noqa
 from optimum_benchmark.backends.openvino.utils import TASKS_TO_OVMODEL
 from optimum_benchmark.backends.transformers_utils import TASKS_TO_MODEL_LOADERS
@@ -25,24 +26,13 @@ from optimum_benchmark import (
     OVConfig,
 )
 from optimum_benchmark.logging_utils import setup_logging
+from optimum_benchmark.task_utils import infer_task_from_model_name_or_path
 
 
 DEVICE = "cpu"
 LAUNCHER = "process"
 SCENARIO = "inference"
 BACKENDS = ["pytorch", "openvino"]
-MODELS = [
-    "openai-community/gpt2",
-    "google-bert/bert-base-uncased",
-    "hf-internal-testing/tiny-random-LlamaForCausalLM",
-    "hf-internal-testing/tiny-random-BertForSequenceClassification",
-]
-MODELS_TO_TASKS = {
-    "openai-community/gpt2": "text-generation",
-    "google-bert/bert-base-uncased": "text-classification",
-    "hf-internal-testing/tiny-random-LlamaForCausalLM": "text-generation",
-    "hf-internal-testing/tiny-random-BertForSequenceClassification": "text-classification",
-}
 TASKS = set(TASKS_TO_OVMODEL.keys()) & set(TASKS_TO_MODEL_LOADERS.keys())
 
 
@@ -159,19 +149,19 @@ def build_demo():
         "</p>"
     )
 
-    model = gr.Dropdown(
+    model = HuggingfaceHubSearch(
         label="model",
-        choices=MODELS,
-        value=MODELS[0],
-        info="Model to run the benchmark on.",
+        search_type="model",
+        value="openai-community/gpt2",
+        placeholder="Search for a model",
+        sumbit_on_select=True,
     )
     task = gr.Dropdown(
         label="task",
         choices=TASKS,
-        value=MODELS_TO_TASKS[MODELS[0]],
+        value="text-generation",
         info="Task to run the benchmark on.",
     )
-
     backends = gr.CheckboxGroup(
         interactive=True,
         label="backends",
@@ -202,9 +192,7 @@ def build_demo():
     with gr.Accordion(label="OpenVINO Output", open=True, visible=True):
         openvino_output = gr.Markdown()
 
-    model.change(
-        inputs=model, outputs=task, fn=lambda value: MODELS_TO_TASKS[value]
-    )
+    model.submit(inputs=model, outputs=task, fn=infer_task_from_model_name_or_path)
 
     backends.change(
         inputs=backends,
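In short, the commit drops the hard-coded MODELS list and MODELS_TO_TASKS mapping: the model is now chosen through a Hub search box, and the task dropdown is auto-filled from the selected model. A minimal sketch of that wiring, assuming only the pieces visible in this diff (the trimmed `choices` list and the standalone layout are illustrative; the real build_demo contains many more components):

```python
# Sketch of the new model-selection flow; assumes gradio,
# gradio_huggingfacehub_search and optimum-benchmark are installed.
import gradio as gr
from gradio_huggingfacehub_search import HuggingfaceHubSearch
from optimum_benchmark.task_utils import infer_task_from_model_name_or_path

with gr.Blocks() as demo:
    # Free-text search over Hub models replaces the fixed dropdown.
    model = HuggingfaceHubSearch(
        label="model",
        search_type="model",
        value="openai-community/gpt2",
        placeholder="Search for a model",
        sumbit_on_select=True,  # parameter name as spelled in the commit above
    )
    task = gr.Dropdown(
        label="task",
        choices=["text-generation", "text-classification"],  # trimmed for the sketch
        value="text-generation",
    )
    # Submitting a model id auto-fills the task from the model's Hub metadata.
    model.submit(inputs=model, outputs=task, fn=infer_task_from_model_name_or_path)

demo.launch()
```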
requirements.txt CHANGED
@@ -1 +1,2 @@
+gradio_huggingfacehub_search
 optimum-benchmark[openvino]@git+https://github.com/huggingface/optimum-benchmark.git@markdown-report