BenchmarkBot committed
Commit 0eea6c1 • 1 Parent(s): 8e785e9

fix search bar

Files changed (2)
  1. app.py +25 -2
  2. src/assets/text_content.py +1 -1
app.py CHANGED
@@ -1,11 +1,12 @@
 import os
+import json
 import gradio as gr
 import pandas as pd
 from apscheduler.schedulers.background import BackgroundScheduler
 
 from src.assets.text_content import TITLE, INTRODUCTION_TEXT, CITATION_BUTTON_LABEL, CITATION_BUTTON_TEXT
-from src.assets.css_html_js import custom_css
 from src.utils import restart_space, load_dataset_repo, make_clickable_model
+from src.assets.css_html_js import custom_css, get_window_url_params
 
 
 LLM_PERF_LEADERBOARD_REPO = "optimum/llm-perf-leaderboard"
@@ -45,8 +46,22 @@ def get_benchmark_df(benchmark):
     return df
 
 
+def change_tab(query_param):
+    query_param = query_param.replace("'", '"')
+    query_param = json.loads(query_param)
+
+    if (
+        isinstance(query_param, dict)
+        and "tab" in query_param
+        and query_param["tab"] == "evaluation"
+    ):
+        return gr.Tabs.update(selected=1)
+    else:
+        return gr.Tabs.update(selected=0)
+
+
 def search_table(df, query):
-    filtered_df = df[df["model"].str.contains(query, case=False)]
+    filtered_df = df[df["Model 🤗"].str.contains(query, case=False)]
     return filtered_df
 
 
@@ -132,6 +147,14 @@ with demo:
         elem_id="citation-button",
     ).style(show_copy_button=True)
 
+    dummy = gr.Textbox(visible=False)
+    demo.load(
+        change_tab,
+        dummy,
+        tabs,
+        _js=get_window_url_params,
+    )
+
     # Restart space every hour
     scheduler = BackgroundScheduler()
     scheduler.add_job(restart_space, "interval", seconds=3600,
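
Why this fixes the search bar: the leaderboard table presumably renamed its model column from `model` to `Model 🤗` (the emoji header appears in the new filter), so the old `df["model"]` lookup would raise a `KeyError` on every query. A minimal sketch of the fixed behavior, with made-up rows for illustration (the real frame is built by `get_benchmark_df`):

```python
import pandas as pd

# Toy leaderboard frame; column name matches the one used in this commit.
df = pd.DataFrame({"Model 🤗": ["llama-7b", "falcon-7b", "mpt-7b"]})

def search_table(df, query):
    # Case-insensitive substring match on the renamed column.
    return df[df["Model 🤗"].str.contains(query, case=False)]

print(search_table(df, "Falcon"))  # keeps only the falcon-7b row
```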
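The new `change_tab` handler receives the URL query parameters as a string via a hidden textbox, and a JS object round-tripped through Gradio tends to arrive rendered Python-style with single quotes, which `json.loads` rejects; hence the `replace("'", '"')` normalization before parsing. A quick illustration (the input string is a plausible example, not taken from this commit):

```python
import json

query_param = "{'tab': 'evaluation'}"        # as it may arrive from the browser
query_param = query_param.replace("'", '"')  # normalize to valid JSON
print(json.loads(query_param))               # {'tab': 'evaluation'}
```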
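The tab deep-link is wired through the hidden `dummy` textbox: on `demo.load`, the `_js` snippet runs client-side, captures the page's query string, and feeds it to `change_tab`, which selects the second tab when the URL contains `?tab=evaluation`. `get_window_url_params` itself lives in `src/assets/css_html_js.py` and is not shown in this commit; a plausible sketch, modeled on the same pattern used in other Hugging Face leaderboard Spaces:

```python
# src/assets/css_html_js.py (hypothetical excerpt): a JS function string that
# Gradio executes in the browser; its return value lands in the dummy textbox.
get_window_url_params = """
    function(url_params) {
        const params = new URLSearchParams(window.location.search);
        url_params = Object.fromEntries(params);
        return url_params;
    }
    """
```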
src/assets/text_content.py CHANGED
@@ -4,7 +4,7 @@ INTRODUCTION_TEXT = f"""
 The 🤗 Open LLM-Perf Leaderboard 🏋️ aims to benchmark the performance (latency & throughput) of Large Language Models (LLMs) on different hardwares and backends using [Optimum-Benchmark](https://github.com/huggingface/optimum-benchmark) and [Optimum](https://github.com/huggingface/optimum) flavors.
 Anyone from the community can submit a model or a hardware+backend configuration for automated benchmarking:
 - Model submissions should be made in the [🤗 Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) and will be added to the 🤗 Open LLM-Perf Leaderboard 🏋️ once they're publicly available.
-- Hardware+Backend submissions should be made in the 🤗 Open LLM-Perf Leaderboard 🏋️ [community discussions](https://huggingface.co/spaces/optimum/llm-perf-leaderboard/discussions); An automated process will be set up soon to allow for direct submissions.
+- Hardware+Backend submissions should be made in the 🤗 Open LLM-Perf Leaderboard 🏋️ [community discussions](https://huggingface.co/spaces/optimum/llm-perf-leaderboard/discussions); An automated process will be set up soon.
 
 [Config files](https://github.com/huggingface/optimum-benchmark/blob/main/examples/bert.yaml) (which can be used with Optimum-Benchmark) will be available soon for reproduction and questioning/correction of the results.
 """