ZeroCommand committed
Commit 0fe497d
1 Parent(s): 2694247

fix wordings and minor layout issue

app_debug.py CHANGED
@@ -63,9 +63,9 @@ def get_queue_status():
         current = pipe.current
         if current is None:
             current = "None"
-        return f'<div style="padding-top: 5%">Current job: {html.escape(current)} <br/> Jobs in queue: <br/> {"".join(get_jobs_info_in_queue())}</div>'
+        return f'<div style="padding-top: 5%">Current job: {html.escape(current)} <br/> Job queue: <br/> {"".join(get_jobs_info_in_queue())}</div>'
     else:
-        return '<div style="padding-top: 5%">No jobs in queue, please submit an evaluation task from another tab.</div>'
+        return '<div style="padding-top: 5%">No jobs waiting, please submit an evaluation task from Text-Classification tab.</div>'
 
 
 def get_demo():
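For context, a minimal self-contained sketch of the structure this hunk edits; it is not taken from the repo, and `pipe`, its queue contents, and `get_jobs_info_in_queue` are invented stand-ins. It shows the if/else the hunk sits inside and why `html.escape` wraps the interpolated job id before it is placed into the status HTML.

import html

class _Pipe:  # hypothetical stand-in for the app's pipeline/queue object
    current = None
    jobs = [("uid-1", "some-model", "some-dataset")]

pipe = _Pipe()

def get_jobs_info_in_queue():
    # render each queued job as an escaped HTML fragment
    return [f"{html.escape(str(job))} <br/>" for job in pipe.jobs]

def get_queue_status():
    if len(pipe.jobs) > 0 or pipe.current is not None:
        current = pipe.current
        if current is None:
            current = "None"
        return f'<div style="padding-top: 5%">Current job: {html.escape(current)} <br/> Job queue: <br/> {"".join(get_jobs_info_in_queue())}</div>'
    return '<div style="padding-top: 5%">No jobs waiting, please submit an evaluation task from Text-Classification tab.</div>'

print(get_queue_status())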
app_leaderboard.py CHANGED
@@ -96,25 +96,25 @@ def get_demo(leaderboard_tab):
     display_df = get_display_df(default_df) # the styled dataframe to display
 
     with gr.Row():
-        with gr.Column():
-            issue_columns_select = gr.CheckboxGroup(
-                label="Issue Columns",
-                choices=issue_columns,
-                value=[],
-                interactive=True,
-            )
         with gr.Column():
             info_columns_select = gr.CheckboxGroup(
                 label="Info Columns",
                 choices=info_columns,
                 value=default_columns,
                 interactive=True,
+            )
+        with gr.Column():
+            issue_columns_select = gr.CheckboxGroup(
+                label="Issue Columns",
+                choices=issue_columns,
+                value=[],
+                interactive=True,
             )
 
     with gr.Row():
         task_select = gr.Dropdown(
             label="Task",
-            choices=["text_classification", "tabular"],
+            choices=["text_classification"],
             value="text_classification",
             interactive=True,
         )
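As an aside, a minimal sketch of the column-selector pattern this layout rearranges, with hypothetical data and handler names rather than the Space's actual code: two CheckboxGroups choose which info and issue columns of a pandas DataFrame are shown.

import gradio as gr
import pandas as pd

df = pd.DataFrame({
    "model_id": ["m1", "m2"],
    "dataset_id": ["d1", "d2"],
    "robustness": [1, 0],
})
info_columns = ["model_id", "dataset_id"]
issue_columns = ["robustness"]

def filter_columns(info_selected, issue_selected):
    # keep only the columns ticked in either CheckboxGroup
    return df[info_selected + issue_selected]

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            info_columns_select = gr.CheckboxGroup(
                label="Info Columns", choices=info_columns, value=info_columns, interactive=True
            )
        with gr.Column():
            issue_columns_select = gr.CheckboxGroup(
                label="Issue Columns", choices=issue_columns, value=[], interactive=True
            )
    table = gr.Dataframe(value=df[info_columns])
    for selector in (info_columns_select, issue_columns_select):
        selector.change(filter_columns, [info_columns_select, issue_columns_select], table)

if __name__ == "__main__":
    demo.launch()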
app_text_classification.py CHANGED
@@ -92,7 +92,7 @@ def get_demo():
         for _ in range(MAX_LABELS, MAX_LABELS + MAX_FEATURES):
             column_mappings.append(gr.Dropdown(visible=False))
 
-        with gr.Accordion(label="Model Wrap Advance Config", open=True):
+        with gr.Accordion(label="Model Wrap Advanced Config", open=True):
             gr.HTML(USE_INFERENCE_API_TIP)
 
             run_inference = gr.Checkbox(value=True, label="Run with Inference API")
@@ -111,7 +111,7 @@ def get_demo():
                 outputs=[inference_token_info],
             )
 
-        with gr.Accordion(label="Scanner Advance Config (optional)", open=False):
+        with gr.Accordion(label="Scanner Advanced Config (optional)", open=False):
             scanners = gr.CheckboxGroup(visible=True)
 
             @gr.on(triggers=[uid_label.change], inputs=[uid_label], outputs=[scanners])
@@ -145,8 +145,8 @@ def get_demo():
         with gr.Row():
             logs = gr.Textbox(
                 value=CHECK_LOG_SECTION_RAW,
-                label="Giskard Bot Evaluation Guide:",
-                visible=False,
+                label="Log",
+                visible=True,
                 every=0.5,
             )
 
@@ -156,7 +156,7 @@ def get_demo():
     gr.on(
         triggers=[model_id_input.change],
         fn=get_related_datasets_from_leaderboard,
-        inputs=[model_id_input],
+        inputs=[model_id_input, dataset_id_input],
         outputs=[dataset_id_input],
     ).then(
         fn=check_dataset,
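For readers unfamiliar with the event wiring in the last hunk, a small hedged sketch of Gradio's gr.on(...).then(...) chaining with the new two-input signature; the handler bodies below are placeholders, not the app's real logic.

import gradio as gr

def get_related_datasets_from_leaderboard(model_id, dataset_id_input):
    # placeholder: the real helper looks the model up in the leaderboard records
    return gr.update(choices=["demo_dataset"], value="")

def check_dataset(dataset_id):
    return f"Selected dataset: {dataset_id}"

with gr.Blocks() as demo:
    model_id_input = gr.Textbox(label="Hugging Face Model id")
    dataset_id_input = gr.Dropdown(label="Hugging Face Dataset id", choices=[], allow_custom_value=True)
    status = gr.Markdown()

    gr.on(
        triggers=[model_id_input.change],
        fn=get_related_datasets_from_leaderboard,
        inputs=[model_id_input, dataset_id_input],
        outputs=[dataset_id_input],
    ).then(
        fn=check_dataset,
        inputs=[dataset_id_input],
        outputs=[status],
    )

if __name__ == "__main__":
    demo.launch()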
text_classification_ui_helpers.py CHANGED
@@ -43,7 +43,7 @@ MAX_FEATURES = 20
 ds_dict = None
 ds_config = None
 
-def get_related_datasets_from_leaderboard(model_id):
+def get_related_datasets_from_leaderboard(model_id, dataset_id_input):
     records = leaderboard.records
     model_id = strip_model_id_from_url(model_id)
     model_records = records[records["model_id"] == model_id]
@@ -52,7 +52,10 @@ def get_related_datasets_from_leaderboard(model_id):
     if len(datasets_unique) == 0:
         return gr.update(choices=[])
 
-    return gr.update(choices=datasets_unique)
+    if dataset_id_input in datasets_unique:
+        return gr.update(choices=datasets_unique)
+
+    return gr.update(choices=datasets_unique, value="")
 
 
 logger = logging.getLogger(__file__)
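A short, hypothetical illustration of the two gr.update() return values used above: omitting value leaves the user's current dropdown selection untouched, while value="" clears a selection that no longer matches the new model. The dataset names are examples only.

import gradio as gr

datasets_unique = ["imdb", "sst2"]                                # example choices only
keep_selection = gr.update(choices=datasets_unique)               # selection preserved
reset_selection = gr.update(choices=datasets_unique, value="")    # selection cleared
print(keep_selection)
print(reset_selection)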
wordings.py CHANGED
@@ -1,7 +1,9 @@
 INTRODUCTION_MD = """
+<div style="display: flex; justify-content: center;">
 <h1 style="text-align: center;">
 🐢Giskard Evaluator - Text Classification
 </h1>
+</div>
 Welcome to the Giskard Evaluator Space! Get a model vulnerability report immediately by simply sharing your model and dataset id below.
 You can also checkout our library documentation <a href="https://docs.giskard.ai/en/latest/getting_started/quickstart/index.html">here</a>.
 """
@@ -26,9 +28,7 @@ CHECK_CONFIG_OR_SPLIT_RAW = """
 Please check your dataset config or split.
 """
 
-CHECK_LOG_SECTION_RAW = """
-Your have successfully submitted a Giskard evaluation. Further details are available in the Logs tab. You can find your report will be posted to your model's community discussion.
-"""
+CHECK_LOG_SECTION_RAW = """Your have successfully submitted a Giskard evaluation job. Further details are available in the Logs tab. You can find your report posted in your model's community discussion section."""
 
 PREDICTION_SAMPLE_MD = """
 <h1 style="text-align: center;">