ZeroCommand committed
Commit 7eb3c10
1 Parent(s): 2694247

use login button

app_text_classification.py CHANGED
@@ -8,7 +8,6 @@ from text_classification_ui_helpers import (
     align_columns_and_show_prediction,
     get_dataset_splits,
     check_dataset,
-    show_hf_token_info,
     precheck_model_ds_enable_example_btn,
     try_submit,
     empty_column_mapping,
@@ -20,9 +19,8 @@ import logging
 from wordings import (
     CONFIRM_MAPPING_DETAILS_MD,
     INTRODUCTION_MD,
-    USE_INFERENCE_API_TIP,
+    LOG_IN_TIPS,
     CHECK_LOG_SECTION_RAW,
-    HF_TOKEN_INVALID_STYLED
 )

 MAX_LABELS = 40
@@ -93,23 +91,8 @@ def get_demo():
             column_mappings.append(gr.Dropdown(visible=False))

         with gr.Accordion(label="Model Wrap Advance Config", open=True):
-            gr.HTML(USE_INFERENCE_API_TIP)
-
-            run_inference = gr.Checkbox(value=True, label="Run with Inference API")
-            inference_token = gr.Textbox(
-                placeholder="hf_xxxxxxxxxxxxxxxxxxxx",
-                value="",
-                label="HF Token for Inference API",
-                visible=True,
-                interactive=True,
-            )
-            inference_token_info = gr.HTML(value=HF_TOKEN_INVALID_STYLED, visible=False)
-
-            inference_token.change(
-                fn=show_hf_token_info,
-                inputs=[inference_token],
-                outputs=[inference_token_info],
-            )
+            gr.HTML(LOG_IN_TIPS)
+            gr.LoginButton()

         with gr.Accordion(label="Scanner Advance Config (optional)", open=False):
             scanners = gr.CheckboxGroup(visible=True)
@@ -233,8 +216,6 @@ def get_demo():
             dataset_config_input,
             dataset_split_input,
             uid_label,
-            run_inference,
-            inference_token,
         ],
         outputs=[
             validation_result,
@@ -257,8 +238,6 @@ def get_demo():
             dataset_id_input,
             dataset_config_input,
             dataset_split_input,
-            run_inference,
-            inference_token,
             uid_label,
         ],
         outputs=[
@@ -274,15 +253,11 @@ def get_demo():

     gr.on(
         triggers=[
-            run_inference.input,
-            inference_token.input,
             scanners.input,
         ],
         fn=enable_run_btn,
         inputs=[
             uid_label,
-            run_inference,
-            inference_token,
             model_id_input,
             dataset_id_input,
             dataset_config_input,
@@ -296,8 +271,6 @@ def get_demo():
         fn=enable_run_btn,
         inputs=[
             uid_label,
-            run_inference,
-            inference_token,
             model_id_input,
             dataset_id_input,
             dataset_config_input,
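For context, this is the Gradio OAuth pattern the commit switches to: a gr.LoginButton() signs the user in, and any event handler whose parameters are type-hinted gr.OAuthProfile / gr.OAuthToken receives the login state automatically, without those values appearing in the inputs list. A minimal, self-contained sketch, assuming a Space with hf_oauth: true in its README metadata; the names below are illustrative and not taken from this repo:

import gradio as gr

def whoami(profile: gr.OAuthProfile | None, oauth_token: gr.OAuthToken | None) -> str:
    # Gradio fills these parameters from the login session based on their type
    # hints; they are never listed in `inputs`. Both are None when logged out.
    if profile is None or oauth_token is None:
        return "Not logged in."
    return f"Logged in as {profile.username}; the access token is oauth_token.token."

with gr.Blocks() as demo:
    gr.LoginButton()
    status = gr.Textbox(label="Login status")
    check = gr.Button("Check login")
    check.click(fn=whoami, inputs=None, outputs=status)

demo.launch()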
run_jobs.py CHANGED
@@ -50,7 +50,6 @@ def prepare_env_and_get_command(
     d_id,
     config,
     split,
-    inference,
     inference_token,
     uid,
     label_mapping,
@@ -60,10 +59,6 @@ def prepare_env_and_get_command(
     if os.environ.get("SPACE_ID") == "giskardai/giskard-evaluator":
         leaderboard_dataset = LEADERBOARD

-    inference_type = "hf_pipeline"
-    if inference and inference_token:
-        inference_type = "hf_inference_api"
-
     executable = "giskard_scanner"
     try:
         # Copy the current requirements (might be changed)
@@ -100,7 +95,7 @@ def prepare_env_and_get_command(
         "--scan_config",
         get_submitted_yaml_path(uid),
         "--inference_type",
-        inference_type,
+        "hf_inference_api",
         "--inference_api_token",
         inference_token,
     ]
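With the checkbox gone, run_jobs.py always passes "hf_inference_api" plus the user's token to giskard_scanner. Roughly, that inference type amounts to calling the Hugging Face Inference API with the token; a minimal sketch using huggingface_hub, shown only to illustrate what the token authenticates, not how giskard_scanner is implemented:

from huggingface_hub import InferenceClient

def classify(text: str, model_id: str, token: str):
    # The token plays the role of --inference_api_token in the command above.
    client = InferenceClient(model=model_id, token=token)
    return client.text_classification(text)  # list of labels with scores

# Hypothetical usage (model id and token are placeholders):
# classify("I love this movie!", "distilbert-base-uncased-finetuned-sst-2-english", "hf_xxx")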
text_classification_ui_helpers.py CHANGED
@@ -249,8 +249,8 @@ def align_columns_and_show_prediction(
     dataset_config,
     dataset_split,
     uid,
-    run_inference,
-    inference_token,
+    profile: gr.OAuthProfile | None,
+    oauth_token: gr.OAuthToken | None,
 ):
     model_id = strip_model_id_from_url(model_id)
     model_task = check_model_task(model_id)
@@ -344,7 +344,7 @@ def align_columns_and_show_prediction(
         gr.update(value=prediction_input, lines=min(len(prediction_input)//225 + 1, 5), visible=True),
         gr.update(value=prediction_response, visible=True),
         gr.update(visible=True, open=True),
-        gr.update(interactive=(run_inference and inference_token != "")),
+        gr.update(interactive=(profile is not None and oauth_token is not None)),
         "",
         *column_mappings,
     )
@@ -354,7 +354,7 @@ def align_columns_and_show_prediction(
         gr.update(value=prediction_input, lines=min(len(prediction_input)//225 + 1, 5), visible=True),
         gr.update(value=prediction_response, visible=True),
         gr.update(visible=True, open=False),
-        gr.update(interactive=(run_inference and inference_token != "")),
+        gr.update(interactive=(profile is not None and oauth_token is not None)),
         "",
         *column_mappings,
     )
@@ -372,9 +372,10 @@ def check_column_mapping_keys_validity(all_mappings):

     return True

-def enable_run_btn(uid, run_inference, inference_token, model_id, dataset_id, dataset_config, dataset_split):
-    if not run_inference or inference_token == "":
-        logger.warn("Inference API is not enabled")
+def enable_run_btn(uid, model_id, dataset_id, dataset_config, dataset_split, profile: gr.OAuthProfile | None, oath_token: gr.OAuthToken | None):
+    if profile is None:
+        return gr.update(interactive=False)
+    if oath_token is None:
         return gr.update(interactive=False)
     if model_id == "" or dataset_id == "" or dataset_config == "" or dataset_split == "":
         logger.warn("Model id or dataset id is not selected")
@@ -385,11 +386,6 @@ def enable_run_btn(uid, run_inference, inference_token, model_id, dataset_id, da
         logger.warn("Column mapping is not valid")
         return gr.update(interactive=False)

-    if not check_hf_token_validity(inference_token):
-        logger.warn("HF token is not valid")
-        return gr.update(interactive=False)
-    return gr.update(interactive=True)
-
 def construct_label_and_feature_mapping(all_mappings, ds_labels, ds_features, label_keys=None):
     label_mapping = {}
     if len(all_mappings["labels"].keys()) != len(ds_labels):
@@ -419,7 +415,7 @@ def show_hf_token_info(token):
         return gr.update(visible=True)
     return gr.update(visible=False)

-def try_submit(m_id, d_id, config, split, inference, inference_token, uid):
+def try_submit(m_id, d_id, config, split, uid, profile: gr.OAuthProfile | None, oath_token: gr.OAuthToken | None):
     all_mappings = read_column_mapping(uid)
     if not check_column_mapping_keys_validity(all_mappings):
         return (gr.update(interactive=True), gr.update(visible=False))
@@ -437,8 +433,7 @@ def try_submit(m_id, d_id, config, split, inference, inference_token, uid):
         d_id,
         config,
         split,
-        inference,
-        inference_token,
+        oath_token.token,
         uid,
         label_mapping,
         feature_mapping,
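Because the OAuth values are injected from the type hints, the wiring in app_text_classification.py now lists only the visible components as inputs. A hedged sketch of how the updated try_submit could be hooked up; the component names follow the diff, but the outputs shown here are illustrative:

import gradio as gr
from text_classification_ui_helpers import try_submit  # assumes the module is on the path

with gr.Blocks() as demo:
    gr.LoginButton()
    model_id_input = gr.Textbox(label="Model ID")
    dataset_id_input = gr.Textbox(label="Dataset ID")
    dataset_config_input = gr.Textbox(label="Dataset config")
    dataset_split_input = gr.Textbox(label="Dataset split")
    uid_label = gr.Textbox(visible=False)
    run_btn = gr.Button("Run evaluation", interactive=False)
    logs_accordion = gr.Accordion(label="Logs", open=False, visible=False)

    # The trailing profile / oath_token parameters of try_submit are not inputs:
    # Gradio supplies them from the login session, or None when logged out.
    run_btn.click(
        fn=try_submit,
        inputs=[model_id_input, dataset_id_input, dataset_config_input,
                dataset_split_input, uid_label],
        outputs=[run_btn, logs_accordion],
    )

demo.launch()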
wordings.py CHANGED
@@ -1,7 +1,7 @@
 INTRODUCTION_MD = """
-    <h1 style="text-align: center;">
+    <div style="display: flex; justify-content: center;"><h1>
     🐢Giskard Evaluator - Text Classification
-    </h1>
+    </h1></div>
     Welcome to the Giskard Evaluator Space! Get a model vulnerability report immediately by simply sharing your model and dataset id below.
     You can also checkout our library documentation <a href="https://docs.giskard.ai/en/latest/getting_started/quickstart/index.html">here</a>.
 """
@@ -61,6 +61,10 @@ USE_INFERENCE_API_TIP = """
     . Please input your <a href="https://huggingface.co/settings/tokens">Hugging Face token</a> to do so. You can find it <a href="https://huggingface.co/settings/tokens">here</a>.
 """

+LOG_IN_TIPS = """
+To use the Hugging Face Inference API, you need to log in to your Hugging Face account.
+"""
+
 HF_TOKEN_INVALID_STYLED= """
     <p style="text-align: left;color: red; ">
     Your Hugging Face token is invalid. Please double check your token.