login-button-attempt

#182
README.md CHANGED
@@ -4,9 +4,16 @@ emoji: 🐢🔍
4
  colorFrom: blue
5
  colorTo: indigo
6
  sdk: gradio
7
- sdk_version: 4.7.1
8
  app_file: app.py
9
  pinned: false
 
 
 
 
 
 
 
10
  ---
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
4
  colorFrom: blue
5
  colorTo: indigo
6
  sdk: gradio
7
+ sdk_version: 4.16.0
8
  app_file: app.py
9
  pinned: false
10
+
11
+ hf_oauth: true
12
+ # optional, default duration is 8 hours/480 minutes. Max duration is 30 days/43200 minutes.
13
+ hf_oauth_expiration_minutes: 480
14
+ # optional, see "Scopes" below. "openid profile" is always included.
15
+ hf_oauth_scopes:
16
+ - inference-api
17
  ---
18
 
19
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app_debug.py CHANGED
@@ -63,9 +63,9 @@ def get_queue_status():
63
  current = pipe.current
64
  if current is None:
65
  current = "None"
66
- return f'<div style="padding-top: 5%">Current job: {html.escape(current)} <br/> Jobs in queue: <br/> {"".join(get_jobs_info_in_queue())}</div>'
67
  else:
68
- return '<div style="padding-top: 5%">No jobs in queue, please submit an evaluation task from another tab.</div>'
69
 
70
 
71
  def get_demo():
 
63
  current = pipe.current
64
  if current is None:
65
  current = "None"
66
+ return f'<div style="padding-top: 5%">Current job: {html.escape(current)} <br/> Job queue: <br/> {"".join(get_jobs_info_in_queue())}</div>'
67
  else:
68
+ return '<div style="padding-top: 5%">No jobs waiting, please submit an evaluation task from Text-Classification tab.</div>'
69
 
70
 
71
  def get_demo():
app_leaderboard.py CHANGED
@@ -96,25 +96,25 @@ def get_demo(leaderboard_tab):
96
  display_df = get_display_df(default_df) # the styled dataframe to display
97
 
98
  with gr.Row():
99
- with gr.Column():
100
- issue_columns_select = gr.CheckboxGroup(
101
- label="Issue Columns",
102
- choices=issue_columns,
103
- value=[],
104
- interactive=True,
105
- )
106
  with gr.Column():
107
  info_columns_select = gr.CheckboxGroup(
108
  label="Info Columns",
109
  choices=info_columns,
110
  value=default_columns,
111
  interactive=True,
 
 
 
 
 
 
 
112
  )
113
 
114
  with gr.Row():
115
  task_select = gr.Dropdown(
116
  label="Task",
117
- choices=["text_classification", "tabular"],
118
  value="text_classification",
119
  interactive=True,
120
  )
 
96
  display_df = get_display_df(default_df) # the styled dataframe to display
97
 
98
  with gr.Row():
 
 
 
 
 
 
 
99
  with gr.Column():
100
  info_columns_select = gr.CheckboxGroup(
101
  label="Info Columns",
102
  choices=info_columns,
103
  value=default_columns,
104
  interactive=True,
105
+ )
106
+ with gr.Column():
107
+ issue_columns_select = gr.CheckboxGroup(
108
+ label="Issue Columns",
109
+ choices=issue_columns,
110
+ value=[],
111
+ interactive=True,
112
  )
113
 
114
  with gr.Row():
115
  task_select = gr.Dropdown(
116
  label="Task",
117
+ choices=["text_classification"],
118
  value="text_classification",
119
  interactive=True,
120
  )
app_text_classification.py CHANGED
@@ -8,7 +8,6 @@ from text_classification_ui_helpers import (
8
  align_columns_and_show_prediction,
9
  get_dataset_splits,
10
  check_dataset,
11
- show_hf_token_info,
12
  precheck_model_ds_enable_example_btn,
13
  try_submit,
14
  empty_column_mapping,
@@ -20,9 +19,8 @@ import logging
20
  from wordings import (
21
  CONFIRM_MAPPING_DETAILS_MD,
22
  INTRODUCTION_MD,
23
- USE_INFERENCE_API_TIP,
24
  CHECK_LOG_SECTION_RAW,
25
- HF_TOKEN_INVALID_STYLED
26
  )
27
 
28
  MAX_LABELS = 40
@@ -38,6 +36,10 @@ def get_demo():
38
  uid_label = gr.Textbox(
39
  label="Evaluation ID:", value=uuid.uuid4, visible=False, interactive=False
40
  )
 
 
 
 
41
  with gr.Row():
42
  model_id_input = gr.Textbox(
43
  label="Hugging Face Model id",
@@ -92,26 +94,7 @@ def get_demo():
92
  for _ in range(MAX_LABELS, MAX_LABELS + MAX_FEATURES):
93
  column_mappings.append(gr.Dropdown(visible=False))
94
 
95
- with gr.Accordion(label="Model Wrap Advance Config", open=True):
96
- gr.HTML(USE_INFERENCE_API_TIP)
97
-
98
- run_inference = gr.Checkbox(value=True, label="Run with Inference API")
99
- inference_token = gr.Textbox(
100
- placeholder="hf_xxxxxxxxxxxxxxxxxxxx",
101
- value="",
102
- label="HF Token for Inference API",
103
- visible=True,
104
- interactive=True,
105
- )
106
- inference_token_info = gr.HTML(value=HF_TOKEN_INVALID_STYLED, visible=False)
107
-
108
- inference_token.change(
109
- fn=show_hf_token_info,
110
- inputs=[inference_token],
111
- outputs=[inference_token_info],
112
- )
113
-
114
- with gr.Accordion(label="Scanner Advance Config (optional)", open=False):
115
  scanners = gr.CheckboxGroup(visible=True)
116
 
117
  @gr.on(triggers=[uid_label.change], inputs=[uid_label], outputs=[scanners])
@@ -145,7 +128,7 @@ def get_demo():
145
  with gr.Row():
146
  logs = gr.Textbox(
147
  value=CHECK_LOG_SECTION_RAW,
148
- label="Giskard Bot Evaluation Guide:",
149
  visible=False,
150
  every=0.5,
151
  )
@@ -156,7 +139,7 @@ def get_demo():
156
  gr.on(
157
  triggers=[model_id_input.change],
158
  fn=get_related_datasets_from_leaderboard,
159
- inputs=[model_id_input],
160
  outputs=[dataset_id_input],
161
  ).then(
162
  fn=check_dataset,
@@ -233,8 +216,6 @@ def get_demo():
233
  dataset_config_input,
234
  dataset_split_input,
235
  uid_label,
236
- run_inference,
237
- inference_token,
238
  ],
239
  outputs=[
240
  validation_result,
@@ -257,8 +238,6 @@ def get_demo():
257
  dataset_id_input,
258
  dataset_config_input,
259
  dataset_split_input,
260
- run_inference,
261
- inference_token,
262
  uid_label,
263
  ],
264
  outputs=[
@@ -274,15 +253,11 @@ def get_demo():
274
 
275
  gr.on(
276
  triggers=[
277
- run_inference.input,
278
- inference_token.input,
279
  scanners.input,
280
  ],
281
  fn=enable_run_btn,
282
  inputs=[
283
  uid_label,
284
- run_inference,
285
- inference_token,
286
  model_id_input,
287
  dataset_id_input,
288
  dataset_config_input,
@@ -296,8 +271,6 @@ def get_demo():
296
  fn=enable_run_btn,
297
  inputs=[
298
  uid_label,
299
- run_inference,
300
- inference_token,
301
  model_id_input,
302
  dataset_id_input,
303
  dataset_config_input,
 
8
  align_columns_and_show_prediction,
9
  get_dataset_splits,
10
  check_dataset,
 
11
  precheck_model_ds_enable_example_btn,
12
  try_submit,
13
  empty_column_mapping,
 
19
  from wordings import (
20
  CONFIRM_MAPPING_DETAILS_MD,
21
  INTRODUCTION_MD,
22
+ LOG_IN_TIPS,
23
  CHECK_LOG_SECTION_RAW,
 
24
  )
25
 
26
  MAX_LABELS = 40
 
36
  uid_label = gr.Textbox(
37
  label="Evaluation ID:", value=uuid.uuid4, visible=False, interactive=False
38
  )
39
+
40
+ with gr.Accordion(label="Login to Use This Space", open=True):
41
+ gr.HTML(LOG_IN_TIPS)
42
+ gr.LoginButton()
43
  with gr.Row():
44
  model_id_input = gr.Textbox(
45
  label="Hugging Face Model id",
 
94
  for _ in range(MAX_LABELS, MAX_LABELS + MAX_FEATURES):
95
  column_mappings.append(gr.Dropdown(visible=False))
96
 
97
+ with gr.Accordion(label="Scanner Advanced Config (optional)", open=False):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
98
  scanners = gr.CheckboxGroup(visible=True)
99
 
100
  @gr.on(triggers=[uid_label.change], inputs=[uid_label], outputs=[scanners])
 
128
  with gr.Row():
129
  logs = gr.Textbox(
130
  value=CHECK_LOG_SECTION_RAW,
131
+ label="Log",
132
  visible=False,
133
  every=0.5,
134
  )
 
139
  gr.on(
140
  triggers=[model_id_input.change],
141
  fn=get_related_datasets_from_leaderboard,
142
+ inputs=[model_id_input, dataset_id_input],
143
  outputs=[dataset_id_input],
144
  ).then(
145
  fn=check_dataset,
 
216
  dataset_config_input,
217
  dataset_split_input,
218
  uid_label,
 
 
219
  ],
220
  outputs=[
221
  validation_result,
 
238
  dataset_id_input,
239
  dataset_config_input,
240
  dataset_split_input,
 
 
241
  uid_label,
242
  ],
243
  outputs=[
 
253
 
254
  gr.on(
255
  triggers=[
 
 
256
  scanners.input,
257
  ],
258
  fn=enable_run_btn,
259
  inputs=[
260
  uid_label,
 
 
261
  model_id_input,
262
  dataset_id_input,
263
  dataset_config_input,
 
271
  fn=enable_run_btn,
272
  inputs=[
273
  uid_label,
 
 
274
  model_id_input,
275
  dataset_id_input,
276
  dataset_config_input,
requirements.txt CHANGED
@@ -1,8 +1,9 @@
1
- giskard
2
  huggingface_hub
3
  hf-transfer
4
  torch==2.0.1
5
  transformers
6
  datasets
7
  tabulate
 
8
  -e git+https://github.com/Giskard-AI/cicd.git#egg=giskard-cicd
 
1
+ -e git+https://github.com/Giskard-AI/giskard.git@b4849c93e1b40f5597d6509c4e9c58b0fff10483#egg=giskard
2
  huggingface_hub
3
  hf-transfer
4
  torch==2.0.1
5
  transformers
6
  datasets
7
  tabulate
8
+ gradio[oauth]
9
  -e git+https://github.com/Giskard-AI/cicd.git#egg=giskard-cicd
run_jobs.py CHANGED
@@ -50,7 +50,6 @@ def prepare_env_and_get_command(
50
  d_id,
51
  config,
52
  split,
53
- inference,
54
  inference_token,
55
  uid,
56
  label_mapping,
@@ -60,10 +59,6 @@ def prepare_env_and_get_command(
60
  if os.environ.get("SPACE_ID") == "giskardai/giskard-evaluator":
61
  leaderboard_dataset = LEADERBOARD
62
 
63
- inference_type = "hf_pipeline"
64
- if inference and inference_token:
65
- inference_type = "hf_inference_api"
66
-
67
  executable = "giskard_scanner"
68
  try:
69
  # Copy the current requirements (might be changed)
@@ -100,9 +95,10 @@ def prepare_env_and_get_command(
100
  "--scan_config",
101
  get_submitted_yaml_path(uid),
102
  "--inference_type",
103
- inference_type,
104
  "--inference_api_token",
105
  inference_token,
 
106
  ]
107
  # The token to publish post
108
  if os.environ.get(HF_WRITE_TOKEN):
 
50
  d_id,
51
  config,
52
  split,
 
53
  inference_token,
54
  uid,
55
  label_mapping,
 
59
  if os.environ.get("SPACE_ID") == "giskardai/giskard-evaluator":
60
  leaderboard_dataset = LEADERBOARD
61
 
 
 
 
 
62
  executable = "giskard_scanner"
63
  try:
64
  # Copy the current requirements (might be changed)
 
95
  "--scan_config",
96
  get_submitted_yaml_path(uid),
97
  "--inference_type",
98
+ "hf_inference_api",
99
  "--inference_api_token",
100
  inference_token,
101
+ "--persist_scan",
102
  ]
103
  # The token to publish post
104
  if os.environ.get(HF_WRITE_TOKEN):
text_classification.py CHANGED
@@ -102,9 +102,8 @@ def hf_inference_api(model_id, hf_token, payload):
102
  except Exception:
103
  return {"error": response.content}
104
 
105
- def preload_hf_inference_api(model_id):
106
  payload = {"inputs": "This is a test", "options": {"use_cache": True, }}
107
- hf_token = os.environ.get(HF_WRITE_TOKEN, default="")
108
  hf_inference_api(model_id, hf_token, payload)
109
 
110
  def check_model_pipeline(model_id):
 
102
  except Exception:
103
  return {"error": response.content}
104
 
105
+ def preload_hf_inference_api(model_id, hf_token):
106
  payload = {"inputs": "This is a test", "options": {"use_cache": True, }}
 
107
  hf_inference_api(model_id, hf_token, payload)
108
 
109
  def check_model_pipeline(model_id):
text_classification_ui_helpers.py CHANGED
@@ -21,7 +21,6 @@ from text_classification import (
21
  preload_hf_inference_api,
22
  get_example_prediction,
23
  get_labels_and_features_from_dataset,
24
- check_hf_token_validity,
25
  HuggingFaceInferenceAPIResponse,
26
  )
27
  from wordings import (
@@ -43,7 +42,7 @@ MAX_FEATURES = 20
43
  ds_dict = None
44
  ds_config = None
45
 
46
- def get_related_datasets_from_leaderboard(model_id):
47
  records = leaderboard.records
48
  model_id = strip_model_id_from_url(model_id)
49
  model_records = records[records["model_id"] == model_id]
@@ -52,7 +51,10 @@ def get_related_datasets_from_leaderboard(model_id):
52
  if len(datasets_unique) == 0:
53
  return gr.update(choices=[])
54
 
55
- return gr.update(choices=datasets_unique)
 
 
 
56
 
57
 
58
  logger = logging.getLogger(__file__)
@@ -179,11 +181,11 @@ def list_labels_and_features_from_dataset(ds_labels, ds_features, model_labels,
179
 
180
 
181
  def precheck_model_ds_enable_example_btn(
182
- model_id, dataset_id, dataset_config, dataset_split
183
  ):
184
  model_id = strip_model_id_from_url(model_id)
185
  model_task = check_model_task(model_id)
186
- preload_hf_inference_api(model_id)
187
 
188
  if dataset_config is None or dataset_split is None or len(dataset_config) == 0:
189
  return (
@@ -249,8 +251,8 @@ def align_columns_and_show_prediction(
249
  dataset_config,
250
  dataset_split,
251
  uid,
252
- run_inference,
253
- inference_token,
254
  ):
255
  model_id = strip_model_id_from_url(model_id)
256
  model_task = check_model_task(model_id)
@@ -268,11 +270,9 @@ def align_columns_and_show_prediction(
268
  dropdown_placement = [
269
  gr.Dropdown(visible=False) for _ in range(MAX_LABELS + MAX_FEATURES)
270
  ]
271
-
272
- hf_token = os.environ.get(HF_WRITE_TOKEN, default="")
273
 
274
  prediction_input, prediction_response = get_example_prediction(
275
- model_id, dataset_id, dataset_config, dataset_split, hf_token
276
  )
277
 
278
  if prediction_input is None or prediction_response is None:
@@ -344,7 +344,7 @@ def align_columns_and_show_prediction(
344
  gr.update(value=prediction_input, lines=min(len(prediction_input)//225 + 1, 5), visible=True),
345
  gr.update(value=prediction_response, visible=True),
346
  gr.update(visible=True, open=True),
347
- gr.update(interactive=(run_inference and inference_token != "")),
348
  "",
349
  *column_mappings,
350
  )
@@ -354,7 +354,7 @@ def align_columns_and_show_prediction(
354
  gr.update(value=prediction_input, lines=min(len(prediction_input)//225 + 1, 5), visible=True),
355
  gr.update(value=prediction_response, visible=True),
356
  gr.update(visible=True, open=False),
357
- gr.update(interactive=(run_inference and inference_token != "")),
358
  "",
359
  *column_mappings,
360
  )
@@ -372,9 +372,10 @@ def check_column_mapping_keys_validity(all_mappings):
372
 
373
  return True
374
 
375
- def enable_run_btn(uid, run_inference, inference_token, model_id, dataset_id, dataset_config, dataset_split):
376
- if not run_inference or inference_token == "":
377
- logger.warn("Inference API is not enabled")
 
378
  return gr.update(interactive=False)
379
  if model_id == "" or dataset_id == "" or dataset_config == "" or dataset_split == "":
380
  logger.warn("Model id or dataset id is not selected")
@@ -385,11 +386,6 @@ def enable_run_btn(uid, run_inference, inference_token, model_id, dataset_id, da
385
  logger.warn("Column mapping is not valid")
386
  return gr.update(interactive=False)
387
 
388
- if not check_hf_token_validity(inference_token):
389
- logger.warn("HF token is not valid")
390
- return gr.update(interactive=False)
391
- return gr.update(interactive=True)
392
-
393
  def construct_label_and_feature_mapping(all_mappings, ds_labels, ds_features, label_keys=None):
394
  label_mapping = {}
395
  if len(all_mappings["labels"].keys()) != len(ds_labels):
@@ -413,13 +409,7 @@ def construct_label_and_feature_mapping(all_mappings, ds_labels, ds_features, la
413
  feature_mapping.update({"label": label_keys[0]})
414
  return label_mapping, feature_mapping
415
 
416
- def show_hf_token_info(token):
417
- valid = check_hf_token_validity(token)
418
- if not valid:
419
- return gr.update(visible=True)
420
- return gr.update(visible=False)
421
-
422
- def try_submit(m_id, d_id, config, split, inference, inference_token, uid):
423
  all_mappings = read_column_mapping(uid)
424
  if not check_column_mapping_keys_validity(all_mappings):
425
  return (gr.update(interactive=True), gr.update(visible=False))
@@ -437,8 +427,7 @@ def try_submit(m_id, d_id, config, split, inference, inference_token, uid):
437
  d_id,
438
  config,
439
  split,
440
- inference,
441
- inference_token,
442
  uid,
443
  label_mapping,
444
  feature_mapping,
 
21
  preload_hf_inference_api,
22
  get_example_prediction,
23
  get_labels_and_features_from_dataset,
 
24
  HuggingFaceInferenceAPIResponse,
25
  )
26
  from wordings import (
 
42
  ds_dict = None
43
  ds_config = None
44
 
45
+ def get_related_datasets_from_leaderboard(model_id, dataset_id_input):
46
  records = leaderboard.records
47
  model_id = strip_model_id_from_url(model_id)
48
  model_records = records[records["model_id"] == model_id]
 
51
  if len(datasets_unique) == 0:
52
  return gr.update(choices=[])
53
 
54
+ if dataset_id_input in datasets_unique:
55
+ return gr.update(choices=datasets_unique)
56
+
57
+ return gr.update(choices=datasets_unique, value="")
58
 
59
 
60
  logger = logging.getLogger(__file__)
 
181
 
182
 
183
  def precheck_model_ds_enable_example_btn(
184
+ model_id, dataset_id, dataset_config, dataset_split, profile: gr.OAuthProfile | None, oath_token: gr.OAuthToken | None
185
  ):
186
  model_id = strip_model_id_from_url(model_id)
187
  model_task = check_model_task(model_id)
188
+ preload_hf_inference_api(model_id, oath_token.token)
189
 
190
  if dataset_config is None or dataset_split is None or len(dataset_config) == 0:
191
  return (
 
251
  dataset_config,
252
  dataset_split,
253
  uid,
254
+ profile: gr.OAuthProfile | None,
255
+ oauth_token: gr.OAuthToken | None,
256
  ):
257
  model_id = strip_model_id_from_url(model_id)
258
  model_task = check_model_task(model_id)
 
270
  dropdown_placement = [
271
  gr.Dropdown(visible=False) for _ in range(MAX_LABELS + MAX_FEATURES)
272
  ]
 
 
273
 
274
  prediction_input, prediction_response = get_example_prediction(
275
+ model_id, dataset_id, dataset_config, dataset_split, oauth_token.token
276
  )
277
 
278
  if prediction_input is None or prediction_response is None:
 
344
  gr.update(value=prediction_input, lines=min(len(prediction_input)//225 + 1, 5), visible=True),
345
  gr.update(value=prediction_response, visible=True),
346
  gr.update(visible=True, open=True),
347
+ gr.update(interactive=(profile is not None and oauth_token is not None)),
348
  "",
349
  *column_mappings,
350
  )
 
354
  gr.update(value=prediction_input, lines=min(len(prediction_input)//225 + 1, 5), visible=True),
355
  gr.update(value=prediction_response, visible=True),
356
  gr.update(visible=True, open=False),
357
+ gr.update(interactive=(profile is not None and oauth_token is not None)),
358
  "",
359
  *column_mappings,
360
  )
 
372
 
373
  return True
374
 
375
+ def enable_run_btn(uid, model_id, dataset_id, dataset_config, dataset_split, profile: gr.OAuthProfile | None, oath_token: gr.OAuthToken | None):
376
+ if profile is None:
377
+ return gr.update(interactive=False)
378
+ if oath_token is None:
379
  return gr.update(interactive=False)
380
  if model_id == "" or dataset_id == "" or dataset_config == "" or dataset_split == "":
381
  logger.warn("Model id or dataset id is not selected")
 
386
  logger.warn("Column mapping is not valid")
387
  return gr.update(interactive=False)
388
 
 
 
 
 
 
389
  def construct_label_and_feature_mapping(all_mappings, ds_labels, ds_features, label_keys=None):
390
  label_mapping = {}
391
  if len(all_mappings["labels"].keys()) != len(ds_labels):
 
409
  feature_mapping.update({"label": label_keys[0]})
410
  return label_mapping, feature_mapping
411
 
412
+ def try_submit(m_id, d_id, config, split, uid, profile: gr.OAuthProfile | None, oath_token: gr.OAuthToken | None):
 
 
 
 
 
 
413
  all_mappings = read_column_mapping(uid)
414
  if not check_column_mapping_keys_validity(all_mappings):
415
  return (gr.update(interactive=True), gr.update(visible=False))
 
427
  d_id,
428
  config,
429
  split,
430
+ oath_token.token,
 
431
  uid,
432
  label_mapping,
433
  feature_mapping,
wordings.py CHANGED
@@ -1,14 +1,16 @@
1
  INTRODUCTION_MD = """
2
- <h1 style="text-align: center;">
3
  🐢Giskard Evaluator - Text Classification
4
- </h1>
5
  Welcome to the Giskard Evaluator Space! Get a model vulnerability report immediately by simply sharing your model and dataset id below.
6
  You can also checkout our library documentation <a href="https://docs.giskard.ai/en/latest/getting_started/quickstart/index.html">here</a>.
7
  """
8
  CONFIRM_MAPPING_DETAILS_MD = """
 
9
  <h1 style="text-align: center;">
10
  Confirm Pre-processing Details
11
  </h1>
 
12
  Make sure the output variable's labels and the input variable's name are accurately mapped across both the dataset and the model. You can select the output variable's labels from the dropdowns below.
13
  """
14
  CONFIRM_MAPPING_DETAILS_FAIL_MD = """
@@ -26,9 +28,7 @@ CHECK_CONFIG_OR_SPLIT_RAW = """
26
  Please check your dataset config or split.
27
  """
28
 
29
- CHECK_LOG_SECTION_RAW = """
30
- Your have successfully submitted a Giskard evaluation. Further details are available in the Logs tab. You can find your report will be posted to your model's community discussion.
31
- """
32
 
33
  PREDICTION_SAMPLE_MD = """
34
  <h1 style="text-align: center;">
@@ -58,7 +58,11 @@ USE_INFERENCE_API_TIP = """
58
  <a href="https://huggingface.co/docs/api-inference/detailed_parameters#text-classification-task">
59
  Hugging Face Inference API
60
  </a>
61
- . Please input your <a href="https://huggingface.co/settings/tokens">Hugging Face token</a> to do so. You can find it <a href="https://huggingface.co/settings/tokens">here</a>.
 
 
 
 
62
  """
63
 
64
  HF_TOKEN_INVALID_STYLED= """
 
1
  INTRODUCTION_MD = """
2
+ <div style="display: flex; justify-content: center;"><h1>
3
  🐢Giskard Evaluator - Text Classification
4
+ </h1></div>
5
  Welcome to the Giskard Evaluator Space! Get a model vulnerability report immediately by simply sharing your model and dataset id below.
6
  You can also checkout our library documentation <a href="https://docs.giskard.ai/en/latest/getting_started/quickstart/index.html">here</a>.
7
  """
8
  CONFIRM_MAPPING_DETAILS_MD = """
9
+ <div style="display: flex; justify-content: center;"><h1>
10
  <h1 style="text-align: center;">
11
  Confirm Pre-processing Details
12
  </h1>
13
+ </div>
14
  Make sure the output variable's labels and the input variable's name are accurately mapped across both the dataset and the model. You can select the output variable's labels from the dropdowns below.
15
  """
16
  CONFIRM_MAPPING_DETAILS_FAIL_MD = """
 
28
  Please check your dataset config or split.
29
  """
30
 
31
+ CHECK_LOG_SECTION_RAW = """You have successfully submitted a Giskard evaluation job. Further details are available in the Logs tab. You can find your report posted in your model's community discussion section."""
 
 
32
 
33
  PREDICTION_SAMPLE_MD = """
34
  <h1 style="text-align: center;">
 
58
  <a href="https://huggingface.co/docs/api-inference/detailed_parameters#text-classification-task">
59
  Hugging Face Inference API
60
  </a>
61
+ . Please input your <a href="https://huggingface.co/docs/hub/security-tokens#user-access-tokens">Hugging Face token</a> to do so. You can find it <a href="https://huggingface.co/settings/tokens">here</a>.
62
+ """
63
+
64
+ LOG_IN_TIPS = """
65
+ To use the Hugging Face Inference API, you need to log in to your Hugging Face account.
66
  """
67
 
68
  HF_TOKEN_INVALID_STYLED= """