ZeroCommand committed
Commit: 96a1184
Parents: f913de3 6811286

merge and resolve conflicts

app_debug.py CHANGED
@@ -63,9 +63,9 @@ def get_queue_status():
         current = pipe.current
         if current is None:
             current = "None"
-        return f'<div style="padding-top: 5%">Current job: {html.escape(current)} <br/> Jobs in queue: <br/> {"".join(get_jobs_info_in_queue())}</div>'
+        return f'<div style="padding-top: 5%">Current job: {html.escape(current)} <br/> Job queue: <br/> {"".join(get_jobs_info_in_queue())}</div>'
     else:
-        return '<div style="padding-top: 5%">No jobs in queue, please submit an evaluation task from another tab.</div>'
+        return '<div style="padding-top: 5%">No jobs waiting, please submit an evaluation task from Text-Classification tab.</div>'


 def get_demo():
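The functional detail worth noting in this hunk is that the current job name is run through html.escape before being interpolated into the status markup. A minimal, standalone sketch of that behaviour (the job name below is made up):

```python
# Why html.escape matters here: any markup in a user-supplied job identifier is
# rendered as literal text instead of being injected into the status <div>.
import html

job_name = '<script>alert("hi")</script> my-model'  # made-up job name
status = f'<div style="padding-top: 5%">Current job: {html.escape(job_name)}</div>'
print(status)
# <div style="padding-top: 5%">Current job: &lt;script&gt;alert(&quot;hi&quot;)&lt;/script&gt; my-model</div>
```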
app_leaderboard.py CHANGED
@@ -96,25 +96,25 @@ def get_demo(leaderboard_tab):
     display_df = get_display_df(default_df) # the styled dataframe to display

     with gr.Row():
-        with gr.Column():
-            issue_columns_select = gr.CheckboxGroup(
-                label="Issue Columns",
-                choices=issue_columns,
-                value=[],
-                interactive=True,
-            )
         with gr.Column():
             info_columns_select = gr.CheckboxGroup(
                 label="Info Columns",
                 choices=info_columns,
                 value=default_columns,
                 interactive=True,
+            )
+        with gr.Column():
+            issue_columns_select = gr.CheckboxGroup(
+                label="Issue Columns",
+                choices=issue_columns,
+                value=[],
+                interactive=True,
             )

     with gr.Row():
         task_select = gr.Dropdown(
             label="Task",
-            choices=["text_classification", "tabular"],
+            choices=["text_classification"],
             value="text_classification",
             interactive=True,
         )
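This change puts the Info Columns group before the Issue Columns group and removes the "tabular" option from the task dropdown. For context, a generic illustration (not the Space's actual callback) of how two CheckboxGroup selections are typically combined to decide which leaderboard columns get displayed:

```python
# Illustrative only: combine the ticked info columns and issue columns into the
# set of dataframe columns to show, preserving the dataframe's own column order.
import pandas as pd

def filter_columns(df: pd.DataFrame, info_selected: list, issue_selected: list) -> pd.DataFrame:
    wanted = set(info_selected) | set(issue_selected)
    return df[[c for c in df.columns if c in wanted]]

df = pd.DataFrame({"model_id": ["m1"], "dataset_id": ["d1"], "robustness": [2]})
print(filter_columns(df, info_selected=["model_id"], issue_selected=["robustness"]))
```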
app_text_classification.py CHANGED
@@ -36,6 +36,10 @@ def get_demo():
         uid_label = gr.Textbox(
             label="Evaluation ID:", value=uuid.uuid4, visible=False, interactive=False
         )
+
+        with gr.Accordion(label="Login to Use This Space", open=True):
+            gr.HTML(LOG_IN_TIPS)
+            gr.LoginButton()
         with gr.Row():
             model_id_input = gr.Textbox(
                 label="Hugging Face Model id",
@@ -90,11 +94,7 @@ def get_demo():
         for _ in range(MAX_LABELS, MAX_LABELS + MAX_FEATURES):
             column_mappings.append(gr.Dropdown(visible=False))

-        with gr.Accordion(label="Model Wrap Advance Config", open=True):
-            gr.HTML(LOG_IN_TIPS)
-            gr.LoginButton()
-
-        with gr.Accordion(label="Scanner Advance Config (optional)", open=False):
+        with gr.Accordion(label="Scanner Advanced Config (optional)", open=False):
             scanners = gr.CheckboxGroup(visible=True)

             @gr.on(triggers=[uid_label.change], inputs=[uid_label], outputs=[scanners])
@@ -128,7 +128,7 @@ def get_demo():
         with gr.Row():
             logs = gr.Textbox(
                 value=CHECK_LOG_SECTION_RAW,
-                label="Giskard Bot Evaluation Guide:",
+                label="Log",
                 visible=False,
                 every=0.5,
             )
@@ -139,7 +139,7 @@ def get_demo():
     gr.on(
         triggers=[model_id_input.change],
         fn=get_related_datasets_from_leaderboard,
-        inputs=[model_id_input],
+        inputs=[model_id_input, dataset_id_input],
         outputs=[dataset_id_input],
     ).then(
         fn=check_dataset,
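The last hunk widens the event's inputs so the handler also sees the dataset dropdown's current value. A self-contained sketch of that wiring pattern (made-up component names and placeholder suggestion logic, standing in for get_related_datasets_from_leaderboard):

```python
# Sketch of a change event that reads two components and updates one of them:
# the handler receives the model id plus the dropdown's current value, so it can
# keep the existing dataset selection when it is still valid and clear it otherwise.
import gradio as gr

def suggest_datasets(model_id: str, current_dataset: str):
    suggestions = ["imdb", "sst2"] if model_id else []  # placeholder suggestions
    keep = current_dataset if current_dataset in suggestions else ""
    return gr.update(choices=suggestions, value=keep)

with gr.Blocks() as demo:
    model_box = gr.Textbox(label="Model id")
    dataset_dd = gr.Dropdown(label="Dataset", choices=[])
    gr.on(
        triggers=[model_box.change],
        fn=suggest_datasets,
        inputs=[model_box, dataset_dd],
        outputs=[dataset_dd],
    )

if __name__ == "__main__":
    demo.launch()
```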
requirements.txt CHANGED
@@ -1,4 +1,4 @@
-giskard
+-e git+https://github.com/Giskard-AI/giskard.git@b4849c93e1b40f5597d6509c4e9c58b0fff10483#egg=giskard
 huggingface_hub
 hf-transfer
 torch==2.0.1
run_jobs.py CHANGED
@@ -98,6 +98,7 @@ def prepare_env_and_get_command(
             "hf_inference_api",
             "--inference_api_token",
             inference_token,
+            "--persist_scan",
         ]
         # The token to publish post
         if os.environ.get(HF_WRITE_TOKEN):
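The new "--persist_scan" entry is a value-less flag appended to the command list. How the receiving CLI defines it is not shown in this diff; the snippet below is only an assumption-labelled sketch of the usual way such a boolean flag is parsed:

```python
# Assumption: the scan CLI treats --persist_scan as a store_true flag.
# This is illustrative; the real parser may define it differently.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--inference_api_token", type=str)
parser.add_argument("--persist_scan", action="store_true")

args = parser.parse_args(["--inference_api_token", "hf_xxx", "--persist_scan"])
print(args.persist_scan)  # True
```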
text_classification_ui_helpers.py CHANGED
@@ -43,7 +43,7 @@ MAX_FEATURES = 20
 ds_dict = None
 ds_config = None

-def get_related_datasets_from_leaderboard(model_id):
+def get_related_datasets_from_leaderboard(model_id, dataset_id_input):
     records = leaderboard.records
     model_id = strip_model_id_from_url(model_id)
     model_records = records[records["model_id"] == model_id]
@@ -52,7 +52,10 @@ def get_related_datasets_from_leaderboard(model_id):
     if len(datasets_unique) == 0:
         return gr.update(choices=[])

-    return gr.update(choices=datasets_unique)
+    if dataset_id_input in datasets_unique:
+        return gr.update(choices=datasets_unique)
+
+    return gr.update(choices=datasets_unique, value="")


 logger = logging.getLogger(__file__)
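A plain-Python restatement of the rule this change introduces, for illustration only (the real handler returns gr.update objects to Gradio rather than dicts):

```python
# Keep the user's current dataset choice only if it is still among the suggested
# datasets for the newly entered model; otherwise clear it so a stale id cannot linger.
def resolve_dataset_selection(suggested: list, current: str):
    if current in suggested:
        return {"choices": suggested}               # mirrors gr.update(choices=datasets_unique)
    return {"choices": suggested, "value": ""}      # mirrors gr.update(choices=datasets_unique, value="")

print(resolve_dataset_selection(["imdb", "sst2"], "imdb"))  # {'choices': ['imdb', 'sst2']}
print(resolve_dataset_selection(["imdb", "sst2"], "yelp"))  # {'choices': ['imdb', 'sst2'], 'value': ''}
```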
wordings.py CHANGED
@@ -26,9 +26,7 @@ CHECK_CONFIG_OR_SPLIT_RAW = """
     Please check your dataset config or split.
 """

-CHECK_LOG_SECTION_RAW = """
-Your have successfully submitted a Giskard evaluation. Further details are available in the Logs tab. You can find your report will be posted to your model's community discussion.
-"""
+CHECK_LOG_SECTION_RAW = """Your have successfully submitted a Giskard evaluation job. Further details are available in the Logs tab. You can find your report posted in your model's community discussion section."""

 PREDICTION_SAMPLE_MD = """
 <h1 style="text-align: center;">
@@ -58,7 +56,7 @@ USE_INFERENCE_API_TIP = """
     <a href="https://huggingface.co/docs/api-inference/detailed_parameters#text-classification-task">
         Hugging Face Inference API
     </a>
-    . Please input your <a href="https://huggingface.co/settings/tokens">Hugging Face token</a> to do so. You can find it <a href="https://huggingface.co/settings/tokens">here</a>.
+    . Please input your <a href="https://huggingface.co/docs/hub/security-tokens#user-access-tokens">Hugging Face token</a> to do so. You can find it <a href="https://huggingface.co/settings/tokens">here</a>.
 """

 LOG_IN_TIPS = """