ZeroCommand committed on
Commit
89d01cf
1 Parent(s): aaa034c

refresh log files not working

Browse files
app_text_classification.py CHANGED
@@ -1,7 +1,8 @@
1
  import gradio as gr
 
2
  from io_utils import read_scanners, write_scanners, read_inference_type, write_inference_type
3
  from wordings import INTRODUCTION_MD, CONFIRM_MAPPING_DETAILS_MD
4
- from text_classification_ui_helpers import try_submit, check_dataset_and_get_config, check_dataset_and_get_split, check_model_and_show_prediction, write_column_mapping_to_config
5
 
6
  MAX_LABELS = 20
7
  MAX_FEATURES = 20
@@ -11,93 +12,125 @@ EXAMPLE_DATA_ID = 'tweet_eval'
11
  CONFIG_PATH='./config.yaml'
12
 
13
  def get_demo():
14
- with gr.Row():
15
- gr.Markdown(INTRODUCTION_MD)
16
- with gr.Row():
17
- model_id_input = gr.Textbox(
18
- label="Hugging Face model id",
19
- placeholder=EXAMPLE_MODEL_ID + " (press enter to confirm)",
20
- )
 
21
 
22
- dataset_id_input = gr.Textbox(
23
- label="Hugging Face Dataset id",
24
- placeholder=EXAMPLE_DATA_ID + " (press enter to confirm)",
25
- )
26
-
27
- with gr.Row():
28
- dataset_config_input = gr.Dropdown(label='Dataset Config', visible=False)
29
- dataset_split_input = gr.Dropdown(label='Dataset Split', visible=False)
30
-
31
- with gr.Row():
32
- example_input = gr.Markdown('Example Input', visible=False)
33
- with gr.Row():
34
- example_prediction = gr.Label(label='Model Prediction Sample', visible=False)
35
-
36
- with gr.Row():
37
- with gr.Accordion(label='Label and Feature Mapping', visible=False, open=False) as column_mapping_accordion:
38
- with gr.Row():
39
- gr.Markdown(CONFIRM_MAPPING_DETAILS_MD)
40
- column_mappings = []
41
- with gr.Row():
42
- with gr.Column():
43
- for _ in range(MAX_LABELS):
44
- column_mappings.append(gr.Dropdown(visible=False))
45
- with gr.Column():
46
- for _ in range(MAX_LABELS, MAX_LABELS + MAX_FEATURES):
47
- column_mappings.append(gr.Dropdown(visible=False))
48
-
49
- with gr.Accordion(label='Model Wrap Advance Config (optional)', open=False):
50
- run_local = gr.Checkbox(value=True, label="Run in this Space")
51
- use_inference = read_inference_type('./config.yaml') == 'hf_inference_api'
52
- run_inference = gr.Checkbox(value=use_inference, label="Run with Inference API")
53
-
54
- with gr.Accordion(label='Scanner Advance Config (optional)', open=False):
55
- selected = read_scanners('./config.yaml')
56
- scan_config = selected + ['data_leakage']
57
- scanners = gr.CheckboxGroup(choices=scan_config, value=selected, label='Scan Settings', visible=True)
 
 
 
58
 
59
- with gr.Row():
60
- run_btn = gr.Button(
61
- "Get Evaluation Result",
62
- variant="primary",
63
- interactive=True,
64
- size="lg",
65
- )
66
-
67
- with gr.Row():
68
- logs = gr.Textbox(label="Giskard Bot Evaluation Log:", visible=False)
69
 
70
- gr.on(triggers=[label.change for label in column_mappings],
71
- fn=write_column_mapping_to_config,
72
- inputs=[dataset_id_input, dataset_config_input, dataset_split_input, *column_mappings])
 
 
 
 
 
 
73
 
74
- gr.on(triggers=[model_id_input.change, dataset_config_input.change, dataset_split_input.change],
75
- fn=check_model_and_show_prediction,
76
- inputs=[model_id_input, dataset_id_input, dataset_config_input, dataset_split_input],
77
- outputs=[example_input, example_prediction, column_mapping_accordion, *column_mappings])
78
 
79
- dataset_id_input.blur(check_dataset_and_get_config, dataset_id_input, dataset_config_input)
80
 
81
- dataset_config_input.change(
82
- check_dataset_and_get_split,
83
- inputs=[dataset_id_input, dataset_config_input],
84
- outputs=[dataset_split_input])
85
 
86
- scanners.change(
87
- write_scanners,
88
- inputs=scanners
89
- )
90
 
91
- run_inference.change(
92
- write_inference_type,
93
- inputs=[run_inference]
94
- )
95
 
96
- gr.on(
97
- triggers=[
98
- run_btn.click,
99
- ],
100
- fn=try_submit,
101
- inputs=[model_id_input, dataset_id_input, dataset_config_input, dataset_split_input, run_local],
102
- outputs=[run_btn, logs])
103
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ import uuid
3
  from io_utils import read_scanners, write_scanners, read_inference_type, write_inference_type
4
  from wordings import INTRODUCTION_MD, CONFIRM_MAPPING_DETAILS_MD
5
+ from text_classification_ui_helpers import try_submit, check_dataset_and_get_config, check_dataset_and_get_split, check_model_and_show_prediction, write_column_mapping_to_config, get_logs_file
6
 
7
  MAX_LABELS = 20
8
  MAX_FEATURES = 20
 
12
  CONFIG_PATH='./config.yaml'
13
 
14
def get_demo():
    """Build and return the Gradio Blocks UI for text-classification evaluation.

    Lays out model/dataset inputs, prediction preview, column-mapping
    dropdowns, scanner/inference config, a run button, and a log textbox
    that is refreshed every 0.5 s from the per-evaluation log file.

    Returns:
        gr.Blocks: the assembled demo, ready for the caller to launch.
    """
    with gr.Blocks() as demo:
        with gr.Row():
            gr.Markdown(INTRODUCTION_MD)
        with gr.Row():
            model_id_input = gr.Textbox(
                label="Hugging Face model id",
                placeholder=EXAMPLE_MODEL_ID + " (press enter to confirm)",
            )

            dataset_id_input = gr.Textbox(
                label="Hugging Face Dataset id",
                placeholder=EXAMPLE_DATA_ID + " (press enter to confirm)",
            )

        with gr.Row():
            dataset_config_input = gr.Dropdown(label='Dataset Config', visible=False)
            dataset_split_input = gr.Dropdown(label='Dataset Split', visible=False)

        with gr.Row():
            example_input = gr.Markdown('Example Input', visible=False)
        with gr.Row():
            example_prediction = gr.Label(label='Model Prediction Sample', visible=False)

        with gr.Row():
            with gr.Accordion(label='Label and Feature Mapping', visible=False, open=False) as column_mapping_accordion:
                with gr.Row():
                    gr.Markdown(CONFIRM_MAPPING_DETAILS_MD)
                # Pre-create a fixed pool of hidden dropdowns; helpers toggle
                # visibility/choices instead of creating components dynamically.
                column_mappings = []
                with gr.Row():
                    with gr.Column():
                        for _ in range(MAX_LABELS):
                            column_mappings.append(gr.Dropdown(visible=False))
                    with gr.Column():
                        for _ in range(MAX_LABELS, MAX_LABELS + MAX_FEATURES):
                            column_mappings.append(gr.Dropdown(visible=False))

        with gr.Accordion(label='Model Wrap Advance Config (optional)', open=False):
            run_local = gr.Checkbox(value=True, label="Run in this Space")
            use_inference = read_inference_type('./config.yaml') == 'hf_inference_api'
            run_inference = gr.Checkbox(value=use_inference, label="Run with Inference API")

        with gr.Accordion(label='Scanner Advance Config (optional)', open=False):
            selected = read_scanners('./config.yaml')
            # currently we remove data_leakage from the default scanners
            # Reason: data_leakage barely raises any issues and takes too many requests
            # when using inference API, causing rate limit error
            scan_config = selected + ['data_leakage']
            scanners = gr.CheckboxGroup(choices=scan_config, value=selected, label='Scan Settings', visible=True)

        with gr.Row():
            run_btn = gr.Button(
                "Get Evaluation Result",
                variant="primary",
                interactive=True,
                size="lg",
            )

        with gr.Row():
            # A fresh uid per demo build names the log file read by get_logs_file.
            uid = uuid.uuid4()
            uid_label = gr.Textbox(label="Evaluation ID:", value=uid, visible=False)
            logs = gr.Textbox(label="Giskard Bot Evaluation Log:", visible=False)
            # Poll the per-uid log file twice a second to refresh the textbox.
            demo.load(get_logs_file, uid_label, logs, every=0.5)

        # Persist any column-mapping change straight to the config file.
        gr.on(triggers=[label.change for label in column_mappings],
              fn=write_column_mapping_to_config,
              inputs=[dataset_id_input, dataset_config_input, dataset_split_input, *column_mappings])

        gr.on(triggers=[model_id_input.change, dataset_config_input.change, dataset_split_input.change],
              fn=check_model_and_show_prediction,
              inputs=[model_id_input, dataset_id_input, dataset_config_input, dataset_split_input],
              outputs=[example_input, example_prediction, column_mapping_accordion, *column_mappings])

        dataset_id_input.blur(check_dataset_and_get_config, dataset_id_input, dataset_config_input)

        dataset_config_input.change(
            check_dataset_and_get_split,
            inputs=[dataset_id_input, dataset_config_input],
            outputs=[dataset_split_input])

        scanners.change(
            write_scanners,
            inputs=scanners
        )

        run_inference.change(
            write_inference_type,
            inputs=[run_inference]
        )

        gr.on(
            triggers=[
                run_btn.click,
            ],
            fn=try_submit,
            inputs=[
                model_id_input,
                dataset_id_input,
                dataset_config_input,
                dataset_split_input,
                run_local,
                uid_label],
            outputs=[run_btn, logs])

        def enable_run_btn():
            # try_submit disables the button; any config change re-enables it.
            return gr.update(interactive=True)

        gr.on(
            triggers=[
                model_id_input.change,
                dataset_config_input.change,
                dataset_split_input.change,
                run_inference.change,
                run_local.change,
                scanners.change],
            fn=enable_run_btn,
            inputs=None,
            outputs=[run_btn])

        gr.on(
            triggers=[label.change for label in column_mappings],
            fn=enable_run_btn,
            inputs=None,
            outputs=[run_btn])

    # NOTE(review): the original built `demo` but never exposed it, leaving the
    # constructed UI unreachable by callers; returning it fixes that. Confirm
    # the call site (e.g. app.py) expects `demo = get_demo()`.
    return demo
text_classification_ui_helpers.py CHANGED
@@ -3,7 +3,6 @@ from wordings import CONFIRM_MAPPING_DETAILS_FAIL_RAW
3
  import json
4
  import os
5
  import logging
6
- import uuid
7
  import threading
8
  from io_utils import read_column_mapping, write_column_mapping, save_job_to_pipe, write_log_to_user_file
9
  import datasets
@@ -125,12 +124,14 @@ def check_model_and_show_prediction(model_id, dataset_id, dataset_config, datase
125
  )
126
 
127
  def get_logs_file(uid):
128
- file = open(f"./tmp/{uid}_log")
 
129
  contents = file.readlines()
 
130
  file.close()
131
  return '\n'.join(contents)
132
 
133
- def try_submit(m_id, d_id, config, split, local):
134
  all_mappings = read_column_mapping(CONFIG_PATH)
135
 
136
  if all_mappings is None:
@@ -168,14 +169,14 @@ def try_submit(m_id, d_id, config, split, local):
168
 
169
  eval_str = f"[{m_id}]<{d_id}({config}, {split} set)>"
170
  logging.info(f"Start local evaluation on {eval_str}")
171
- uid = uuid.uuid4()
172
  save_job_to_pipe(uid, command, threading.Lock())
173
  write_log_to_user_file(uid, f"Start local evaluation on {eval_str}. Please wait for your job to start...\n")
174
  gr.Info(f"Start local evaluation on {eval_str}")
175
 
176
  return (
177
  gr.update(interactive=False),
178
- gr.update(value=get_logs_file(uid), visible=True, interactive=False, every=5))
179
 
180
  else:
181
  gr.Info("TODO: Submit task to an endpoint")
 
3
  import json
4
  import os
5
  import logging
 
6
  import threading
7
  from io_utils import read_column_mapping, write_column_mapping, save_job_to_pipe, write_log_to_user_file
8
  import datasets
 
124
  )
125
 
126
def get_logs_file(uid):
    """Return the current contents of the per-evaluation log file.

    Polled by the UI (``demo.load(..., every=0.5)``) to refresh the
    "Giskard Bot Evaluation Log" textbox.

    Args:
        uid: Evaluation id; the log lives at ``./tmp/{uid}_log``.

    Returns:
        str: the log text, or an empty string when the file does not
        exist yet (the worker creates it lazily, so early polls must
        not crash the refresh loop).
    """
    try:
        # Context manager guarantees the handle is closed even if the
        # read raises; the old open()/close() pair leaked on error.
        with open(f"./tmp/{uid}_log", "r") as log_file:
            # read() keeps the original line breaks; the previous
            # '\n'.join(readlines()) doubled every newline on display.
            return log_file.read()
    except FileNotFoundError:
        # Log file not written yet — show nothing rather than erroring.
        return ""
133
 
134
+ def try_submit(m_id, d_id, config, split, local, uid):
135
  all_mappings = read_column_mapping(CONFIG_PATH)
136
 
137
  if all_mappings is None:
 
169
 
170
  eval_str = f"[{m_id}]<{d_id}({config}, {split} set)>"
171
  logging.info(f"Start local evaluation on {eval_str}")
172
+ # uid = uuid.uuid4()
173
  save_job_to_pipe(uid, command, threading.Lock())
174
  write_log_to_user_file(uid, f"Start local evaluation on {eval_str}. Please wait for your job to start...\n")
175
  gr.Info(f"Start local evaluation on {eval_str}")
176
 
177
  return (
178
  gr.update(interactive=False),
179
+ gr.update(lines=5, visible=True, interactive=False))
180
 
181
  else:
182
  gr.Info("TODO: Submit task to an endpoint")
wordings.py CHANGED
@@ -8,7 +8,7 @@ CONFIRM_MAPPING_DETAILS_MD = '''
8
  <h1 style="text-align: center;">
9
  Confirm Pre-processing Details
10
  </h1>
11
- Please confirm the pre-processing details below. If you are not sure, please double check your model and dataset.
12
  '''
13
  CONFIRM_MAPPING_DETAILS_FAIL_MD = '''
14
  <h1 style="text-align: center;">
 
8
  <h1 style="text-align: center;">
9
  Confirm Pre-processing Details
10
  </h1>
11
+ Please confirm the pre-processing details below. Align the column names of your model in the <b>dropdown</b> menu to your dataset's. If you are not sure, please double check your model and dataset.
12
  '''
13
  CONFIRM_MAPPING_DETAILS_FAIL_MD = '''
14
  <h1 style="text-align: center;">