freyam committed
Commit eee0fd3 • 1 Parent(s): 0998e6d
Cleanup app launching and error handling for FileNotFound
app.py
CHANGED
@@ -140,66 +140,79 @@ def evaluate():
 
 
 def load_dataset(local_dataset, hf_dataset):
-    # (previous load_dataset body; the removed lines are not rendered in this view)
+    try:
+        if local_dataset:
+            EVALUATION["dataset_id"] = os.path.splitext(
+                os.path.basename(local_dataset.name)
+            )[0]
+            EVALUATION["source"] = "Local Dataset"
+            EVALUATION["df"] = pd.read_csv(local_dataset.name)
+        else:
+            EVALUATION["dataset_id"] = hf_dataset
+            EVALUATION["source"] = "HuggingFace Hub"
+            EVALUATION["df"] = hf_load_dataset(
+                hf_dataset, split="train[0:100]"
+            ).to_pandas()
+
+        columns = EVALUATION["df"].select_dtypes(include=["object"]).columns.tolist()
+        column_corpus = EVALUATION["df"][columns[0]].tolist()[:5]
+
+        dataset_sampling_method = gr.Radio(
+            label="Scope",
+            info="Determines the scope of the dataset to be analyzed",
+            choices=["First", "Last", "Random"],
+            value="First",
+            visible=True,
+            interactive=True,
+        )
 
+        dataset_sampling_size = gr.Slider(
+            label=f"Number of Entries",
+            info=f"Determines the number of entries to be analyzed. Due to computational constraints, the maximum number of entries that can be analyzed is {SAMPLING_SIZE_THRESHOLD}.",
+            minimum=1,
+            maximum=min(EVALUATION["df"].shape[0], SAMPLING_SIZE_THRESHOLD),
+            value=min(EVALUATION["df"].shape[0], SAMPLING_SIZE_THRESHOLD),
+            visible=True,
+            interactive=True,
+        )
 
+        dataset_column = gr.Radio(
+            label="Column",
+            info="Determines the column to be analyzed. These are the columns with text data.",
+            choices=columns,
+            value=columns[0],
+            visible=True,
+            interactive=True,
+        )
 
+        dataset_column_corpus = gr.Dataframe(
+            value=pd.DataFrame({f"{columns[0]}": column_corpus}), visible=True
+        )
 
+        dataset_import_btn = gr.Button(
+            value="Import Dataset",
+            interactive=True,
+            variant="primary",
+            visible=True,
+        )
 
+        return (
+            dataset_sampling_method,
+            dataset_sampling_size,
+            dataset_column,
+            dataset_column_corpus,
+            dataset_import_btn,
+        )
+
+    except FileNotFoundError as e:
+        print(f"FileNotFoundError: {e}")
+        return (
+            gr.Radio(visible=False),
+            gr.Slider(visible=False),
+            gr.Radio(visible=False),
+            gr.Dataframe(visible=False),
+            gr.Button(visible=False),
+        )
 
 
 def import_dataset(dataset_sampling_method, dataset_sampling_size, dataset_column):
@@ -453,35 +466,3 @@ with BiasAware:
 
 if __name__ == "__main__":
     BiasAware.launch()
-
-
-if __name__ == "__main__":
-    BiasAware.launch()
-
-
-if __name__ == "__main__":
-    BiasAware.launch()
-
-
-if __name__ == "__main__":
-    BiasAware.launch()
-
-
-if __name__ == "__main__":
-    BiasAware.launch()
-
-
-if __name__ == "__main__":
-    BiasAware.launch()
-
-
-if __name__ == "__main__":
-    BiasAware.launch()
-
-
-if __name__ == "__main__":
-    BiasAware.launch()
-
-
-if __name__ == "__main__":
-    BiasAware.launch()
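For context on the new error path: when the uploaded file handle points at a path that no longer exists, pd.read_csv(local_dataset.name) raises FileNotFoundError, and the callback now responds by returning hidden components instead of crashing the app. Below is a minimal, self-contained sketch of that pattern; the load_csv function, its column handling, and the two-component output are hypothetical illustrations, not the app's actual wiring.

import gradio as gr
import pandas as pd


def load_csv(path):
    """Return configured components, or hidden ones if the CSV cannot be read."""
    try:
        df = pd.read_csv(path)  # raises FileNotFoundError for a missing path
        text_columns = df.select_dtypes(include=["object"]).columns.tolist()
        return (
            gr.Radio(choices=text_columns, value=text_columns[0], visible=True),
            gr.Dataframe(value=df[text_columns[0]].to_frame().head(5), visible=True),
        )
    except FileNotFoundError as e:
        print(f"FileNotFoundError: {e}")
        # Returning components with visible=False hides the outputs instead of erroring.
        return gr.Radio(visible=False), gr.Dataframe(visible=False)

In recent Gradio versions, returning a freshly constructed component from an event handler updates the corresponding output in place, which is the mechanism the except branch in the commit relies on.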
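The Hub branch of load_dataset uses the datasets library's split-slicing syntax, so only the first 100 rows of the train split are loaded into the resulting Dataset before conversion to pandas. A small standalone illustration of that call follows; the "imdb" dataset id is only an example, not something the app uses.

from datasets import load_dataset as hf_load_dataset

# Slice the split at load time so only 100 rows are materialized.
df = hf_load_dataset("imdb", split="train[0:100]").to_pandas()

# The app then keeps only text-like (object-dtype) columns for analysis.
text_columns = df.select_dtypes(include=["object"]).columns.tolist()
print(df.shape, text_columns)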
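The second hunk collapses the accumulated duplicate entry points into a single guard: BiasAware is still defined at import time, but launch() now runs only once, when app.py is executed as a script. A minimal illustration of the pattern that remains after the cleanup; the Blocks content here is a placeholder, not the app's real UI.

import gradio as gr

with gr.Blocks() as BiasAware:
    gr.Markdown("placeholder UI")

if __name__ == "__main__":
    # Runs only for `python app.py`; importing this module does not start the server.
    BiasAware.launch()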