design edits to space
- app.py +12 -7
- data_mnist +1 -1
- metrics.json +1 -1
- model.pth +1 -1
- optimizer.pth +1 -1
- utils.py +1 -3
app.py
CHANGED
@@ -120,8 +120,8 @@ class MNISTCorrupted(Dataset):
         self.transform = transform
         corrupted_dir="./mnist_c"
         files = [f.name for f in os.scandir(corrupted_dir)]
-        images = [np.load(os.path.join(os.path.join(corrupted_dir,f),'test_images.npy')) for f in files]
-        labels = [np.load(os.path.join(os.path.join(corrupted_dir,f),'test_labels.npy')) for f in files]
+        images = [np.load(os.path.join(os.path.join(corrupted_dir,f),'test_images.npy'))[:200] for f in files]
+        labels = [np.load(os.path.join(os.path.join(corrupted_dir,f),'test_labels.npy'))[:200] for f in files]
         self.data = np.vstack(images)
         self.labels = np.hstack(labels)
 
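The new [:200] slices cap each corruption split at its first 200 test samples, which keeps the stacked evaluation set small enough for the Space to load and retrain quickly. Below is a minimal sketch of the resulting loading logic, assuming the standard MNIST-C layout of one sub-directory per corruption under ./mnist_c, each holding test_images.npy and test_labels.npy; the class name, the per_corruption parameter, and the __len__/__getitem__ bodies are illustrative, not copied from app.py.

import os
import numpy as np
from torch.utils.data import Dataset

class MNISTCorruptedSubset(Dataset):
    """Stacks the first `per_corruption` test samples from every
    corruption folder under `corrupted_dir`, as app.py now does."""

    def __init__(self, corrupted_dir="./mnist_c", per_corruption=200, transform=None):
        self.transform = transform
        files = [f.name for f in os.scandir(corrupted_dir) if f.is_dir()]
        # Load only the first `per_corruption` samples of each corruption type.
        images = [np.load(os.path.join(corrupted_dir, f, "test_images.npy"))[:per_corruption] for f in files]
        labels = [np.load(os.path.join(corrupted_dir, f, "test_labels.npy"))[:per_corruption] for f in files]
        self.data = np.vstack(images)    # roughly (n_corruptions * 200, 28, 28, 1) for MNIST-C
        self.labels = np.hstack(labels)  # (n_corruptions * 200,)

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        image, label = self.data[idx], self.labels[idx]
        if self.transform is not None:
            image = self.transform(image)
        return image, label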
@@ -423,13 +423,18 @@ def main():
 
 
         image_input =gr.inputs.Image(source="canvas",shape=(28,28),invert_colors=True,image_mode="L",type="pil")
-
+        gr.Markdown(MODEL_IS_WRONG)
+
+        with gr.Row():
+            label_output = gr.outputs.Label(num_top_classes=2)
+
+
+        number_dropdown = gr.Dropdown(choices=[i for i in range(10)],type='value',default=None,label="What was the correct prediction?")
 
-
-
-        number_dropdown = gr.Dropdown(choices=[i for i in range(10)],type='value',default=None,label="What was the correct prediction?")
+        with gr.Row():
+            submit = gr.Button("Submit")
 
-
+            flag_btn = gr.Button("Flag")
 
         output_result = gr.outputs.HTML()
         adversarial_number = gr.Variable(value=0)
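Taken together, the added lines turn the feedback area into a prompt, a two-class label output, a digit dropdown, and a row with Submit and Flag buttons. The following is a layout-only sketch of how these widgets fit together, written against the current gr.* component names rather than the older gr.inputs/gr.outputs aliases used in the Space; the demo variable, the Markdown text, and the omission of event handlers and the adversarial_number state are assumptions for brevity.

import gradio as gr

with gr.Blocks() as demo:
    # Drawing input for the digit (the Space uses the older
    # gr.inputs.Image(source="canvas", shape=(28, 28), ...) form).
    image_input = gr.Image(image_mode="L", type="pil", label="Draw a digit")

    gr.Markdown("### Did the model get it wrong? Choose the correct prediction below and flag it.")

    with gr.Row():
        label_output = gr.Label(num_top_classes=2)

    number_dropdown = gr.Dropdown(choices=[i for i in range(10)],
                                  label="What was the correct prediction?")

    with gr.Row():
        submit = gr.Button("Submit")
        flag_btn = gr.Button("Flag")

    output_result = gr.HTML()

demo.launch()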
data_mnist
CHANGED
@@ -1 +1 @@
-Subproject commit
+Subproject commit 351aced7a740962dd354c05cf67efb1f1652739f
metrics.json
CHANGED
@@ -1 +1 @@
-{"all": [10.55875015258789], "0": [0.0], "1": [0.0], "2": [0.0], "3": [43.33333206176758], "4": [86.66666412353516], "5": [0.0], "6": [0.0], "7": [0.0], "8": [0.0], "9": [0.0]}
+{"all": [10.55875015258789], "0": [0.0, 0.0], "1": [0.0, 0.0], "2": [0.0, 0.0], "3": [43.33333206176758, 100.0], "4": [86.66666412353516, 0.0], "5": [0.0, 0.0], "6": [0.0, 0.0], "7": [0.0, 0.0], "8": [0.0, 0.0], "9": [0.0, 0.0]}
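In this commit each per-digit list in metrics.json gains a second entry while "all" keeps its single value, so the file appears to accumulate one per-digit accuracy per evaluation round. A small sketch of how such a file could be extended after a round follows; the helper name and the default of 0.0 for unreported digits are assumptions, not taken from the Space.

import json

def append_round(path, per_digit):
    """Append one evaluation round to a metrics.json that maps
    "all" and the digit keys "0".."9" to lists of metric values."""
    with open(path) as f:
        metrics = json.load(f)
    for digit in range(10):
        # Digits without a reported value default to 0.0 (an assumption).
        metrics[str(digit)].append(per_digit.get(digit, 0.0))
    with open(path, "w") as f:
        json.dump(metrics, f)

# Hypothetical call mirroring the values added here:
# append_round("metrics.json", {3: 100.0})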
model.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:e6fb83a68fe8dca1a7a9bc9db3029071edaf292ab2c3fda48ac3661579efe873
 size 89871
optimizer.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:29d01887c93ca1c9b69aee6ceb2f77c3c0db91936d5c13e92d5dfc7075bb2237
 size 89807
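model.pth and optimizer.pth are Git LFS pointer files, so retraining only swaps their sha256 oids while the byte sizes stay the same for an unchanged architecture. A hedged sketch of the save/load round trip that would regenerate such checkpoints with PyTorch is shown below; the function names and default file paths are placeholders, not the Space's actual code.

import torch

def save_checkpoint(model, optimizer, model_path="model.pth", optim_path="optimizer.pth"):
    # Persist only the state dicts; committing the files updates the
    # Git LFS pointers (new sha256 oid, typically the same size).
    torch.save(model.state_dict(), model_path)
    torch.save(optimizer.state_dict(), optim_path)

def load_checkpoint(model, optimizer, model_path="model.pth", optim_path="optimizer.pth"):
    model.load_state_dict(torch.load(model_path))
    optimizer.load_state_dict(torch.load(optim_path))
    return model, optimizer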
utils.py
CHANGED
@@ -20,9 +20,7 @@ WHAT_TO_DO="""
 """
 
 MODEL_IS_WRONG = """
-
-
-### Did the model get it wrong? Choose the correct prediction below and flag it. When you flag it, the instance is saved to our dataset and the model is trained on it.
+### Did the model get it wrong or has a low confidence? Choose the correct prediction below and flag it. When you flag it, the instance is saved to our dataset and the model is trained on it.
 """
 DEFAULT_TEST_METRIC = "<html> Current test metric - Avg. loss: 1000, Accuracy: 30/1000 (30%) </html>"
 