erceguder committed
Commit: 522b959
Parent: d3649fd

initial version

Files changed (1)
  1. app.py +85 -24
app.py CHANGED
@@ -3,9 +3,12 @@ import gradio as gr
 import numpy as np
 from datasets import load_dataset
 import os
+import tempfile
+

 dataset = load_dataset("erceguder/histocan-test", token=os.environ["HF_TOKEN"])

+
 COLOR_PALETTE = {
     'others': (0, 0, 0),
     't-g1': (0, 192, 0),
@@ -14,10 +17,17 @@ COLOR_PALETTE = {
     'normal-mucosa': (0, 32, 255)
 }

+
 def files_uploaded(paths):
     if len(paths) != 16:
         raise gr.Error("16 segmentation masks are needed.")

+    uploaded_file_names = [paths[i].name.split('/')[-1] for i in range(16)]
+    for i in range(16):
+        if f"test{i:04d}.png" not in uploaded_file_names:
+            raise gr.Error(f"Uploaded file names are not recognized.")
+
+
 def evaluate(paths):
     if paths == None:
         raise gr.Error("Upload segmentation masks first!")
@@ -42,13 +52,18 @@ def evaluate(paths):
         }
         scores[class_] = idict

+    tmpdir = tempfile.TemporaryDirectory()
     for path in paths:
-        pred = np.array(Image.open(path.name))     # shape (H, W, 3)
-        # gt = np.array(Image.open(os.path.basename(file.name)))
+        os.rename(path.name, os.path.join(tmpdir.name, path.name.split('/')[-1]))

-        # assert gt.ndim == 2
+    for item in dataset["test"]:
+        pred_path = os.path.join(tmpdir.name, item["name"])
+        pred = np.array(Image.open(pred_path))
+        gt = np.array(item["annotation"])
+
+        assert gt.ndim == 2
         assert pred.ndim == 3 and pred.shape[-1] == 3
-        assert gt.shape == pred.shape[:-1]
+        assert gt.shape == pred.shape[:-1]

         # Get predictions for all classes
         out = [(pred == color).all(axis=-1) for color in COLOR_PALETTE.values()]
@@ -57,18 +72,18 @@ def evaluate(paths):
         # Calculate confusion matrix and metrics
         for i, class_ in enumerate(COLOR_PALETTE.keys()):
             class_pred = maps[i]
-            # class_gt = (gt == i)
+            class_gt = (gt == i)

-            # tp = np.sum(class_pred[class_gt==True])
-            # fp = np.sum(class_pred[class_gt==False])
-            # tn = np.sum(np.logical_not(class_pred)[class_gt==False])
-            # fn = np.sum(np.logical_not(class_pred)[class_gt==True])
+            tp = np.sum(class_pred[class_gt==True])
+            fp = np.sum(class_pred[class_gt==False])
+            tn = np.sum(np.logical_not(class_pred)[class_gt==False])
+            fn = np.sum(np.logical_not(class_pred)[class_gt==True])

-            # # Accumulate metrics for each class
-            # metrics[class_]['tp'] += tp
-            # metrics[class_]['fp'] += fp
-            # metrics[class_]['tn'] += tn
-            # metrics[class_]['fn'] += fn
+            # Accumulate metrics for each class
+            metrics[class_]['tp'] += tp
+            metrics[class_]['fp'] += fp
+            metrics[class_]['tn'] += tn
+            metrics[class_]['fn'] += fn

     # Init mean recall, precision and F1 score
     mRecall = 0.0
@@ -80,7 +95,7 @@
         scores[class_]['recall'] = metrics[class_]['tp'] / (metrics[class_]['tp'] + metrics[class_]['fn']) if metrics[class_]['tp'] > 0 else 0.0
         scores[class_]['precision'] = metrics[class_]['tp'] / (metrics[class_]['tp'] + metrics[class_]['fp']) if metrics[class_]['tp'] > 0 else 0.0
         scores[class_]['f1'] = 2 * scores[class_]['precision'] * scores[class_]['recall'] / (scores[class_]['precision'] + scores[class_]['recall']) if (scores[class_]['precision'] != 0 and scores[class_]['recall'] != 0) else 0.0
-
+
         mRecall += scores[class_]['recall']
         mPrecision += scores[class_]['precision']
         mF1 += scores[class_]['f1']
@@ -91,12 +106,58 @@
     mPrecision /= class_count
     mF1 /= class_count

-with gr.Blocks() as demo:
-    gr.Markdown("# HistoCan Evaluation Page")
-    files = gr.File(label="Upload the segmentation masks for test set", file_count="multiple", file_types=["image"])
-    run = gr.Button(value="Run evaluation")
-
-    files.upload(files_uploaded, files, [])
-    run.click(evaluate, files, [])
-
-    demo.launch()
+    tmpdir.cleanup()
+
+    result = """
+<div align="center">
+
+# Results
+
+|           | Others | T-G1 | T-G2 | T-G3 | Normal mucosa |
+|-----------|--------|------|------|------|---------------|
+| Precision | {:.2f} |{:.2f}|{:.2f}|{:.2f}| {:.2f}        |
+| Recall    | {:.2f} |{:.2f}|{:.2f}|{:.2f}| {:.2f}        |
+| Dice      | {:.2f} |{:.2f}|{:.2f}|{:.2f}| {:.2f}        |
+
+### mPrecision: {:.4f}
+### mRecall: {:.4f}
+### mDice: {:.4f}
+
+</div>
+"""
+
+    result = result.format(
+        scores["others"]["precision"],
+        scores["t-g1"]["precision"],
+        scores["t-g2"]["precision"],
+        scores["t-g3"]["precision"],
+        scores["normal-mucosa"]["precision"],
+        scores["others"]["recall"],
+        scores["t-g1"]["recall"],
+        scores["t-g2"]["recall"],
+        scores["t-g3"]["recall"],
+        scores["normal-mucosa"]["recall"],
+        scores["others"]["f1"],
+        scores["t-g1"]["f1"],
+        scores["t-g2"]["f1"],
+        scores["t-g3"]["f1"],
+        scores["normal-mucosa"]["f1"],
+        mPrecision,
+        mRecall,
+        mF1
+    )
+    return gr.Markdown(value=result)
+
+
+if __name__ == "__main__":
+    with gr.Blocks() as demo:
+        gr.Markdown("# Histocan Test Set Evaluation Page")
+        files = gr.File(label="Upload your segmentation masks for the test set", file_count="multiple", file_types=["image"])
+        run = gr.Button(value="Evaluate!")
+        output = gr.Markdown(value="")
+
+        files.upload(files_uploaded, files, [])
+        run.click(evaluate, files, [output])
+
+        demo.queue(max_size=1)
+        demo.launch()
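
For reference, a minimal, self-contained sketch (not part of the commit) of the per-class scoring that the new evaluate() loop performs: one boolean map per palette color is built from the RGB prediction, compared against the integer ground-truth labels, and the resulting counts give precision, recall and Dice (F1). The toy arrays and the two-entry palette below are illustrative assumptions only.

import numpy as np

# Illustrative toy example only (not repository code): a 2x2 RGB prediction mask
# and the matching integer ground-truth map, using a two-entry palette subset.
PALETTE = {'others': (0, 0, 0), 't-g1': (0, 192, 0)}
pred = np.array([[[0, 0, 0], [0, 192, 0]],
                 [[0, 192, 0], [0, 192, 0]]], dtype=np.uint8)   # shape (H, W, 3)
gt = np.array([[0, 1],
               [0, 1]])                                         # shape (H, W), class indices

# One boolean map per class, as in evaluate(): a pixel belongs to a class
# iff its RGB value equals that class's palette color.
maps = [(pred == color).all(axis=-1) for color in PALETTE.values()]

for i, class_ in enumerate(PALETTE):
    class_pred = maps[i]
    class_gt = (gt == i)

    tp = np.sum(class_pred & class_gt)    # predicted class_ and labelled class_
    fp = np.sum(class_pred & ~class_gt)   # predicted class_ but labelled otherwise
    fn = np.sum(~class_pred & class_gt)   # labelled class_ but predicted otherwise

    recall = tp / (tp + fn) if tp > 0 else 0.0
    precision = tp / (tp + fp) if tp > 0 else 0.0
    dice = 2 * precision * recall / (precision + recall) if (precision and recall) else 0.0
    print(f"{class_}: precision={precision:.2f} recall={recall:.2f} dice={dice:.2f}")

In app.py the counts are accumulated in metrics[class_] across all 16 test masks before the scores are computed, so the reported precision, recall and Dice are dataset-level rather than per-image averages.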