erceguder committed on
Commit
032a6d3
1 Parent(s): 90f636a

add dataset

Browse files
Files changed (3)
  1. .gitignore +1 -0
  2. app.py +98 -4
  3. requirements.txt +2 -1
.gitignore ADDED
@@ -0,0 +1 @@
+ __pycache__
app.py CHANGED
@@ -1,7 +1,101 @@
+ from PIL import Image
  import gradio as gr
+ import numpy as np
+ from datasets import load_dataset

- def greet(name):
-     return "Hello " + name + "!!"
+ dataset = load_dataset("erceguder/histocan-test", token=True)

- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
- iface.launch()
+ COLOR_PALETTE = {
+     'others': (0, 0, 0),
+     't-g1': (0, 192, 0),
+     't-g2': (255, 224, 32),
+     't-g3': (255, 0, 0),
+     'normal-mucosa': (0, 32, 255)
+ }
+
+ def files_uploaded(paths):
+     if len(paths) != 16:
+         raise gr.Error("16 segmentation masks are needed.")
+
+ def evaluate(paths):
+     if paths is None:
+         raise gr.Error("Upload segmentation masks first!")
+
+     # Init dicts for accumulating image metrics and calculating per-class scores
+     metrics = {}
+     for class_ in COLOR_PALETTE.keys():
+         idict = {
+             "tp": 0.0,
+             "fp": 0.0,
+             "tn": 0.0,
+             "fn": 0.0,
+         }
+         metrics[class_] = idict
+
+     scores = {}
+     for class_ in COLOR_PALETTE.keys():
+         idict = {
+             "recall": 0.0,
+             "precision": 0.0,
+             "f1": 0.0
+         }
+         scores[class_] = idict
+
+     for path in paths:
+         pred = np.array(Image.open(path.name))  # shape (H, W, 3)
+         # gt = np.array(Image.open(os.path.basename(file.name)))
+
+         # assert gt.ndim == 2
+         assert pred.ndim == 3 and pred.shape[-1] == 3
+         # assert gt.shape == pred.shape[:-1]
+
+         # Get predictions for all classes
+         out = [(pred == color).all(axis=-1) for color in COLOR_PALETTE.values()]
+         maps = np.stack(out)
+
+         # Calculate confusion matrix and metrics
+         for i, class_ in enumerate(COLOR_PALETTE.keys()):
+             class_pred = maps[i]
+             # class_gt = (gt == i)
+
+             # tp = np.sum(class_pred[class_gt==True])
+             # fp = np.sum(class_pred[class_gt==False])
+             # tn = np.sum(np.logical_not(class_pred)[class_gt==False])
+             # fn = np.sum(np.logical_not(class_pred)[class_gt==True])
+
+             # # Accumulate metrics for each class
+             # metrics[class_]['tp'] += tp
+             # metrics[class_]['fp'] += fp
+             # metrics[class_]['tn'] += tn
+             # metrics[class_]['fn'] += fn
+
+     # Init mean recall, precision and F1 score
+     mRecall = 0.0
+     mPrecision = 0.0
+     mF1 = 0.0
+
+     # Calculate recall, precision and f1 scores for each class
+     for i, class_ in enumerate(COLOR_PALETTE.keys()):
+         scores[class_]['recall'] = metrics[class_]['tp'] / (metrics[class_]['tp'] + metrics[class_]['fn']) if metrics[class_]['tp'] > 0 else 0.0
+         scores[class_]['precision'] = metrics[class_]['tp'] / (metrics[class_]['tp'] + metrics[class_]['fp']) if metrics[class_]['tp'] > 0 else 0.0
+         scores[class_]['f1'] = 2 * scores[class_]['precision'] * scores[class_]['recall'] / (scores[class_]['precision'] + scores[class_]['recall']) if (scores[class_]['precision'] != 0 and scores[class_]['recall'] != 0) else 0.0
+
+         mRecall += scores[class_]['recall']
+         mPrecision += scores[class_]['precision']
+         mF1 += scores[class_]['f1']
+
+     # Calculate mean recall, precision and F1 score over all classes
+     class_count = len(COLOR_PALETTE)
+     mRecall /= class_count
+     mPrecision /= class_count
+     mF1 /= class_count
+
+ with gr.Blocks() as demo:
+     gr.Markdown("# HistoCan Evaluation Page")
+     files = gr.File(label="Upload the segmentation masks for test set", file_count="multiple", file_types=["image"])
+     run = gr.Button(value="Run evaluation")
+
+     files.upload(files_uploaded, files, [])
+     run.click(evaluate, files, [])
+
+ demo.launch()
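
Note: the ground-truth comparison inside evaluate() is still entirely commented out, so every tp/fp/tn/fn count stays at zero and the recall, precision, and F1 scores all come out as 0.0; the loaded erceguder/histocan-test dataset is never read. The sketch below shows one way the commented block could fill in the per-class confusion counts once ground-truth masks are available. The (H, W) index-encoded gt array and the helper name accumulate_counts are assumptions for illustration, not part of this commit.

import numpy as np

PALETTE = {  # same colours as COLOR_PALETTE in app.py
    'others': (0, 0, 0),
    't-g1': (0, 192, 0),
    't-g2': (255, 224, 32),
    't-g3': (255, 0, 0),
    'normal-mucosa': (0, 32, 255),
}

def accumulate_counts(pred_rgb, gt, metrics):
    # pred_rgb: (H, W, 3) colour-coded prediction mask (assumed layout)
    # gt: (H, W) array of class indices (assumed layout)
    # metrics: dict of per-class {'tp', 'fp', 'tn', 'fn'} as built in evaluate()
    for i, (class_, color) in enumerate(PALETTE.items()):
        class_pred = (pred_rgb == color).all(axis=-1)  # boolean map for this class
        class_gt = (gt == i)
        metrics[class_]['tp'] += np.sum(class_pred & class_gt)
        metrics[class_]['fp'] += np.sum(class_pred & ~class_gt)
        metrics[class_]['tn'] += np.sum(~class_pred & ~class_gt)
        metrics[class_]['fn'] += np.sum(~class_pred & class_gt)

With counts accumulated this way, the existing precision/recall/F1 loop produces non-zero scores; evaluate() would still need to return or display them, since its Gradio outputs list is currently empty.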
requirements.txt CHANGED
@@ -1 +1,2 @@
- gradio==3.45.2
+ gradio==3.45.2
+ datasets==2.14.6
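
requirements.txt now pins datasets==2.14.6, which provides the token argument used by load_dataset("erceguder/histocan-test", token=True); with token=True the call authenticates using the locally stored Hugging Face token, which matters if the dataset is private or gated. A quick local check, assuming a token has already been saved (for example via huggingface-cli login):

from datasets import load_dataset

# Assumes `huggingface-cli login` (or the HF_TOKEN environment variable) has
# already stored a valid token with access to the dataset.
ds = load_dataset("erceguder/histocan-test", token=True)
print(ds)  # lists the available splits and example counts if the token grants access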