Commit 7a70a13
liam-jemison committed
Parent(s): 230a0da
adding tiling + dataframe output
Files changed:
- app.py (+59 -17)
- models.py (+20 -3)
- sporo_40x.jpg (+0 -0)
app.py CHANGED

@@ -1,7 +1,8 @@
 import gradio as gr
 from models import yolo
-from cv2 import
+from cv2 import COLOR_BGR2RGB, imread, cvtColor
 import numpy as np
+import pandas as pd
 
 
 
@@ -10,26 +11,67 @@ with open("eggs.names") as f:
     classes = f.read().split("\n")
 
 num_classes = len(classes)
-
-
-
+
+# stitched_h = 1
+# stitched_v = 1
+# magnification_reduction = 2.5
+
+# h_mag_factor = int(stitched_h * magnification_reduction) #e.g. 6
+# v_mag_factor = int(stitched_v * magnification_reduction) #e.g. 8
+
 
 classifier = yolo("yolov4-eggs.cfg", "yolov4-eggs_best.weights", classes)
 
-def classify(img):
-    predictions = classifier.run_inference(img)
+def classify(img_path, h_tiles, v_tiles):
 
-
-
-
-
-
-
-
-
+    img = imread(img_path)
+    #break the full image up into h_tiles x v_tiles subimages
+
+    h_splits = np.array_split(img, h_tiles)
+
+
+    h_offset = 0
+    output = img.copy()
+    detections = []
+    for h_split in h_splits:
+        v_offset = 0
+        #split this horizontal strip vertically
+        v_splits = np.array_split(h_split, v_tiles, axis=1)
+        for split in v_splits:
+            predictions = classifier.run_inference(split, h_offset, v_offset)
+            #print(len(predictions))
+
+            output = classifier.show_predictions(predictions, output)
+            #advance the column offset by this tile's width
+            v_offset += split.shape[1]
+
+            detections.extend(predictions)
+
+
+        #advance the row offset by this strip's height
+        h_offset += h_split.shape[0]
+    detections = pd.DataFrame(detections, columns=["x", "y", "w", "h", "confidence", "class", "class id"])
+    return cvtColor(output, COLOR_BGR2RGB), detections
 
-
-
+description = """
+Demonstration of a kelp sporophyte object detection network.
+
+The tiling sliders control the amount of horizontal and vertical tiling
+applied to the image. Training images were taken at 100x magnification
+(10x objective x 10x eyepiece), so the tiling can be used to match the
+input image scale to the training data scale, which generally improves
+performance. For example, to detect objects in an image taken at 40x,
+try using 2-3 horizontal and vertical tiles.
 
-
+"""
+iface = gr.Interface(fn=classify,
+                     inputs=[gr.Image(type="filepath"), gr.Slider(minimum=1, maximum=10, value=1, step=1),
+                             gr.Slider(minimum=1, maximum=10, value=1, step=1)],
+                     outputs=[gr.Image(invert_colors=False), gr.DataFrame()],
+                     examples=[["ex1.jpg", 1, 1],
+                               ["ex2.jpg", 1, 1],
+                               ["ex3.jpg", 1, 1],
+                               ["ex4.jpg", 1, 1],
+                               ["sporo_40x.jpg", 2, 2]],
+                     description=description)
 iface.launch()
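The tiling logic above can be exercised on its own. The following sketch is not part of the commit; it mirrors the approach in classify — np.array_split along each axis, with running offsets mapping tile-local detections back to full-image coordinates — using a hypothetical fake_detector in place of the YOLO network:

import numpy as np
import pandas as pd

def fake_detector(tile, h_offset, v_offset):
    # stand-in for classifier.run_inference: report one "detection" covering
    # the whole tile, shifted into full-image coordinates the way run_inference does
    h, w = tile.shape[:2]
    return [(v_offset, h_offset, w, h, 1.0, "Sporophyte", 0)]

def tile_and_detect(img, h_tiles, v_tiles):
    detections = []
    h_offset = 0
    for h_split in np.array_split(img, h_tiles):                # horizontal strips (rows)
        v_offset = 0
        for split in np.array_split(h_split, v_tiles, axis=1):  # tiles within a strip (columns)
            detections.extend(fake_detector(split, h_offset, v_offset))
            v_offset += split.shape[1]                          # advance by tile width
        h_offset += h_split.shape[0]                            # advance by strip height
    return pd.DataFrame(detections, columns=["x", "y", "w", "h", "confidence", "class", "class id"])

print(tile_and_detect(np.zeros((500, 700, 3), dtype=np.uint8), 2, 2))
# four rows: x in {0, 350}, y in {0, 250} — one per tile

Because np.array_split tolerates counts that do not divide the image evenly, any slider value from 1 to 10 works on any input size.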
models.py CHANGED

@@ -16,11 +16,28 @@ class yolo:
         self.classes = classes
         self.confidence_threshold = confidence_threshold
 
-    def run_inference(self, img):
+        #palette for drawing on images
+        palette = np.arange(0, 255, dtype=np.uint8).reshape(1, 255, 1)
+        palette = cv.applyColorMap(palette, cv.COLORMAP_HSV).squeeze(0)
+        np.random.shuffle(palette)
+        self.palette = palette
+    def show_predictions(self, predictions, input):
+        img = input.copy()
+        for (x, y, w, h, confidence, label, classID) in predictions:
+            #print(label)
+
+            cv.rectangle(img, (x, y), (x + w, y + h), tuple(self.palette[classID].tolist()), 2)
+            text = "{}: {:.2f}".format(label, confidence)
+            cv.putText(img, text, (x, y - 5), cv.FONT_HERSHEY_SIMPLEX, 0.5, tuple(self.palette[classID].tolist()), 1)
+            # for (value, key) in zip([name, x, y, w, h, confidence, label, classID], detections.keys()):
+            #     detections[key].append(value)
+        return img
+
+    def run_inference(self, img, h_offset, v_offset):
         """forward pass on a list of images
 
         returns: list of predictions in the format
-        [(
+        [(x, y, w, h, conf, class),..] e.g. [(215, 47, 199, 258, 0.8504674434661865, 'Sporophyte')]
         """
 
 
@@ -69,7 +86,7 @@ class yolo:
             (w, h) = (boxes[i][2], boxes[i][3])
             label = self.classes[classIDs[i]]
             confidence = confidences[i]
-            outputs.append((x, y, w, h, confidence, label, classIDs[i]))
+            outputs.append((x + v_offset, y + h_offset, w, h, confidence, label, classIDs[i]))
         return outputs
         """
         colors = np.random.randint(0, 255, size=(len(classes), 3), dtype='uint8')
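The palette added to __init__ is worth noting: mapping a 0-254 grayscale ramp through OpenCV's HSV colormap and shuffling it yields up to 255 visually distinct BGR colors indexed by class ID. A minimal standalone sketch of the same trick (not part of the commit; the detection values are made up for illustration):

import cv2 as cv
import numpy as np

# build the per-class palette the same way the commit does
palette = np.arange(0, 255, dtype=np.uint8).reshape(1, 255, 1)   # 1x255 grayscale ramp
palette = cv.applyColorMap(palette, cv.COLORMAP_HSV).squeeze(0)  # -> (255, 3) BGR colors
np.random.shuffle(palette)                                       # decorrelate neighboring class IDs

# draw one hypothetical detection the way show_predictions does
img = np.zeros((300, 400, 3), dtype=np.uint8)
x, y, w, h, confidence, label, class_id = 50, 60, 120, 90, 0.85, "Sporophyte", 0
color = tuple(palette[class_id].tolist())
cv.rectangle(img, (x, y), (x + w, y + h), color, 2)
cv.putText(img, "{}: {:.2f}".format(label, confidence), (x, y - 5),
           cv.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)
cv.imwrite("annotated.jpg", img)

The shuffle matters because adjacent class IDs would otherwise get near-identical hues from the colormap.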
sporo_40x.jpg ADDED