import gradio as gr
from models import yolo
from cv2 import COLOR_BGR2RGB, imread, cvtColor, resize
import numpy as np
import pandas as pd
# maximum displayed size (pixels) of the annotated output image
MAX_H = 1000
MAX_W = 1000

# class names used by the detector, one per line
with open("eggs.names") as f:
    classes = f.read().split("\n")
num_classes = len(classes)
# stitched_h = 1
# stitched_v = 1
# magnification_reduction = 2.5
# h_mag_factor = int(stitched_h * magnification_reduction)  # e.g. 6
# v_mag_factor = int(stitched_v * magnification_reduction)  # e.g. 8
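# Hypothetical helper (an assumption, not used anywhere in this app): the commented-out
# constants above and the interface description below suggest that tile counts are chosen
# so each tile roughly matches the 100x magnification of the training images. A minimal
# sketch of that calculation, assuming a 100x training magnification:
def tiles_for_magnification(image_mag, training_mag=100):
    """Rough tile count so a single tile covers roughly the training image scale."""
    return max(1, round(training_mag / image_mag))
# e.g. tiles_for_magnification(40) == 2, consistent with the 2-3 tiles suggested for 40x images.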
classifier = yolo("yolov4-eggs.cfg", "yolov4-eggs_best.weights", classes)
def classify(img_path, h_tiles, v_tiles):
    img = imread(img_path)
    # break the full image into h_tiles x v_tiles sub-images so each tile is
    # closer to the scale of the training data
    h_splits = np.array_split(img, h_tiles)
    h_offset = 0
    output = img.copy()
    detections = []
    for h_split in h_splits:
        v_offset = 0
        # split this horizontal strip vertically
        v_splits = np.array_split(h_split, v_tiles, axis=1)
        for split in v_splits:
            # run detection on this tile; the offsets locate the tile within the
            # full image (h_offset tracks rows, v_offset tracks columns)
            predictions = classifier.run_inference(split, h_offset, v_offset)
            output = classifier.show_predictions(predictions, output)
            # advance the column offset by the width of this tile
            v_offset += split.shape[1]
            detections.extend(predictions)
        # advance the row offset by the height of this strip
        h_offset += h_split.shape[0]
    detections = pd.DataFrame(detections, columns=["x", "y", "h", "w", "confidence", "class", "class id"])
    output = cvtColor(output, COLOR_BGR2RGB)
    # downscale the annotated image so neither dimension exceeds the display limits,
    # preserving the aspect ratio
    h, w = output.shape[0], output.shape[1]
    scale = min(1.0, MAX_H / h, MAX_W / w)
    return resize(output, dsize=None, fx=scale, fy=scale), detections
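
# Example usage outside Gradio (a sketch; assumes one of the bundled example images,
# e.g. "ex1.jpg", sits next to this script):
#   annotated, detections = classify("ex1.jpg", h_tiles=1, v_tiles=1)
#   print(detections.head())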
description = """
Demonstration of a kelp sporophyte object detection network.
The tiling sliders control the amount of horizontal and vertical tiling
applied to the image. Training images were taken at 100x magnification
(10x objective lens x 10x eyepiece), so tiling can be used to match the input
image scale to the training data scale, which generally improves performance.
For example, to detect objects in an image taken at 40x, try using 2-3
horizontal and vertical tiles.
"""
iface = gr.Interface(fn=classify,
                     inputs=[gr.Image(type="filepath"),
                             gr.Slider(minimum=1, maximum=10, value=1, step=1, label="Horizontal tiles"),
                             gr.Slider(minimum=1, maximum=10, value=1, step=1, label="Vertical tiles")],
                     outputs=[gr.Image(), gr.DataFrame()],
                     examples=[["ex1.jpg", 1, 1],
                               ["ex2.jpg", 1, 1],
                               ["ex3.jpg", 1, 1],
                               ["ex4.jpg", 1, 1],
                               ["sporo_40x.jpg", 2, 2]],
                     description=description)

iface.launch()