File size: 2,880 Bytes
b39fc19
 
c3634f9
b39fc19
7a70a13
b39fc19
 
 
c3634f9
 
b39fc19
 
 
 
7a70a13
 
 
 
 
 
 
 
b39fc19
 
 
7a70a13
b39fc19
7a70a13
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c3634f9
 
 
 
 
 
 
 
 
 
 
 
 
 
b39fc19
7a70a13
 
 
 
 
 
 
 
 
b39fc19
7a70a13
 
 
 
c3634f9
7a70a13
 
 
 
 
 
b39fc19
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
import gradio as gr
from models import yolo
from cv2 import  COLOR_BGR2RGB, imread, cvtColor, resize
import numpy as np
import pandas as pd



# Maximum output image dimensions — the annotated result is downscaled
# (never upscaled) to fit within this bounding box before display.
MAX_H = 1000
MAX_W = 1000

# Load one class name per line. splitlines() plus the blank-line filter
# prevents a trailing newline in the file from producing a spurious empty
# class (f.read().split("\n") would include it and inflate num_classes).
with open("eggs.names") as f:
    classes = [line for line in f.read().splitlines() if line.strip()]

num_classes = len(classes)

# YOLOv4 detector configured/weighted for the classes listed above.
classifier = yolo("yolov4-eggs.cfg", "yolov4-eggs_best.weights", classes)

def classify(img_path, h_tiles, v_tiles):
    """Run tiled YOLO inference on an image and return the annotated result.

    The image is split into ``h_tiles`` horizontal strips, each strip into
    ``v_tiles`` vertical pieces; inference runs per tile so the tile scale can
    be matched to the training-data magnification.

    Parameters
    ----------
    img_path : str
        Path to the input image (read with OpenCV, so loaded as BGR).
    h_tiles : int
        Number of horizontal strips (splits along image rows).
    v_tiles : int
        Number of vertical pieces per strip (splits along image columns).

    Returns
    -------
    tuple
        (annotated RGB image resized to fit MAX_W x MAX_H,
         pandas DataFrame with one row per detection)
    """
    img = imread(img_path)

    # Gradio sliders can deliver floats; np.array_split requires an int count.
    h_tiles = int(h_tiles)
    v_tiles = int(v_tiles)

    # Break the full image into h_tiles x v_tiles sub-images.
    h_splits = np.array_split(img, h_tiles)

    h_offset = 0
    output = img.copy()
    detections = []
    for h_split in h_splits:
        v_offset = 0
        # Split this horizontal strip vertically.
        v_splits = np.array_split(h_split, v_tiles, axis=1)
        for split in v_splits:
            # Offsets map tile-local detection coordinates back into the
            # full-image coordinate frame.
            predictions = classifier.run_inference(split, h_offset, v_offset)
            output = classifier.show_predictions(predictions, output)
            v_offset += split.shape[1]
            detections.extend(predictions)
        # Advance the row offset by this strip's height.
        h_offset += h_split.shape[0]

    detections = pd.DataFrame(
        detections,
        columns=["x", "y", "h", "w", "confidence", "class", "class id"])

    output = cvtColor(output, COLOR_BGR2RGB)

    # Downscale uniformly so the result fits within MAX_W x MAX_H while
    # preserving the aspect ratio. The previous code derived different fx/fy
    # factors (fy = (y/x)*fx), which squashed every non-square image into a
    # square. Note shape[0] is height and shape[1] is width, and cv2.resize's
    # fx scales width while fy scales height.
    h, w = output.shape[:2]
    scale = min(1.0, MAX_W / w, MAX_H / h)
    return resize(output, dsize=None, fx=scale, fy=scale), detections
        
# User-facing blurb rendered on the Gradio page.
description = """
    Demonstration of kelp sporophyte object detection network. 
    
    The tiling sliders control the amount of horizontal and vertical tiling
    applied to the image. Training images were taken at 100x magnification 
    (10x microscope lens x 10x lense), so the tiling can be used to match input
    image scale to the training data scale, which generally improves performance.
    For example, to detect objects in an image taken at 40x, try using 2-3
    horizontal and vertical tiles. 

"""

# Assemble the interface pieces by name, then build and launch the app.
input_components = [
    gr.Image(type="filepath"),
    gr.Slider(minimum=1, maximum=10, value=1, step=1),  # horizontal tiles
    gr.Slider(minimum=1, maximum=10, value=1, step=1),  # vertical tiles
]
output_components = [gr.Image(), gr.DataFrame()]
example_inputs = [
    ["ex1.jpg", 1, 1],
    ["ex2.jpg", 1, 1],
    ["ex3.jpg", 1, 1],
    ["ex4.jpg", 1, 1],
    ["sporo_40x.jpg", 2, 2],
]

iface = gr.Interface(
    fn=classify,
    inputs=input_components,
    outputs=output_components,
    examples=example_inputs,
    description=description,
)
iface.launch()