Alimustoofaa committed
Commit 9bd5ac0
Parent: a2f6cfe

first commit

Files changed (1): app.py +62 -0
app.py ADDED
@@ -0,0 +1,62 @@
+ import os
+ import cv2
+ import numpy as np
+ import gradio as gr
+
+ # Paths to the MobileNet-SSD model files
+ PATH_PROTOTXT = os.path.join('saved_model/MobileNetSSD_deploy.prototxt')
+ PATH_MODEL = os.path.join('saved_model/MobileNetSSD_deploy.caffemodel')
+
+ # Class labels the model was trained on
+ CLASSES = [
+     'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
+     'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
+     'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
+ ]
+
+ # Load the model
+ NET = cv2.dnn.readNetFromCaffe(PATH_PROTOTXT, PATH_MODEL)
+
+ def person_counting(image, threshold=0.7):
+     '''
+     Count the number of people in the image.
+     Args:
+         image: image to be processed
+         threshold: confidence threshold used to filter out detections
+     Returns:
+         image: image with rectangles drawn around detected people
+         counting: number of people detected
+     '''
+     counting = 0
+     W, H = image.shape[1], image.shape[0]
+
+     # Build the input blob and run a forward pass of the network
+     blob = cv2.dnn.blobFromImage(image, 0.007843, (W, H), 127.5)
+     NET.setInput(blob)
+     detections = NET.forward()
+
+     for i in np.arange(0, detections.shape[2]):
+         conf = detections[0, 0, i, 2]
+         idx = int(detections[0, 0, i, 1])
+         if CLASSES[idx] == 'person' and conf > threshold:
+             # Scale the bounding box back to the original image size
+             box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
+             x_min, y_min, x_max, y_max = box.astype('int')
+             counting += 1
+             cv2.rectangle(image, pt1=(x_min, y_min), pt2=(x_max, y_max), color=(255, 0, 0), thickness=1)
+     return image, counting
+
+ title = 'People counting'
+ css = ".image-preview {height: auto !important;}"
+
+ inputs = [gr.Image(label='image input'), gr.Slider(0, 1, value=0.5, label='threshold')]
+ outputs = [gr.Image(label='image output'), gr.Number(label='counting')]
+ examples = [[f'images/{i}', 0.5] for i in os.listdir('images')]
+
+ iface = gr.Interface(
+     title=title,
+     fn=person_counting,
+     inputs=inputs,
+     outputs=outputs,
+     examples=examples,
+     css=css
+ )
+
+ iface.launch()