andrewgleave committed
Commit 6cb8901
1 Parent(s): e45fc58
Files changed (1):
  1. app.py +60 -11

app.py CHANGED
@@ -1,4 +1,7 @@
+from collections import defaultdict
+import datetime
 import io
+import time
 
 import torch
 import gradio as gr
@@ -15,11 +18,11 @@ matplotlib.pyplot.switch_backend('Agg')
 
 COLORS = [
     [0.000, 0.447, 0.741],
-    [0.850, 0.325, 0.098],
-    [0.929, 0.694, 0.125],
-    [0.494, 0.184, 0.556],
-    [0.466, 0.674, 0.188],
-    [0.301, 0.745, 0.933]
+    # [0.850, 0.325, 0.098],
+    # [0.929, 0.694, 0.125],
+    # [0.494, 0.184, 0.556],
+    # [0.466, 0.674, 0.188],
+    # [0.301, 0.745, 0.933]
 ]
 
 PRED_THRESHOLD = 0.90
@@ -32,7 +35,7 @@ def fig2img(fig):
     return img
 
 def composite_predictions(img, processed_predictions):
-    keep = processed_predictions["labels"] >= 1 # only interested in people
+    keep = processed_predictions["labels"] == 1 # only interested in people
     boxes = processed_predictions["boxes"][keep].tolist()
     scores = processed_predictions["scores"][keep].tolist()
     labels = processed_predictions["labels"][keep].tolist()
@@ -42,16 +45,18 @@ def composite_predictions(img, processed_predictions):
     plt.figure(figsize=(16, 10))
     plt.imshow(img)
     axis = plt.gca()
-    colors = COLORS * 100
-    for score, (xmin, ymin, xmax, ymax), label, color in zip(scores, boxes, labels, colors):
+    label_counts = defaultdict(int)
+    for score, (xmin, ymin, xmax, ymax), label, color in zip(scores, boxes, labels, COLORS * len(boxes)):
         axis.add_patch(plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, fill=False, color=color, linewidth=3))
         axis.text(xmin, ymin, f"{label}: {score:0.2f}", fontsize=15, bbox=dict(facecolor="yellow", alpha=0.5))
+        label_counts[label] += 1
     plt.axis("off")
     img = fig2img(plt.gcf())
     matplotlib.pyplot.close()
-    return img, len(boxes)
+    return img, label_counts, datetime.datetime.now()
 
 def process(img):
+    #time.sleep(5)
     inputs = extractor(images=img, return_tensors="pt")
     outputs = model(**inputs)
     img_size = torch.tensor([tuple(reversed(img.size))])
@@ -60,7 +65,51 @@ def process(img):
     # Composite image and prediction bounding boxes + labels prediction
     return composite_predictions(img, processed[0])
 
-demo = gr.Interface(fn=process, inputs=[gr.Image(source="webcam", streaming=True, tool=None, type='pil')], outputs=["image", gr.Label(label="Current Attendance")], live=True)
+
+
+with gr.Blocks() as demo:
+    stream = gr.State()
+    with gr.Row():
+        with gr.Column(scale=1, min_width=600):
+            last_refresh_box = gr.Textbox(label="Last updated")
+            attendance_label = gr.Label(label="Current Attendance")
+    with gr.Row():
+        with gr.Column(scale=1, min_width=600):
+            webcam = gr.Webcam(streaming=True, type='pil')
+            output = gr.Image(label="Composite")
+    webcam.stream(process, [webcam], [output, attendance_label, last_refresh_box])
+    #webcam.change(process, inputs=[], outputs=[output, gr.Label(label="Current Attendance"), last_refresh_box], max_batch_size=10, batch=True)
+    #demo.load(lambda: datetime.datetime.now(), None, last_refresh_box, every=10)
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.queue().launch()
+
+
+
+# import gradio as gr
+# import numpy as np
+# import time
+
+# def add_to_stream(audio, instream):
+#     time.sleep(1)
+#     if audio is None:
+#         return gr.update(), instream
+#     if instream is None:
+#         ret = audio
+#     else:
+#         ret = (audio[0], np.concatenate((instream[1], audio[1])))
+#     return ret, ret
+
+
+# with gr.Blocks() as demo:
+#     inp = gr.Audio(source="microphone")
+#     out = gr.Audio()
+#     stream = gr.State()
+#     clear = gr.Button("Clear")
+
+#     inp.stream(add_to_stream, [inp, stream], [out, stream])
+#     clear.click(lambda: [None, None, None], None, [inp, out, stream])
+
+
+# if __name__ == "__main__":
+#     demo.launch()
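
The core of the change is the move from a live gr.Interface to a gr.Blocks layout whose webcam component streams frames into process via webcam.stream(...), with the queue enabled before launch. Below is a minimal, self-contained sketch of that pattern, assuming the Gradio 3.x API used above; fake_detect and its hard-coded count are hypothetical stand-ins for the real extractor/model pipeline, returning the same (image, counts, timestamp) triple that composite_predictions now returns.

from collections import defaultdict
import datetime

import gradio as gr

def fake_detect(img):
    # Stand-in for the real detection pipeline: echo the frame back,
    # report a fabricated per-label count, and timestamp the result.
    counts = defaultdict(int)
    counts["person"] = 1  # pretend exactly one person was detected
    return img, counts, datetime.datetime.now()

with gr.Blocks() as sketch:
    with gr.Row():
        last_refresh_box = gr.Textbox(label="Last updated")
        attendance_label = gr.Label(label="Current Attendance")
    with gr.Row():
        webcam = gr.Webcam(streaming=True, type="pil")
        output = gr.Image(label="Composite")
    # Each captured frame is passed to the handler; its three return values
    # map positionally onto the three output components.
    webcam.stream(fake_detect, [webcam], [output, attendance_label, last_refresh_box])

if __name__ == "__main__":
    sketch.queue().launch()  # enable the queue before launch, as the commit does

Note the related person-filter fix in the same commit: the old labels >= 1 mask kept essentially every predicted class, while labels == 1 keeps only the person label (per the inline comment), so the attendance count no longer inflates on non-person detections.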