KahChoo committed on
Commit 613f91b
1 Parent(s): 311a9b6

Create app.py

Files changed (1)
  1. app.py +135 -0
app.py ADDED
import math

import numpy as np
import gradio as gr
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from PIL import Image

# img2pose_model, transform, and renderer are expected to be provided by the
# img2pose project setup before this script runs; both estimators below use them.

demo = gr.Blocks()

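# The three names used below but not defined in this file -- img2pose_model,
# transform, and renderer -- come from the img2pose setup. The stubs below are
# only a sketch of the interface this script assumes, derived from how the
# objects are used here; they are hypothetical stand-ins, not the real img2pose
# API, and can be handy for exercising the UI without the trained model.

import torch


class _StubPoseModel:
    """Hypothetical stand-in: predict() takes a list of image tensors and
    returns, per image, a dict with "boxes" (N x 4), "scores" (N) and
    "dofs" (N x 6) tensors, which is what this app expects."""

    def predict(self, images):
        return [{
            "boxes": torch.tensor([[10.0, 10.0, 110.0, 130.0]]),
            "scores": torch.tensor([0.95]),
            "dofs": torch.tensor([[0.10, -0.20, 0.05, 0.0, 0.0, 1.0]]),
        } for _ in images]


class _StubRenderer:
    """Hypothetical stand-in: the app only calls transform_vertices() and
    render(), and displays whatever render() returns."""

    def transform_vertices(self, img, poses):
        return poses

    def render(self, img, trans_vertices, alpha=1):
        return np.asarray(img)


def _stub_transform(img):
    # Convert a PIL image to a CHW float tensor, as the real pipeline would.
    return torch.from_numpy(np.asarray(img)).permute(2, 0, 1).float() / 255.0


# Uncomment to run the UI against the stubs instead of the real model:
# img2pose_model = _StubPoseModel()
# renderer = _StubRenderer()
# transform = _stub_transform
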
def estimate(input_img):
    threshold = 0.9
    img = Image.fromarray(input_img.astype('uint8'), 'RGB')

    (w, h) = img.size
    # Ad-hoc pinhole intrinsics: focal length ~ (w + h), principal point at the centre.
    image_intrinsics = np.array([[w + h, 0, w // 2], [0, w + h, h // 2], [0, 0, 1]])

    res = img2pose_model.predict([transform(img)])[0]

    all_bboxes = res["boxes"].cpu().numpy().astype('float')

    # Keep only detections whose score clears the threshold.
    poses = []
    bboxes = []
    for i in range(len(all_bboxes)):
        if res["scores"][i] > threshold:
            bbox = all_bboxes[i]
            pose_pred = res["dofs"].cpu().numpy()[i].astype('float')
            pose_pred = pose_pred.squeeze()

            poses.append(pose_pred)
            bboxes.append(bbox)

    trans_vertices = renderer.transform_vertices(img, poses)
    img = renderer.render(img, trans_vertices, alpha=1)

    # Local debug view with bounding boxes; the Gradio output uses the rendered image.
    plt.figure(figsize=(8, 8))
    for bbox in bboxes:
        plt.gca().add_patch(patches.Rectangle((bbox[0], bbox[1]), bbox[2] - bbox[0], bbox[3] - bbox[1],
                                              linewidth=3, edgecolor='b', facecolor='none'))
    plt.imshow(img)
    plt.show()

    # Report the rotation components of the last detection in degrees.
    pose_pred[0] = math.degrees(pose_pred[0])
    pose_pred[1] = math.degrees(pose_pred[1])
    pose_pred[2] = math.degrees(pose_pred[2])
    return img, pose_pred[0], pose_pred[1], pose_pred[2], pose_pred[3], pose_pred[4], pose_pred[5]

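# For a concrete sense of the ad-hoc camera intrinsics built in estimate() and
# of the radians-to-degrees conversion applied to the pose, the helper below
# (never called by the app, illustration only) works through a hypothetical
# 640x480 input.


def _intrinsics_and_degrees_example(w=640, h=480):
    # Focal length approximated by w + h, principal point at the image centre:
    # [[1120    0  320]
    #  [   0 1120  240]
    #  [   0    0    1]]
    intrinsics = np.array([[w + h, 0, w // 2], [0, w + h, h // 2], [0, 0, 1]])

    # Example rotation values in radians -> degrees (~5.73, ~-11.46, ~2.86).
    rotation_deg = [math.degrees(r) for r in (0.10, -0.20, 0.05)]
    return intrinsics, rotation_deg
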
def estimate_crowd(input_img):
    threshold = 0.5
    img = Image.fromarray(input_img.astype('uint8'), 'RGB')

    (w, h) = img.size
    image_intrinsics = np.array([[w + h, 0, w // 2], [0, w + h, h // 2], [0, 0, 1]])

    res = img2pose_model.predict([transform(img)])[0]

    all_bboxes = res["boxes"].cpu().numpy().astype('float')

    # Keep every detection above the (lower) crowd threshold.
    poses = []
    bboxes = []
    for i in range(len(all_bboxes)):
        if res["scores"][i] > threshold:
            bbox = all_bboxes[i]
            pose_pred = res["dofs"].cpu().numpy()[i].astype('float')
            pose_pred = pose_pred.squeeze()

            poses.append(pose_pred)
            bboxes.append(bbox)

    # render_plot(img.copy(), poses, bboxes)

    n = str(len(bboxes))

    trans_vertices = renderer.transform_vertices(img, poses)
    img = renderer.render(img, trans_vertices, alpha=1)
    return img, n

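# The single-head tab reports yaw, pitch and roll in degrees plus three
# translation values, matching the 6DoF description shown in the first tab.
# The helper below (not used by the app) sketches how those angles could be
# composed back into a rotation matrix with plain NumPy; the axis convention
# (pitch about x, yaw about y, roll about z) is an assumption for illustration.


def _rotation_from_euler(pitch_deg, yaw_deg, roll_deg):
    p, y, r = np.radians([pitch_deg, yaw_deg, roll_deg])
    Rx = np.array([[1, 0, 0],
                   [0, np.cos(p), -np.sin(p)],
                   [0, np.sin(p), np.cos(p)]])
    Ry = np.array([[np.cos(y), 0, np.sin(y)],
                   [0, 1, 0],
                   [-np.sin(y), 0, np.cos(y)]])
    Rz = np.array([[np.cos(r), -np.sin(r), 0],
                   [np.sin(r), np.cos(r), 0],
                   [0, 0, 1]])
    return Rz @ Ry @ Rx
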
with demo:

    gr.Markdown("Head Pose Estimation in Crowd")

    with gr.Tabs():
        with gr.TabItem("What is Head Pose Estimation?"):
            gr.Markdown(
                """
                ### Head pose estimation in crowd (HPECS) is the task of estimating the head poses of people in crowd scenes.

                The six degrees of freedom (6DoF) of a head pose describe its freedom of movement in three-dimensional space:
                the rotational motion (pitch, yaw and roll) and the translational motion (up/down, left/right and front/back).
                Regressing the 6DoF of a 3D head pose harvests more versatile spatial information, which makes it possible to
                track the movement of an object even in a still image.
                """
            )
            gr.Image("/content/gdrive/MyDrive/project_folder/img2pose/6DoF.gif")

        with gr.TabItem("Head Pose Estimation"):
            gr.Markdown(
                """
                ### Please insert an image with a single head
                """
            )
            image = gr.Image()
            text_button = gr.Button("Estimate")
            with gr.Row():
                outputs = [gr.Image(),
                           gr.Textbox(label="Yaw"),
                           gr.Textbox(label="Pitch"),
                           gr.Textbox(label="Roll"),
                           gr.Textbox(label="tx"),
                           gr.Textbox(label="ty"),
                           gr.Textbox(label="tz")]

        with gr.TabItem("Estimate Head Pose in Crowd"):
            gr.Markdown(
                """
                ### Please insert a crowd scene image
                """
            )
            image_input = gr.Image()
            with gr.Row():
                image_output = [gr.Image(),
                                gr.Textbox(label="Number of head poses estimated")]
            image_button = gr.Button("Estimate")

    text_button.click(estimate, inputs=image, outputs=outputs)
    image_button.click(estimate_crowd, inputs=image_input, outputs=image_output)

demo.launch()
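# In a script, demo.launch() blocks until the server is stopped. When running
# inside a notebook (e.g. Colab), a temporary public link can be requested:
# demo.launch(share=True)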