Anm5 committed on
Commit
f46aee3
1 Parent(s): 1aa0982

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -230
app.py CHANGED
@@ -1,233 +1,43 @@
1
- #!/usr/bin/env python
2
-
3
- from __future__ import annotations
4
-
5
- import pathlib
6
- import math
7
-
8
- import gradio as gr
9
- import cv2
10
- import mediapipe as mp
11
  import numpy as np
12
 
13
- mp_drawing = mp.solutions.drawing_utils
14
- mp_drawing_styles = mp.solutions.drawing_styles
15
- mp_pose = mp.solutions.pose
16
-
17
- TITLE = "MediaPipe Human Pose Estimation"
18
- DESCRIPTION = "https://google.github.io/mediapipe/"
19
-
20
-
21
def calculateAngle(landmark1, landmark2, landmark3):
    """Return the angle, in degrees within [0, 360), formed at *landmark2*.

    The angle is measured between the ray landmark2->landmark3 and the ray
    landmark2->landmark1, using each landmark's ``x`` and ``y`` attributes.

    Args:
        landmark1: First landmark (has x and y coordinates).
        landmark2: Vertex landmark at which the angle is measured.
        landmark3: Third landmark (has x and y coordinates).

    Returns:
        The signed angle normalized into the range [0, 360).
    """
    ax, ay = landmark1.x, landmark1.y
    bx, by = landmark2.x, landmark2.y
    cx, cy = landmark3.x, landmark3.y

    # Difference of the two ray directions, converted to degrees.
    theta = math.degrees(
        math.atan2(cy - by, cx - bx) - math.atan2(ay - by, ax - bx)
    )

    # atan2 differences land in (-360, 360); fold negatives into [0, 360).
    return theta + 360 if theta < 0 else theta
51
-
52
def classifyPose(landmarks, output_image, display=False):
    '''
    This function classifies yoga poses depending upon the angles of various body joints.
    Args:
        landmarks: A list of detected landmarks of the person whose pose needs to be classified.
        output_image: A image of the person with the detected pose landmarks drawn.
        display: A boolean value that is if set to true the function displays the resultant image with the pose label
        written on it and returns nothing.
    Returns:
        output_image: The image with the detected pose landmarks drawn and pose label written.
        label: The classified pose label of the person in the output_image.

    '''

    # Initialize the label of the pose. It is not known at this stage.
    label = 'Unknown Pose'

    # Specify the color (Red) with which the label will be written on the image.
    color = (0, 0, 255)

    # Calculate the required angles.
    #----------------------------------------------------------------------------------------------------------------

    # Get the angle between the left shoulder, elbow and wrist points.
    left_elbow_angle = calculateAngle(landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value],
                                      landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value],
                                      landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value])

    # Get the angle between the right shoulder, elbow and wrist points.
    right_elbow_angle = calculateAngle(landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value],
                                       landmarks[mp_pose.PoseLandmark.RIGHT_ELBOW.value],
                                       landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value])

    # Get the angle between the left elbow, shoulder and hip points.
    left_shoulder_angle = calculateAngle(landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value],
                                         landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value],
                                         landmarks[mp_pose.PoseLandmark.LEFT_HIP.value])

    # Get the angle between the right hip, shoulder and elbow points.
    right_shoulder_angle = calculateAngle(landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value],
                                          landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value],
                                          landmarks[mp_pose.PoseLandmark.RIGHT_ELBOW.value])

    # Get the angle between the left hip, knee and ankle points.
    left_knee_angle = calculateAngle(landmarks[mp_pose.PoseLandmark.LEFT_HIP.value],
                                     landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value],
                                     landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value])

    # Get the angle between the right hip, knee and ankle points
    right_knee_angle = calculateAngle(landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value],
                                      landmarks[mp_pose.PoseLandmark.RIGHT_KNEE.value],
                                      landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE.value])

    #----------------------------------------------------------------------------------------------------------------
    # Check for Five-Pointed Star Pose.
    # NOTE(review): MediaPipe landmark coordinates are typically normalized to
    # [0, 1], while thresholds like 100/200 look pixel-scaled — confirm the
    # coordinate space, otherwise the "< 100" tests are always true and the
    # "> 200" tests always false.
    if abs(landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].y - landmarks[mp_pose.PoseLandmark.LEFT_HIP.value].y) < 100 and \
       abs(landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].y - landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value].y) < 100 and \
       abs(landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value].x - landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE.value].x) > 200 and \
       abs(landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].x - landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].x) > 200:
        label = "Five-Pointed Star Pose"

    # Check if it is the warrior II pose or the T pose: both require straight
    # arms (elbows ~180 deg) with shoulders abducted to roughly 90 deg.
    if left_elbow_angle > 165 and left_elbow_angle < 195 and right_elbow_angle > 165 and right_elbow_angle < 195:
        if left_shoulder_angle > 80 and left_shoulder_angle < 110 and right_shoulder_angle > 80 and right_shoulder_angle < 110:
            # Warrior II: one leg straight and the other bent.
            if left_knee_angle > 165 and left_knee_angle < 195 or right_knee_angle > 165 and right_knee_angle < 195:
                if left_knee_angle > 90 and left_knee_angle < 120 or right_knee_angle > 90 and right_knee_angle < 120:
                    label = 'Warrior II Pose'
            # T pose: both legs straight. Note this can overwrite the
            # Warrior II label set just above when the ranges overlap.
            if left_knee_angle > 160 and left_knee_angle < 195 and right_knee_angle > 160 and right_knee_angle < 195:
                label = 'T Pose'

    # Check if it is the tree pose: one leg straight, the other folded.
    if left_knee_angle > 165 and left_knee_angle < 195 or right_knee_angle > 165 and right_knee_angle < 195:
        if left_knee_angle > 315 and left_knee_angle < 335 or right_knee_angle > 25 and right_knee_angle < 45:
            label = 'Tree Pose'

    # Check for Upward Salute Pose: wrists vertically aligned with the hips
    # and raised above the shoulders, shoulders level.
    # NOTE(review): same pixel-vs-normalized threshold concern as above.
    if abs(landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].x - landmarks[mp_pose.PoseLandmark.LEFT_HIP.value].x) < 100 and \
       abs(landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].x - landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value].x) < 100 and \
       landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].y < landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].y and \
       landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].y < landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value].y and \
       abs(landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].y - landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value].y) < 50:
        label = "Upward Salute Pose"

    # Check for Hands Under Feet Pose: wrists below the knees and horizontally
    # close to the ankles. Later checks overwrite earlier labels by design of
    # this if-cascade (no elif), so ordering matters.
    if landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].y > landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value].y and \
       landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].y > landmarks[mp_pose.PoseLandmark.RIGHT_KNEE.value].y and \
       abs(landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].x - landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value].x) < 50 and \
       abs(landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].x - landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE.value].x) < 50:
        label = "Hands Under Feet Pose"

    #----------------------------------------------------------------------------------------------------------------

    # Check if the pose is classified successfully
    if label != 'Unknown Pose':

        # Update the color (to green) with which the label will be written on the image.
        color = (0, 255, 0)

    # Write the label on the output image (drawn in-place with OpenCV).
    cv2.putText(output_image, label, (220, 30),cv2.FONT_HERSHEY_PLAIN, 2, color, 2)

    # Check if the resultant image is specified to be displayed.
    if display:

        # Display the resultant image.
        # NOTE(review): `plt` is never imported in this module, so the
        # display=True path raises NameError; it also returns None implicitly,
        # which would break callers that unpack two values.
        plt.figure(figsize=[10,10])
        plt.imshow(output_image[:,:,::-1]);plt.title("Output Image");plt.axis('off');

    else:

        # Return the output image and the classified label.
        return output_image, label
165
-
166
-
167
def run(
    image: np.ndarray,
    model_complexity: int,
    enable_segmentation: bool,
    min_detection_confidence: float,
    background_color: str,
) -> np.ndarray:
    """Run MediaPipe pose estimation on a single image and return it annotated.

    Args:
        image: Input image array (H, W, 3); presumably RGB as delivered by the
            gradio Image component — confirm channel order.
        model_complexity: MediaPipe Pose model complexity (0, 1 or 2).
        enable_segmentation: Whether to compute a person-segmentation mask and
            replace the background with a solid color.
        min_detection_confidence: Minimum detection confidence for the model.
        background_color: One of "white", "black" or "green".

    Returns:
        The annotated image with channels reversed back to the input order.

    Raises:
        ValueError: If background_color is not one of the supported names.
    """
    # Pose is used as a context manager so the model is released per call.
    with mp_pose.Pose(
        static_image_mode=True,
        model_complexity=model_complexity,
        enable_segmentation=enable_segmentation,
        min_detection_confidence=min_detection_confidence,
    ) as pose:
        results = pose.process(image)

    # Channel-reversed copy (presumably RGB -> BGR for cv2 drawing); the copy
    # keeps the caller's array untouched.
    res = image[:, :, ::-1].copy()
    if enable_segmentation:
        if background_color == "white":
            bg_color = 255
        elif background_color == "black":
            bg_color = 0
        elif background_color == "green":
            bg_color = (0, 255, 0)  # type: ignore
        else:
            raise ValueError

        if results.segmentation_mask is not None:
            # Pixels with low person-probability are treated as background.
            res[results.segmentation_mask <= 0.1] = bg_color
        else:
            # No mask produced (e.g. no person detected): blank the frame.
            res[:] = bg_color

    # Draw the skeleton overlay in-place on res.
    mp_drawing.draw_landmarks(
        res,
        results.pose_landmarks,
        mp_pose.POSE_CONNECTIONS,
        landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style(),
    )

    if results.pose_landmarks:
        # Classify the pose and stamp the label onto the image.
        res, pose_classification = classifyPose(results.pose_landmarks.landmark, res)  # Pose Classification code

    # Reverse channels back to the input ordering for display.
    return res[:, :, ::-1]
209
-
210
-
211
# Model complexity options exposed in the UI: 0, 1, 2.
model_complexities = list(range(3))
background_colors = ["white", "black", "green"]

# Example gallery: every .jpg under images/, each pre-filled with
# model complexity 1, segmentation on, confidence 0.5 and a white background.
image_paths = sorted(pathlib.Path("images").rglob("*.jpg"))
examples = [[path, model_complexities[1], True, 0.5, background_colors[0]] for path in image_paths]

# Gradio UI wiring: the inputs list mirrors run()'s parameters in order.
demo = gr.Interface(
    fn=run,
    inputs=[
        gr.Image(label="Input", type="numpy"),
        gr.Radio(label="Model Complexity", choices=model_complexities, type="index", value=model_complexities[1]),
        gr.Checkbox(label="Enable Segmentation", value=True),
        gr.Slider(label="Minimum Detection Confidence", minimum=0, maximum=1, step=0.05, value=0.5),
        gr.Radio(label="Background Color", choices=background_colors, type="value", value=background_colors[0]),
    ],
    outputs=gr.Image(label="Output"),
    examples=examples,
    title=TITLE,
    description=DESCRIPTION,
)

if __name__ == "__main__":
    # Queueing serializes requests so concurrent users don't contend.
    demo.queue().launch()
 
1
+ from flask import Flask, request, jsonify
2
+ from PIL import Image
3
+ import io
4
+ import base64
5
+ import logging
 
 
 
 
 
6
  import numpy as np
7
 
8
+ app = Flask(__name__)
9
+
10
def recognize_pose(image):
    """Classify the pose shown in *image* and return its label.

    Placeholder implementation: always reports "warrior" until real model
    inference replaces this stub.
    """
    # TODO: swap this constant for an actual model inference call.
    return "warrior"
15
+
16
@app.route('/api/recognize_pose', methods=['POST'])
def api_recognize_pose():
    """Handle POST /api/recognize_pose.

    Expects a JSON body of the form {"image": "<image>"} where <image> is
    either a bare base64 string or a "data:image/...;base64,<data>" data URL.
    Returns {"pose": <label>} on success, {"error": ...} with HTTP 400 for
    bad input, or HTTP 500 for unexpected failures.
    """
    try:
        # request.json raises/returns None on a missing or non-JSON body;
        # validate up front so malformed requests get a 400, not a 500.
        payload = request.get_json(silent=True)
        if not isinstance(payload, dict):
            return jsonify({'error': 'Invalid input data format'}), 400

        # Take whatever follows the first comma if one is present, so both
        # data URLs and bare base64 strings work. The original split(",")[1]
        # raised IndexError (-> 500) for payloads without a comma.
        image_data = payload['image'].split(",", 1)[-1]
        image_bytes = base64.b64decode(image_data)
        image = Image.open(io.BytesIO(image_bytes))

        # Preprocess and recognize pose
        image = preprocess_image(image)
        pose = recognize_pose(image)
        return jsonify({'pose': pose})
    except (KeyError, ValueError) as ke:
        # KeyError: missing "image" field. ValueError: invalid base64
        # (binascii.Error subclasses ValueError) — both are client errors.
        logging.error(f"Invalid input: {ke}")
        return jsonify({'error': 'Invalid input data format'}), 400
    except Exception as e:
        logging.error(f"Unexpected error: {e}")
        return jsonify({'error': str(e)}), 500
34
+
35
def preprocess_image(image):
    """Convert a PIL image into a normalized float batch for the model.

    Args:
        image: A PIL Image (any mode).

    Returns:
        A numpy array of shape (1, 224, 224, 3) with values scaled to [0, 1].
    """
    # Force three channels first: uploaded PNGs may be RGBA or grayscale,
    # which would otherwise yield a tensor with the wrong channel count.
    image = image.convert("RGB")
    image = image.resize((224, 224))  # Resize image to expected input size
    image = np.array(image) / 255.0  # Normalize image
    image = np.expand_dims(image, axis=0)  # Add batch dimension
    return image
41
+
42
if __name__ == '__main__':
    # Start the Flask development server.
    # NOTE(review): debug=True enables the interactive debugger and
    # auto-reload; it must never be enabled in production (the Werkzeug
    # debugger allows arbitrary code execution).
    app.run(debug=True)