from flask import Flask, request, jsonify
from werkzeug.utils import secure_filename
import os
import tensorflow as tf
import tensorflow_hub as hub
import numpy as np
import cv2

app = Flask(__name__)
# Define constants or parameters
min_kick_angle = 30  # Minimum thigh angle (degrees from vertical) for the leg to count as a kick
frame_window = 10    # Number of frames in the sliding window used for action recognition

# Per-request state (kick counter, frame buffer, and highest-kick tracking) is
# initialized inside the /detect_front_kick handler so results do not leak
# between requests.

# Load the MoveNet model for pose estimation from TensorFlow Hub
model = hub.load("https://tfhub.dev/google/movenet/singlepose/thunder/4")
pose_net = model.signatures['serving_default']
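
# MoveNet Thunder returns a tensor of shape [1, 1, 17, 3]: 17 COCO-ordered
# keypoints, each as (y, x, score) with coordinates normalized to [0, 1].
# The indices used below (12 = right hip, 14 = right knee) follow that ordering.
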
# Define upload folder for video files
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = {'mp4'}
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
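# Make sure the upload folder exists before any file is saved
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
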
# Function to detect front kick based on keypoints
def detect_front_kick_func(keypoints, frame_number):
    keypoints_array = keypoints[-1]            # Most recent keypoints in the sliding window
    right_hip = keypoints_array[0, 0, 12, :]   # Right hip (MoveNet/COCO index 12)
    right_knee = keypoints_array[0, 0, 14, :]  # Right knee (MoveNet/COCO index 14)
    # Skip frames where either keypoint was detected with low confidence
    if right_knee[2] < 0.4 or right_hip[2] < 0.4:
        return False, -1, None, None
    # Keypoints are (y, x, score); the angle measures how far the thigh
    # deviates from vertical (0 degrees = leg hanging straight down)
    angle = np.arctan2(right_knee[1] - right_hip[1], right_knee[0] - right_hip[0]) * 180 / np.pi
    if abs(angle) > min_kick_angle:
        return True, frame_number, right_knee, right_hip
    return False, -1, None, None
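
# Minimal usage sketch (hypothetical values): given `window`, a list of
# MoveNet outputs of shape [1, 1, 17, 3], and the current frame index, the
# helper can be called directly, e.g.:
#   is_kick, kick_frame, knee, hip = detect_front_kick_func(window, 42)
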
def allowed_file(filename):
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS

@app.route('/detect_front_kick', methods=['POST'])
def detect_front_kick():
    try:
        # Check if the 'video' field is in the request
        if 'video' not in request.files:
            return jsonify({'error': 'No video file provided'}), 400
        video_file = request.files['video']
        # Check if the file has an allowed extension
        if not allowed_file(video_file.filename):
            return jsonify({'error': 'Invalid file format. Only MP4 videos are allowed.'}), 400
        # Save the video file to the upload folder under a sanitized name
        video_filename = secure_filename(video_file.filename)
        video_filepath = os.path.join(app.config['UPLOAD_FOLDER'], video_filename)
        video_file.save(video_filepath)
        # Open the video file for processing
        cap = cv2.VideoCapture(video_filepath)
        # Check if the video file was opened successfully
        if not cap.isOpened():
            return jsonify({'error': 'Failed to open video file.'}), 500
        # Per-request state for kick detection
        kick_counter = 0
        highest_kick_frame = -1   # Frame number of the highest kick seen so far
        highest_kick_knee = None  # Knee keypoint for that kick
        highest_kick_hip = None   # Hip keypoint for that kick
        frame_buffer = []         # Sliding window of per-frame keypoints
        frame_number = 0          # Initialize frame number
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            # Preprocess the frame: OpenCV decodes frames as BGR, so convert to
            # RGB and resize to the 256x256 input expected by MoveNet Thunder
            rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            resized_frame = cv2.resize(rgb_frame, (256, 256))
            image = tf.constant(resized_frame, dtype=tf.int32)
            image = tf.expand_dims(image, axis=0)
            # Run model inference
            outputs = pose_net(image)
            keypoints = outputs['output_0'].numpy()
            # Append the keypoints to the frame buffer
            frame_buffer.append(keypoints)
            # Maintain a sliding window of frames for action recognition
            if len(frame_buffer) > frame_window:
                frame_buffer.pop(0)
            # Perform action recognition once the window is full
            if len(frame_buffer) == frame_window:
                is_kick, frame_with_kick, knee, hip = detect_front_kick_func(frame_buffer, frame_number)
                if is_kick:
                    kick_counter += 1
                    if frame_with_kick > highest_kick_frame:
                        highest_kick_frame = frame_with_kick
                        highest_kick_knee = knee
                        highest_kick_hip = hip
            frame_number += 1
        cap.release()
        # Summarize the detection results for the JSON response
        response_data = {
            'kick_counter': kick_counter,
            'highest_kick_frame': highest_kick_frame,
            'highest_kick_knee': highest_kick_knee.tolist() if highest_kick_knee is not None else None,
            'highest_kick_hip': highest_kick_hip.tolist() if highest_kick_hip is not None else None,
        }
        return jsonify(response_data)
    except Exception as e:
        return jsonify({'error': str(e)}), 500

@app.route('/home', methods=['GET'])
def homie():
    return jsonify({"message": "none"})

if __name__ == '__main__':
    app.run(debug=True)
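
# Example request (assumes the server is running locally on Flask's default
# port 5000 and that 'kick.mp4' is a hypothetical local test video):
#   curl -X POST -F "video=@kick.mp4" http://127.0.0.1:5000/detect_front_kick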