JefferyJapheth committed
Commit ac125c0
1 Parent(s): 312d29d

rectified indentation

Files changed (1):
  1. app.py +18 -13
app.py CHANGED
@@ -1,11 +1,12 @@
 # Import the required libraries
-import cv2
-import numpy as np
 import os
+
+import cv2
 import gradio as gr
+import mediapipe as mp
+import numpy as np
 import tensorflow as tf
 import tensorflow.lite as tflite
-import mediapipe as mp
 
 # Initialize MediaPipe solutions
 mp_holistic = mp.solutions.holistic
@@ -21,32 +22,36 @@ model_path = os.path.join(current_dir, model_filename)
 interpreter = tf.lite.Interpreter(model_path=model_path)
 interpreter.allocate_tensors()
 
+
 # ... (other functions from previous code)
 
 # Function to perform holistic detection using Mediapipe
 def mediapipe_detection(image, model):
     # COLOR CONVERSION BGR 2 RGB
     image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-    image_rgb.flags.writeable = False  # Image is no longer writeable
-    results = model.process(image_rgb)  # Make prediction
-    image_rgb.flags.writeable = True  # Image is now writeable
+    image_rgb.flags.writeable = False  # Image is no longer writeable
+    results = model.process(image_rgb)  # Make prediction
+    image_rgb.flags.writeable = True  # Image is now writeable
     image_bgr = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2BGR)  # COLOR CONVERSION RGB 2 BGR
     return image_bgr, results
 
 
-
 # Function to extract keypoints from Mediapipe results
 def extract_keypoints(results):
     lh = np.array([[res.x, res.y, res.z] for res in results.left_hand_landmarks.landmark]).flatten(
-    ) if results.left_hand_landmarks else np.zeros(21*3)
+    ) if results.left_hand_landmarks else np.zeros(21 * 3)
     rh = np.array([[res.x, res.y, res.z] for res in results.right_hand_landmarks.landmark]).flatten(
-    ) if results.right_hand_landmarks else np.zeros(21*3)
+    ) if results.right_hand_landmarks else np.zeros(21 * 3)
     pose = np.array([[res.x, res.y, res.z, res.visibility] for res in results.pose_landmarks.landmark]).flatten(
-    ) if results.pose_landmarks else np.zeros(33*4)
-    face = np.array([[res.x, res.y, res.z] for res in results.face_landmarks.landmark]).flatten(
-    ) if results.face_landmarks else np.zeros(468*3)
+    ) if results.pose_landmarks else np.zeros(33 * 4)
+    face = np.array([[res.x, res.y, res.z] for res in
+                     results.face_landmarks.landmark]).flatten(
+    ) if results.face_landmarks else np.zeros(468 * 3)
     return
-    np.concatenate([lh, rh, pose, face])
+
+
+    np.concatenate([lh, rh, pose, face])
+
 
 # Main prediction function that combines everything
 def predict_with_webcam(frame):
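
One detail worth flagging in extract_keypoints as committed: in both the old and the new revision, the bare `return` sits on its own line, so the function exits with None and the `np.concatenate([lh, rh, pose, face])` on the following line never runs. The zero-fill fallbacks in the diff imply a 21*3 + 21*3 + 33*4 + 468*3 = 1662-value keypoint vector. A minimal sketch of the function with the return joined to the concatenation, assuming downstream code expects that vector, would be:

import numpy as np

def extract_keypoints(results):
    # Flatten each landmark group; fall back to zeros when a group is not detected.
    lh = np.array([[res.x, res.y, res.z] for res in results.left_hand_landmarks.landmark]).flatten(
    ) if results.left_hand_landmarks else np.zeros(21 * 3)
    rh = np.array([[res.x, res.y, res.z] for res in results.right_hand_landmarks.landmark]).flatten(
    ) if results.right_hand_landmarks else np.zeros(21 * 3)
    pose = np.array([[res.x, res.y, res.z, res.visibility] for res in results.pose_landmarks.landmark]).flatten(
    ) if results.pose_landmarks else np.zeros(33 * 4)
    face = np.array([[res.x, res.y, res.z] for res in results.face_landmarks.landmark]).flatten(
    ) if results.face_landmarks else np.zeros(468 * 3)
    # Keep np.concatenate on the same statement as `return`; a bare `return`
    # followed by the call on the next line returns None instead.
    return np.concatenate([lh, rh, pose, face])

This mirrors the shapes used in the diff; presumably predict_with_webcam feeds the resulting vector to the allocated TFLite interpreter, but that part of app.py lies outside this hunk.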