JefferyJapheth committed on
Commit 07de9bd
1 Parent(s): 9b0c68a

Upload app.py

Files changed (1)
  1. app.py +0 -27
app.py CHANGED
@@ -1,8 +1,6 @@
 import os
 
-import cv2
 import mediapipe as mp
-import numpy as np
 import tensorflow as tf
 
 N_ROWS = 543
@@ -13,7 +11,6 @@ NUM_CLASSES = 250
 INPUT_SIZE = 32
 
 
-
 # Tensorflow layer to process data in TFLite
 # Data needs to be processed in the model itself, so we cannot use Python
 class PreprocessLayer(tf.keras.layers.Layer):
@@ -117,7 +114,6 @@ interpreter.allocate_tensors()
 input_details = interpreter.get_input_details()
 output_details = interpreter.get_output_details()
 
-
 index_to_class = {
     "TV": 0, "after": 1, "airplane": 2, "all": 3, "alligator": 4, "animal": 5, "another": 6, "any": 7, "apple": 8,
     "arm": 9, "aunt": 10, "awake": 11, "backyard": 12, "bad": 13, "balloon": 14, "bath": 15, "because": 16, "bed": 17,
@@ -199,9 +195,7 @@ with mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=
 import cv2
 import numpy as np
 import gradio as gr
-from tensorflow import lite as tflite
 import tensorflow as tf
-import mediapipe as mp
 
 
 # ... (Previous code remains the same)
@@ -235,27 +229,6 @@ with mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=
 
 # Set mediapipe model
 cap = cv2.VideoCapture(0)
-while cap.isOpened():
-    # Read feed
-    ret, frame = cap.read()
-    if not ret:
-        print("Failed to capture frame from the webcam.")
-        break
-
-    try:
-        # Make predictions
-        prediction = predict_with_webcam(frame)
-
-        # Display the frame with the prediction
-        cv2.putText(frame, prediction, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
-        cv2.imshow('Webcam Landmark Prediction', frame)
-
-    except Exception as e:
-        print("Error occurred:", e)
-
-    # Exit the loop when 'q' key is pressed
-    if cv2.waitKey(1) & 0xFF == ord('q'):
-        break
 
 cap.release()
 cv2.destroyAllWindows()
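
The block deleted in the last hunk drove a local OpenCV window via cv2.imshow, which cannot open a display on a hosted Space; app.py already imports gradio as gr and calls a predict_with_webcam(frame) function, so that prediction function could instead be exposed through a web UI. The following is a minimal sketch of such Gradio wiring, assuming predict_with_webcam accepts a numpy image and returns the predicted label as a string; it is illustrative only and not part of this commit.

import gradio as gr

# Minimal sketch, not code from this commit.
# Assumes predict_with_webcam(frame) from app.py accepts a numpy image array
# and returns the predicted sign label as a string.
demo = gr.Interface(
    fn=predict_with_webcam,
    inputs=gr.Image(),   # Gradio delivers the captured/uploaded image as a numpy array
    outputs="text",      # predicted label
    title="Webcam Landmark Prediction",
)

if __name__ == "__main__":
    demo.launch()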
 