vikranth1111 committed on
Commit
534fa54
1 Parent(s): ebfc407

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -16
app.py CHANGED
@@ -11,9 +11,26 @@ classifier = Classifier("keras_model.h5", "labels.txt")
11
  offset = 20
12
  imgSize = 300
13
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
  def classify_hand(img):
15
  imgOutput = img.copy()
16
-
 
17
  hands, _ = detector.findHands(img)
18
 
19
  if hands:
@@ -43,8 +60,10 @@ def classify_hand(img):
43
  hGap = math.ceil((imgSize - hCal) / 2)
44
  imgWhite[hGap: hCal + hGap, :] = imgResize
45
 
 
46
  prediction, index = classifier.getPrediction(imgWhite, draw=False)
47
 
 
48
  cv2.rectangle(imgOutput, (x - offset, y - offset - 70), (x - offset + 400, y - offset + 60 - 50), (0, 255, 0),
49
  cv2.FILLED)
50
  cv2.putText(imgOutput, labels[index], (x, y - 30), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 0), 2)
@@ -52,18 +71,5 @@ def classify_hand(img):
52
 
53
  return imgOutput
54
 
55
- # OpenCV function to capture frames from the webcam
56
- def capture_frames():
57
- cap = cv2.VideoCapture(0)
58
- while True:
59
- success, img = cap.read()
60
- if not success:
61
- print("Error: Could not read frame from the camera.")
62
- break
63
- processed_img = classify_hand(img)
64
- cv2.imshow('Hand Gesture Recognition', processed_img)
65
- if cv2.waitKey(1) & 0xFF == ord('q'):
66
- break
67
-
68
- # Start capturing frames
69
- capture_frames()
 
11
  offset = 20
12
  imgSize = 300
13
 
# Probe camera indices 0..MAX_CAMERA_INDEX until one opens.
# Bounding the search fixes the original infinite loop: on a machine with
# no camera, `cv2.VideoCapture(i).isOpened()` is False for every index, so
# an unbounded `while not cap.isOpened()` never terminates and the
# "No valid camera index found" branch below was unreachable.
MAX_CAMERA_INDEX = 10
camera_index = 0
cap = None

while camera_index <= MAX_CAMERA_INDEX:
    cap = cv2.VideoCapture(camera_index)
    if cap.isOpened():
        break
    # Release the failed handle before trying the next index.
    cap.release()
    cap = None
    print(f"Error: Could not open camera with index {camera_index}. Trying index {camera_index + 1}.")
    camera_index += 1

if cap is not None and cap.isOpened():
    print(f"Camera opened successfully with index {camera_index}.")
else:
    print("Error: No valid camera index found.")
    exit()
30
  def classify_hand(img):
31
  imgOutput = img.copy()
32
+
33
+ # Detect hands
34
  hands, _ = detector.findHands(img)
35
 
36
  if hands:
 
60
  hGap = math.ceil((imgSize - hCal) / 2)
61
  imgWhite[hGap: hCal + hGap, :] = imgResize
62
 
63
+ # Get hand gesture prediction
64
  prediction, index = classifier.getPrediction(imgWhite, draw=False)
65
 
66
+ # Draw bounding box and label
67
  cv2.rectangle(imgOutput, (x - offset, y - offset - 70), (x - offset + 400, y - offset + 60 - 50), (0, 255, 0),
68
  cv2.FILLED)
69
  cv2.putText(imgOutput, labels[index], (x, y - 30), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 0), 2)
 
71
 
72
  return imgOutput
73
 
# Serve classify_hand as a live Gradio app: webcam frames in, the annotated
# output image (bounding box + predicted label) out. `live=True` re-runs the
# classifier on every new frame without a submit button.
iface = gr.Interface(fn=classify_hand, inputs='webcam', outputs='image', live=True)
iface.launch()