datasciencedojo commited on
Commit
f25864b
1 Parent(s): 1a3c0f2

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +44 -0
  2. requirements.txt +1 -0
app.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import cv2
import gradio as gr
import mediapipe as mp

# Shortcuts into the MediaPipe solutions API used below:
# drawing utilities render landmarks onto frames, drawing styles supply the
# default landmark/connection colors, and `hands` is the hand-tracking solution.
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_hands = mp.solutions.hands
7
+
8
def fun(img):
    """Detect hand landmarks in one webcam frame and return the annotated frame.

    Args:
        img: numpy frame from the Gradio webcam component — assumed RGB,
             shape (H, W, 3). TODO(review): confirm RGB against the installed
             Gradio version.

    Returns:
        The frame (RGB, same shape) with MediaPipe hand landmarks drawn on it.
    """
    # NOTE(review): a fresh Hands instance is created for every frame, so the
    # tracker can never carry state between frames and min_tracking_confidence
    # has no effect. Hoisting a single Hands instance to module level would
    # enable real tracking; kept per-call here to preserve behavior.
    with mp_hands.Hands(
            model_complexity=0,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5) as hands:
        # Mirror the frame and reverse channel order (RGB -> BGR) so drawing
        # happens on a BGR, selfie-view copy. Working on the copy avoids
        # mutating the caller's array (the original also set
        # img.flags.writeable = False on the input and never restored it).
        image = cv2.flip(img[:, :, ::-1], 1)
        # MediaPipe expects RGB input, so convert back just for inference.
        results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        if results.multi_hand_landmarks:
            for hand_landmarks in results.multi_hand_landmarks:
                mp_drawing.draw_landmarks(
                    image,
                    hand_landmarks,
                    mp_hands.HAND_CONNECTIONS,
                    mp_drawing_styles.get_default_hand_landmarks_style(),
                    mp_drawing_styles.get_default_hand_connections_style())
    # Undo the mirror and channel reversal so the output is RGB, unflipped.
    return cv2.flip(image[:, :, ::-1], 1)
27
+
28
# Gradio UI: streaming webcam input on the left, annotated frames on the right.
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            # Renamed from `input`, which shadowed the builtin of that name.
            webcam_input = gr.Webcam(streaming=True)
        with gr.Column():
            output_image = gr.outputs.Image()

    # Run `fun` on every streamed frame and show the annotated result.
    webcam_input.stream(fn=fun,
                        inputs=webcam_input,
                        outputs=output_image)

demo.launch(debug=True)
requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ mediapipe