kristyc committed on
Commit
fc9190a
β€’
1 Parent(s): 91b1b77

Setup inputs and outputs

Browse files
Files changed (5) hide show
  1. .gitattributes +1 -0
  2. README.md +1 -1
  3. app.py +46 -5
  4. examples/01.jpg +3 -0
  5. requirements.txt +1 -1
.gitattributes CHANGED
@@ -25,3 +25,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
25
  *.zip filter=lfs diff=lfs merge=lfs -text
26
  *.zstandard filter=lfs diff=lfs merge=lfs -text
27
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
25
  *.zip filter=lfs diff=lfs merge=lfs -text
26
  *.zstandard filter=lfs diff=lfs merge=lfs -text
27
  *tfevents* filter=lfs diff=lfs merge=lfs -text
28
+ *.jpg filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,5 +1,5 @@
1
  ---
2
- title: Mediapipe Face Detection
3
  emoji: πŸ˜ƒ
4
  colorFrom: indigo
5
  colorTo: gray
 
1
  ---
2
+ title: Mediapipe's Face Detection
3
  emoji: πŸ˜ƒ
4
  colorFrom: indigo
5
  colorTo: gray
app.py CHANGED
@@ -7,10 +7,51 @@ from google.protobuf.json_format import MessageToDict
7
 
8
  logger = log_utils.get_logger()
9
 
10
- import gradio as gr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
- def greet(name):
13
- return "Hello " + name + "!!"
 
14
 
15
- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
16
- iface.launch()
 
7
 
8
  logger = log_utils.get_logger()
9
 
10
+ mp_face_detection = mp.solutions.face_detection
11
+ mp_drawing = mp.solutions.drawing_utils
12
+
13
+ demo = gr.Blocks()
14
+
15
+ def detect_faces(image, model_selection, min_detection_confidence):
16
+ return {}, image
17
+
18
+ with demo:
19
+ gr.Markdown(
20
+ """
21
+ # MediaPipe's Face Detection
22
+ A demo of face detection using [Google's MediaPipe](https://google.github.io/mediapipe/solutions/face_detection.html)
23
+ """)
24
+
25
+ with gr.Row():
26
+ with gr.Column():
27
+
28
+ gr.Markdown("## Step 1: Configure the model")
29
+ with gr.Group():
30
+ model_selection = gr.Radio(label="Model selection", value=0, choices=[0, 1])
31
+ gr.Textbox(show_label=False, value="An integer index 0 or 1. Use 0 to select a short-range model that works best for faces within 2 meters from the camera, and 1 for a full-range model best for faces within 5 meters. For the full-range option, a sparse model is used for its improved inference speed.")
32
+
33
+ with gr.Group():
34
+ min_detection_confidence = gr.Slider(label="Min detection confidence", value=0.5, minimum=0.0, maximum=1.0, step=0.01)
35
+ gr.Textbox(show_label=False, value="Minimum confidence value ([0.0, 1.0]) from the face detection model for the detection to be considered successful.")
36
+
37
+ gr.Markdown("## Step 2: Select an image")
38
+ with gr.Tabs():
39
+ with gr.TabItem(label="Upload an image"):
40
+ upload_image = gr.Image(label="Input image")
41
+ gr.Examples(examples=[["examples/01.jpg"]], inputs=[upload_image])
42
+ upload_image_btn = gr.Button(value="Detect faces")
43
+ with gr.TabItem(label="Take a photo"):
44
+ webcam_image = gr.Image(label="Input image", source="webcam")
45
+ webcam_image_btn = gr.Button(value="Detect faces")
46
+
47
+ with gr.Column():
48
+ gr.Markdown("## Step 3: View the results")
49
+ with gr.Group():
50
+ raw_results = gr.JSON(label="Raw results")
51
+ output_image = gr.Image(label="Output image")
52
 
53
+ model_config_inputs = [model_selection, min_detection_confidence]
54
+ outputs = [raw_results, output_image]
55
+ upload_image_btn.click(fn=detect_faces, inputs=[upload_image, *model_config_inputs], outputs=outputs)
56
 
57
+ demo.launch()
 
examples/01.jpg ADDED

Git LFS Details

  • SHA256: c11527cf0e64df27f133eee6649a933058c7fe14210c9723a7a0d90e36bd08ce
  • Pointer size: 132 Bytes
  • Size of remote file: 1.78 MB
requirements.txt CHANGED
@@ -1,3 +1,3 @@
1
  gradio==3.0.26
2
  opencv-contrib-python==4.6.0.66
3
- mediapy==1.0.3
 
1
  gradio==3.0.26
2
  opencv-contrib-python==4.6.0.66
3
+ mediapipe==0.8.10.1