from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
import numpy as np
import argparse
import urllib.request
import cv2
import gradio as gr
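# Two-stage pipeline: an OpenCV DNN SSD face detector localizes faces,
# then a Keras MobileNetV2-based classifier labels each face as
# mask / no mask. Gradio provides the webcam UI.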
def detect_and_predict_mask(frame, faceNet, maskNet):
    """Detect faces in a BGR frame and classify each as mask / no mask.
    Returns the face bounding boxes and the matching predictions."""
    # Build a 300x300 blob; the mean values are the standard BGR means
    # for this SSD face detector.
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
        (104.0, 177.0, 123.0))

    # Run the face detector.
    faceNet.setInput(blob)
    detections = faceNet.forward()

    faces = []
    locs = []
    preds = []

    for i in range(0, detections.shape[2]):
        confidence = detections[0, 0, i, 2]

        # Filter out weak detections.
        if confidence > args["confidence"]:
            # Scale the box back to frame coordinates and clamp it
            # to the frame boundaries.
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            (startX, startY) = (max(0, startX), max(0, startY))
            (endX, endY) = (min(w - 1, endX), min(h - 1, endY))

            # Extract the face ROI and preprocess it for MobileNetV2.
            face = frame[startY:endY, startX:endX]
            if face.size == 0:
                continue
            face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
            face = cv2.resize(face, (224, 224))
            face = img_to_array(face)
            face = preprocess_input(face)

            faces.append(face)
            locs.append((startX, startY, endX, endY))

    if len(faces) > 0:
        # Batch all face ROIs into one array so the classifier runs a
        # single inference call rather than one per face.
        faces = np.array(faces, dtype="float32")
        preds = maskNet.predict(faces, batch_size=32)

    return (locs, preds)
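# Quick sanity check for the helper (hypothetical path; assumes a BGR
# frame such as one loaded with cv2.imread):
#   frame = cv2.imread("example.jpg")
#   locs, preds = detect_and_predict_mask(frame, faceNet, maskNet)
#   print(len(locs), "face(s) found")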
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", type=str, default="model.h5",
    help="path to trained face mask detector model")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
    help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
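# Example invocation (script name assumed):
#   python app.py --model model.h5 --confidence 0.5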
print("[INFO] loading face detector model...")
# URLs of the deploy.prototxt and Caffe model files hosted in the Hugging Face Space
prototxt_url = "https://huggingface.co/spaces/Yogasta/Real_Time_Face_Mask_Detector/raw/main/deploy.prototxt"
weights_url = "https://huggingface.co/spaces/Yogasta/Real_Time_Face_Mask_Detector/resolve/main/res10_300x300_ssd_iter_140000.caffemodel"
# Define the local paths where the files will be downloaded
prototxt_path = "deploy.prototxt"
weights_path = "res10_300x300_ssd_iter_140000.caffemodel"
# Download the files
urllib.request.urlretrieve(prototxt_url, prototxt_path)
urllib.request.urlretrieve(weights_url, weights_path)
faceNet = cv2.dnn.readNet(prototxt_path, weights_path)
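# readNet infers the framework from the file extensions here
# (.prototxt / .caffemodel -> Caffe).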
print("[INFO] loading face mask detector model...")
maskNet = load_model(args["model"])
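# Assumption, based on the usage in process_image below: the classifier
# ends in a single sigmoid unit whose output is P(no mask).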
print("[INFO] starting video stream...")
def process_image(image):
    # Gradio delivers frames as RGB; the face detector and the drawing
    # colors below expect OpenCV's BGR ordering, so convert first.
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    (locs, preds) = detect_and_predict_mask(image, faceNet, maskNet)

    for (box, pred) in zip(locs, preds):
        (startX, startY, endX, endY) = box

        # The classifier outputs a single probability for "no mask".
        withoutMask = pred[0]
        mask = 1 - withoutMask

        label = "Mask" if mask > withoutMask else "No Mask"
        color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
        label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)

        cv2.putText(image, label, (startX, startY - 10),
            cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
        cv2.rectangle(image, (startX, startY), (endX, endY), color, 2)

    # Convert back to RGB for display in Gradio.
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
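# Hypothetical offline test (outside Gradio), mimicking the RGB input
# that gr.Image(type="numpy") delivers:
#   img = cv2.cvtColor(cv2.imread("test.jpg"), cv2.COLOR_BGR2RGB)
#   annotated = process_image(img)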
iface = gr.Interface(
    fn=process_image,
    inputs=gr.Image(source="webcam", type="numpy"),
    outputs=gr.Image(type="numpy"),
    live=True,
    title="Real-Time Face Mask Detection",
    description="A real-time face mask detection application using webcam input. Click 'Start' to activate the camera and see the detection results."
)
iface.launch()
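# launch() serves the app locally; pass share=True for a temporary
# public Gradio link if needed.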