import gradio as gr
import keras
import cv2
import numpy as np
from PIL import Image
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
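
# Gradio demo for face mask detection: faces are located with an OpenCV Haar
# cascade, each face crop is classified by a fine-tuned MobileNetV2 model into
# three classes (no mask / mask worn correctly / mask worn incorrectly), and
# the annotated image plus the per-face labels are returned to the UI.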

def modelpred(img):
  # Read the input image from disk (Gradio passes a file path).
  frame = cv2.imread(img)

  # Detect faces on the grayscale image with the Haar cascade.
  gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
  faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5,
                                       minSize=(60, 60), flags=cv2.CASCADE_SCALE_IMAGE)

  preds = []
  locs = []
  label_list = []
  for (x, y, w, h) in faces:
    # Crop the face, convert BGR -> RGB, and resize to the MobileNetV2 input size.
    face_frame = frame[y:y+h, x:x+w]
    face_frame = cv2.cvtColor(face_frame, cv2.COLOR_BGR2RGB)
    face_frame = cv2.resize(face_frame, (224, 224))
    face_frame = img_to_array(face_frame)
    face_frame = np.expand_dims(face_frame, axis=0)
    face_frame = preprocess_input(face_frame)

    # Keep the bounding box and the model's prediction for this face.
    locs.append((x, y, x + w, y + h))
    preds.append(model.predict(face_frame))
  for (box, pred) in zip(locs, preds):
    (startX, startY, endX, endY) = box
    # The model outputs three scores, unpacked here as
    # (without mask, mask worn correctly, mask worn incorrectly).
    (withoutMask, mask, notproper) = pred[0]

    # Determine the class label and the colour (BGR) used to draw the
    # bounding box and text.
    if mask > withoutMask and mask > notproper:
      label = "Great you have worn the mask correctly"
      color = (0, 255, 0)
    elif withoutMask > notproper:
      label = "Please wear a mask"
      color = (0, 0, 255)
    else:
      label = "Please wear the mask properly"
      color = (255, 140, 0)

    # Include the winning probability in the label.
    label = "{}: {:.2f}%".format(label, max(mask, withoutMask, notproper) * 100)
    label_list.append(label)

    # Draw the label and bounding box on the frame.
    cv2.putText(frame, label, (startX, startY - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
    cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
  # Convert the annotated frame back to RGB and return it as a PIL image,
  # together with the list of per-face labels.
  img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
  im_pil = Image.fromarray(img)
  return (im_pil, label_list)
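
# Quick sanity check outside the UI (illustrative; assumes the saved model and
# one of the bundled example images are present in the working directory, and
# that the model and cascade below have already been loaded):
#   annotated, labels = modelpred("images-3.jpeg")
#   annotated.save("annotated.jpeg")
#   print(labels)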
# Load the fine-tuned MobileNetV2 classifier and the Haar face detector before
# wiring up the interface.
model = keras.models.load_model('model')
faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_alt2.xml')

# Gradio 2.x-style input/output components.
imgpath = gr.inputs.Image(type="filepath")
# webcam = gr.inputs.Image(source="webcam", type="filepath", optional=True)

iface = gr.Interface(
    fn=modelpred,
    inputs=imgpath,
    outputs=[gr.outputs.Image(type="pil"), "text"],
    title="Face Mask Detection using Deep Neural Networks",
    description="""Implementation of an efficient neural network that detects people and classifies them with high accuracy into 3 classes: those who have worn face masks correctly, those who have worn masks incorrectly, and those who have not worn them at all. A MobileNetV2 network was implemented and fine-tuned for this task, achieving an accuracy of 92.02%.""",
    allow_flagging="never",
    live=False,
    examples=[["images-3.jpeg"],
              ["power-family-with-father-mother-daughter-wearing-medical-face-mask-protect-2019-ncov-covid-19-corona-virus-stay-home-concept_73622-1419.jpg"],
              ["3000-2.jpg"]])

iface.launch(debug=True)