{ "cells": [ { "cell_type": "code", "execution_count": 5, "id": "f51609a4", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "& was unexpected at this time.\n" ] } ], "source": [ "!pip install gradio" ] }, { "cell_type": "code", "execution_count": 2, "id": "9d989bf2", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "2.16.1\n" ] } ], "source": [ "import tensorflow as tf\n", "print(tf.version.VERSION)" ] }, { "cell_type": "code", "execution_count": 5, "id": "d789a404", "metadata": {}, "outputs": [], "source": [ "from tensorflow.keras.models import load_model\n", "from tensorflow.keras.layers import TFSMLayer" ] }, { "cell_type": "code", "execution_count": 7, "id": "1ca99ffa", "metadata": {}, "outputs": [ { "ename": "OSError", "evalue": "SavedModel file does not exist at: mask_detector.model\\{saved_model.pbtxt|saved_model.pb}", "output_type": "error", "traceback": [ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[1;31mOSError\u001b[0m Traceback (most recent call last)", "Cell \u001b[1;32mIn[7], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m model \u001b[38;5;241m=\u001b[39m \u001b[43mTFSMLayer\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmask_detector.model\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcall_endpoint\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mserving_default\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m)\u001b[49m\n", "File \u001b[1;32md:\\Programming\\Projects\\Mask_detector\\maskVenv\\Lib\\site-packages\\keras\\src\\export\\export_lib.py:710\u001b[0m, in \u001b[0;36mTFSMLayer.__init__\u001b[1;34m(self, filepath, call_endpoint, call_training_endpoint, trainable, name, dtype)\u001b[0m\n\u001b[0;32m 707\u001b[0m \u001b[38;5;66;03m# Initialize an empty layer, then add_weight() etc. 
as needed.\u001b[39;00m\n\u001b[0;32m 708\u001b[0m \u001b[38;5;28msuper\u001b[39m()\u001b[38;5;241m.\u001b[39m\u001b[38;5;21m__init__\u001b[39m(trainable\u001b[38;5;241m=\u001b[39mtrainable, name\u001b[38;5;241m=\u001b[39mname, dtype\u001b[38;5;241m=\u001b[39mdtype)\n\u001b[1;32m--> 710\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_reloaded_obj \u001b[38;5;241m=\u001b[39m \u001b[43mtf\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msaved_model\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfilepath\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 712\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfilepath \u001b[38;5;241m=\u001b[39m filepath\n\u001b[0;32m 713\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcall_endpoint \u001b[38;5;241m=\u001b[39m call_endpoint\n", "File \u001b[1;32md:\\Programming\\Projects\\Mask_detector\\maskVenv\\Lib\\site-packages\\tensorflow\\python\\saved_model\\load.py:912\u001b[0m, in \u001b[0;36mload\u001b[1;34m(export_dir, tags, options)\u001b[0m\n\u001b[0;32m 910\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(export_dir, os\u001b[38;5;241m.\u001b[39mPathLike):\n\u001b[0;32m 911\u001b[0m export_dir \u001b[38;5;241m=\u001b[39m os\u001b[38;5;241m.\u001b[39mfspath(export_dir)\n\u001b[1;32m--> 912\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43mload_partial\u001b[49m\u001b[43m(\u001b[49m\u001b[43mexport_dir\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtags\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43moptions\u001b[49m\u001b[43m)\u001b[49m[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mroot\u001b[39m\u001b[38;5;124m\"\u001b[39m]\n\u001b[0;32m 913\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m result\n", "File \u001b[1;32md:\\Programming\\Projects\\Mask_detector\\maskVenv\\Lib\\site-packages\\tensorflow\\python\\saved_model\\load.py:1016\u001b[0m, in \u001b[0;36mload_partial\u001b[1;34m(export_dir, filters, tags, options)\u001b[0m\n\u001b[0;32m 1011\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m tags \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(tags, \u001b[38;5;28mset\u001b[39m):\n\u001b[0;32m 1012\u001b[0m \u001b[38;5;66;03m# Supports e.g. tags=SERVING and tags=[SERVING]. 
Sets aren't considered\u001b[39;00m\n\u001b[0;32m 1013\u001b[0m \u001b[38;5;66;03m# sequences for nest.flatten, so we put those through as-is.\u001b[39;00m\n\u001b[0;32m 1014\u001b[0m tags \u001b[38;5;241m=\u001b[39m nest\u001b[38;5;241m.\u001b[39mflatten(tags)\n\u001b[0;32m 1015\u001b[0m saved_model_proto, debug_info \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m-> 1016\u001b[0m \u001b[43mloader_impl\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mparse_saved_model_with_debug_info\u001b[49m\u001b[43m(\u001b[49m\u001b[43mexport_dir\u001b[49m\u001b[43m)\u001b[49m)\n\u001b[0;32m 1018\u001b[0m loader \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[0;32m 1019\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m (\u001b[38;5;28mlen\u001b[39m(saved_model_proto\u001b[38;5;241m.\u001b[39mmeta_graphs) \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m1\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m\n\u001b[0;32m 1020\u001b[0m saved_model_proto\u001b[38;5;241m.\u001b[39mmeta_graphs[\u001b[38;5;241m0\u001b[39m]\u001b[38;5;241m.\u001b[39mHasField(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mobject_graph_def\u001b[39m\u001b[38;5;124m\"\u001b[39m)):\n", "File \u001b[1;32md:\\Programming\\Projects\\Mask_detector\\maskVenv\\Lib\\site-packages\\tensorflow\\python\\saved_model\\loader_impl.py:59\u001b[0m, in \u001b[0;36mparse_saved_model_with_debug_info\u001b[1;34m(export_dir)\u001b[0m\n\u001b[0;32m 46\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mparse_saved_model_with_debug_info\u001b[39m(export_dir):\n\u001b[0;32m 47\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Reads the savedmodel as well as the graph debug info.\u001b[39;00m\n\u001b[0;32m 48\u001b[0m \n\u001b[0;32m 49\u001b[0m \u001b[38;5;124;03m Args:\u001b[39;00m\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 57\u001b[0m \u001b[38;5;124;03m parsed. 
Missing graph debug info file is fine.\u001b[39;00m\n\u001b[0;32m 58\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[1;32m---> 59\u001b[0m saved_model \u001b[38;5;241m=\u001b[39m \u001b[43mparse_saved_model\u001b[49m\u001b[43m(\u001b[49m\u001b[43mexport_dir\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 61\u001b[0m debug_info_path \u001b[38;5;241m=\u001b[39m file_io\u001b[38;5;241m.\u001b[39mjoin(\n\u001b[0;32m 62\u001b[0m path_helpers\u001b[38;5;241m.\u001b[39mget_debug_dir(export_dir),\n\u001b[0;32m 63\u001b[0m constants\u001b[38;5;241m.\u001b[39mDEBUG_INFO_FILENAME_PB)\n\u001b[0;32m 64\u001b[0m debug_info \u001b[38;5;241m=\u001b[39m graph_debug_info_pb2\u001b[38;5;241m.\u001b[39mGraphDebugInfo()\n", "File \u001b[1;32md:\\Programming\\Projects\\Mask_detector\\maskVenv\\Lib\\site-packages\\tensorflow\\python\\saved_model\\loader_impl.py:119\u001b[0m, in \u001b[0;36mparse_saved_model\u001b[1;34m(export_dir)\u001b[0m\n\u001b[0;32m 117\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mIOError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCannot parse file \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mpath_to_pbtxt\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mstr\u001b[39m(e)\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m\"\u001b[39m) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01me\u001b[39;00m\n\u001b[0;32m 118\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m--> 119\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mIOError\u001b[39;00m(\n\u001b[0;32m 120\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mSavedModel file does not exist at: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mexport_dir\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;132;01m{\u001b[39;00mos\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39msep\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 121\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m{{\u001b[39;00m\u001b[38;5;132;01m{\u001b[39;00mconstants\u001b[38;5;241m.\u001b[39mSAVED_MODEL_FILENAME_PBTXT\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m|\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 122\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mconstants\u001b[38;5;241m.\u001b[39mSAVED_MODEL_FILENAME_PB\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;130;01m}}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m 123\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m saved_model\n", "\u001b[1;31mOSError\u001b[0m: SavedModel file does not exist at: mask_detector.model\\{saved_model.pbtxt|saved_model.pb}" ] } ], "source": [ "model = TFSMLayer(\"mask_detector.model\", call_endpoint='serving_default')" ] }, { "cell_type": "code", "execution_count": 3, "id": "c3735b5c", "metadata": {}, "outputs": [ { "ename": "ValueError", "evalue": "File format not supported: filepath=mask_detector.model. Keras 3 only supports V3 `.keras` files and legacy H5 format files (`.h5` extension). Note that the legacy SavedModel format is not supported by `load_model()` in Keras 3. 
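{ "cell_type": "code", "execution_count": null, "id": "b7c1d2e0", "metadata": {}, "outputs": [], "source": [
"# The OSError above means TFSMLayer expected a SavedModel directory containing\n",
"# saved_model.pb. A hedged diagnostic sketch: inspect \"mask_detector.model\" to see\n",
"# whether it is a SavedModel directory or a single HDF5 file with an unusual\n",
"# extension (only the path from the cells above is assumed).\n",
"import os\n",
"path = \"mask_detector.model\"\n",
"if os.path.isdir(path):\n",
"    print(\"directory contents:\", os.listdir(path))\n",
"elif os.path.isfile(path):\n",
"    with open(path, \"rb\") as f:\n",
"        magic = f.read(8)\n",
"    # HDF5 files begin with the signature b'\\\\x89HDF\\\\r\\\\n\\\\x1a\\\\n'\n",
"    print(\"file signature:\", magic)\n",
"else:\n",
"    print(\"path does not exist\")" ] },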
{ "cell_type": "code", "execution_count": 3, "id": "c3735b5c", "metadata": {}, "outputs": [ { "ename": "ValueError", "evalue": "File format not supported: filepath=mask_detector.model. Keras 3 only supports V3 `.keras` files and legacy H5 format files (`.h5` extension). Note that the legacy SavedModel format is not supported by `load_model()` in Keras 3. In order to reload a TensorFlow SavedModel as an inference-only layer in Keras 3, use `keras.layers.TFSMLayer(mask_detector.model, call_endpoint='serving_default')` (note that your `call_endpoint` might have a different name).", "output_type": "error", "traceback": [
"ValueError                                Traceback (most recent call last)\n",
"Cell In[3], line 2\n",
"      1 # load the face mask detector model from disk\n",
"----> 2 maskNet = load_model(\"mask_detector.model\")\n",
"\n",
"ValueError: File format not supported: filepath=mask_detector.model. Keras 3 only supports V3 `.keras` files and legacy H5 format files (`.h5` extension). Note that the legacy SavedModel format is not supported by `load_model()` in Keras 3. In order to reload a TensorFlow SavedModel as an inference-only layer in Keras 3, use `keras.layers.TFSMLayer(mask_detector.model, call_endpoint='serving_default')` (note that your `call_endpoint` might have a different name)." ] } ], "source": [
"# load the face mask detector model from disk\n",
"maskNet = load_model(\"mask_detector.model\")" ] },
{ "cell_type": "code", "execution_count": 1, "id": "7a0dea2a", "metadata": {}, "outputs": [], "source": [
"# import the necessary packages\n",
"from tensorflow.keras.applications.mobilenet_v2 import preprocess_input\n",
"from tensorflow.keras.preprocessing.image import img_to_array\n",
"from tensorflow.keras.models import load_model\n",
"from imutils.video import VideoStream\n",
"import numpy as np\n",
"import imutils\n",
"import time\n",
"import cv2\n",
"import os" ] },
{ "cell_type": "code", "execution_count": 7, "id": "3ea65ae1", "metadata": {}, "outputs": [], "source": [
"import gradio as gr" ] },
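{ "cell_type": "code", "execution_count": null, "id": "e4f5a6b7", "metadata": {}, "outputs": [], "source": [
"# Keras 3 cannot load_model() a legacy SavedModel directory (see the ValueError\n",
"# above). A minimal sketch of the workaround, assuming \"mask_detector.model\" is a\n",
"# valid SavedModel whose endpoint really is named 'serving_default': wrap the\n",
"# TFSMLayer in a Sequential so it exposes .predict() like the model used below.\n",
"from tensorflow import keras\n",
"\n",
"if os.path.isdir(\"mask_detector.model\"):\n",
"    mask_layer = TFSMLayer(\"mask_detector.model\", call_endpoint=\"serving_default\")\n",
"    maskNet = keras.Sequential([keras.Input(shape=(224, 224, 3)), mask_layer])\n",
"    # note: a TFSMLayer endpoint may return a dict keyed by output name, in which\n",
"    # case predictions need unpacking before the (mask, withoutMask) split below" ] },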
{ "cell_type": "code", "execution_count": 77, "id": "616836d6", "metadata": {}, "outputs": [], "source": [
"def detect_and_predict_mask(frame, faceNet, maskNet):\n",
"    try:\n",
"        # grab the dimensions of the frame and then construct a blob from it\n",
"        (h, w) = frame.shape[:2]\n",
"        blob = cv2.dnn.blobFromImage(frame, 1.0, (224, 224), (104.0, 177.0, 123.0))\n",
"\n",
"        # pass the blob through the network and obtain the face detections\n",
"        faceNet.setInput(blob)\n",
"        detections = faceNet.forward()\n",
"        print(detections.shape)\n",
"\n",
"        # initialize our list of faces, their corresponding locations, and the list of predictions from our face mask network\n",
"        faces = []\n",
"        locs = []\n",
"        preds = []\n",
"        # loop over the detections\n",
"        for i in range(0, detections.shape[2]):\n",
"            # extract the confidence (i.e., probability) associated with the detection\n",
"            confidence = detections[0, 0, i, 2]\n",
"\n",
"            # filter out weak detections by ensuring the confidence is greater than the minimum confidence\n",
"            if confidence > 0.5:\n",
"                # compute the (x, y)-coordinates of the bounding box for the object\n",
"                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n",
"                (startX, startY, endX, endY) = box.astype(\"int\")\n",
"\n",
"                # ensure the bounding boxes fall within the dimensions of the frame\n",
"                (startX, startY) = (max(0, startX), max(0, startY))\n",
"                (endX, endY) = (min(w - 1, endX), min(h - 1, endY))\n",
"\n",
"                # extract the face ROI, convert it from BGR to RGB channel ordering, resize it to 224x224, and preprocess it\n",
"                face = frame[startY:endY, startX:endX]\n",
"                # skip degenerate boxes that would give an empty ROI\n",
"                if face.size == 0:\n",
"                    continue\n",
"                face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)\n",
"                face = cv2.resize(face, (224, 224))\n",
"                face = img_to_array(face)\n",
"                face = preprocess_input(face)\n",
"\n",
"                # add the face and bounding boxes to their respective lists\n",
"                faces.append(face)\n",
"                locs.append((startX, startY, endX, endY))\n",
"\n",
"        # only make predictions if at least one face was detected\n",
"        if len(faces) > 0:\n",
"            # for faster inference we'll make batch predictions on *all* faces at the same time rather than one-by-one predictions in the above 'for' loop\n",
"            faces = np.array(faces, dtype=\"float32\")\n",
"            preds = maskNet.predict(faces, batch_size=32)\n",
"\n",
"        # return a 2-tuple of the face locations and their corresponding predictions\n",
"        return (locs, preds)\n",
"    except Exception as e:\n",
"        print(e)\n",
"        # return empty results rather than None so callers can unpack safely\n",
"        return ([], [])" ] },
{ "cell_type": "code", "execution_count": 47, "id": "da40c96f", "metadata": {}, "outputs": [], "source": [
"# load our serialized face detector model from disk\n",
"prototxtPath = r\"deploy.prototxt.txt\"\n",
"weightsPath = r\"res10_300x300_ssd_iter_140000.caffemodel\"\n",
"faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)\n",
"\n",
"# load the face mask detector model from disk\n",
"maskNet = load_model(\"mask_detector.model\")" ] },
{ "cell_type": "code", "execution_count": 106, "id": "b5751c2c", "metadata": {}, "outputs": [], "source": [
"import numpy" ] },
{ "cell_type": "code", "execution_count": 134, "id": "04f7d873", "metadata": {}, "outputs": [], "source": [
"def webcam_stream(frame):\n",
"    # Gradio calls this once per frame, so process a single frame and return it;\n",
"    # a while True / cv2.imshow loop is unnecessary inside a Gradio callback\n",
"    if frame is None:\n",
"        return None\n",
"    try:\n",
"        # resize the frame to have a maximum width of 400 pixels\n",
"        frame = imutils.resize(frame, width=400)\n",
"\n",
"        # detect faces in the frame and determine if they are wearing a face mask or not\n",
"        (locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)\n",
"\n",
"        # loop over the detected face locations and their corresponding predictions\n",
"        for (box, pred) in zip(locs, preds):\n",
"            # unpack the bounding box and predictions\n",
"            (startX, startY, endX, endY) = box\n",
"            (mask, withoutMask) = pred\n",
"\n",
"            # determine the class label and color we'll use to draw the bounding box and text\n",
"            label = \"Mask\" if mask > withoutMask else \"No Mask\"\n",
"            color = (0, 255, 0) if label == \"Mask\" else (0, 0, 255)\n",
"\n",
"            # include the probability in the label\n",
"            label = \"{}: {:.2f}%\".format(label, max(mask, withoutMask) * 100)\n",
"\n",
"            # draw the label and bounding box rectangle on the output frame\n",
"            cv2.putText(frame, label, (startX, startY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)\n",
"            cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)\n",
"    except Exception as e:\n",
"        print(e)\n",
"\n",
"    # return the annotated frame for Gradio to display\n",
"    return frame" ] },
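{ "cell_type": "code", "execution_count": null, "id": "9f8e7d6c", "metadata": {}, "outputs": [], "source": [
"# A hedged way to sanity-check the pipeline on a still image before wiring up the\n",
"# webcam. \"test.jpg\" and \"test_out.jpg\" are hypothetical filenames; any local photo\n",
"# with a face should do. Reuses the faceNet/maskNet loaded above.\n",
"image = cv2.imread(\"test.jpg\")  # hypothetical test image (BGR, as OpenCV reads it)\n",
"if image is not None:\n",
"    locs, preds = detect_and_predict_mask(image, faceNet, maskNet)\n",
"    for (startX, startY, endX, endY), (mask, withoutMask) in zip(locs, preds):\n",
"        label = \"Mask\" if mask > withoutMask else \"No Mask\"\n",
"        color = (0, 255, 0) if label == \"Mask\" else (0, 0, 255)\n",
"        cv2.rectangle(image, (startX, startY), (endX, endY), color, 2)\n",
"    cv2.imwrite(\"test_out.jpg\", image)  # hypothetical output path" ] },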
{ "cell_type": "code", "execution_count": 132, "id": "5c845891", "metadata": {}, "outputs": [], "source": [
"# def webcam_stream(vid):\n",
"#     return vid" ] },
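{ "cell_type": "code", "execution_count": null, "id": "1a2b3c4d", "metadata": {}, "outputs": [], "source": [
"# Gradio hands the callback an RGB array, while the Caffe face detector's mean\n",
"# subtraction and OpenCV's drawing conventions assume BGR. A minimal hedged\n",
"# adapter, assuming webcam_stream expects BGR as written above (the interface\n",
"# below could use webcam_stream_rgb in place of webcam_stream):\n",
"def webcam_stream_rgb(frame):\n",
"    if frame is None:\n",
"        return None\n",
"    bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)  # to BGR for detection/drawing\n",
"    out = webcam_stream(bgr)\n",
"    if out is None:\n",
"        return None\n",
"    return cv2.cvtColor(out, cv2.COLOR_BGR2RGB)  # back to RGB for display" ] },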
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 133, "metadata": {}, "output_type": "execute_result" }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "(1, 1, 200, 7)\n", "1/1 [==============================] - 0s 64ms/step\n", "\n", "\n", "(1, 1, 200, 7)\n", "1/1 [==============================] - 0s 67ms/step\n", "\n" ] } ], "source": [ "webcam = gr.Image(source=\"webcam\",every=\"float\",mirror_webcam=True)\n", "output = gr.Image(source=\"webcam\")\n", "# Create a Gradio interface with the webcam_stream function\n", "app = gr.Interface(webcam_stream,inputs=webcam,outputs=output,live=True)\n", "\n", "# Start the app\n", "app.launch() " ] }, { "cell_type": "code", "execution_count": 139, "id": "d6124d93", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Closing server running on port: 7871\n", "Closing server running on port: 7882\n", "Closing server running on port: 7887\n", "Closing server running on port: 7867\n", "Closing server running on port: 7868\n", "Closing server running on port: 7866\n", "Closing server running on port: 7879\n", "Closing server running on port: 7865\n", "Closing server running on port: 7860\n", "Closing server running on port: 7876\n", "Closing server running on port: 7868\n", "Closing server running on port: 7863\n", "Closing server running on port: 7867\n", "Closing server running on port: 7860\n", "Closing server running on port: 7873\n", "Closing server running on port: 7886\n", "Closing server running on port: 7888\n", "Closing server running on port: 7872\n", "Closing server running on port: 7870\n", "Closing server running on port: 7870\n", "Closing server running on port: 7869\n", "Closing server running on port: 7882\n", "Closing server running on port: 7863\n", "Closing server running on port: 7892\n", "Closing server running on port: 7890\n", "Closing server running on port: 7875\n", "Closing server running on port: 7889\n", "Closing server running on port: 7869\n", "Closing server running on port: 7878\n", "Closing server running on port: 7885\n", "Closing server running on port: 7877\n", "Closing server running on port: 7884\n", "Closing server running on port: 7891\n", "Closing server running on port: 7880\n", "Closing server running on port: 7881\n", "Closing server running on port: 7871\n", "Closing server running on port: 7881\n", "Closing server running on port: 7861\n", "Closing server running on port: 7876\n", "Closing server running on port: 7878\n", "Closing server running on port: 7883\n", "Closing server running on port: 7874\n", "Closing server running on port: 7864\n", "Closing server running on port: 7862\n", "Closing server running on port: 7877\n", "Closing server running on port: 7873\n", "Closing server running on port: 7879\n", "Closing server running on port: 7866\n", "Closing server running on port: 7875\n", "Closing server running on port: 7874\n", "Closing server running on port: 7865\n", "Closing server running on port: 7872\n", "Closing server running on port: 7861\n", "Closing server running on port: 7862\n", "Closing server running on port: 7864\n", "Closing server running on port: 7880\n" ] } ], "source": [ "gr.close_all()" ] }, { "cell_type": "code", "execution_count": 138, "id": "efd8e5b3", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Closing server running on port: 7892\n" ] } ], "source": [ "app.close()" ] }, { "cell_type": "code", 
"execution_count": null, "id": "29d9ea46", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.5" } }, "nbformat": 4, "nbformat_minor": 5 }