File size: 4,889 Bytes
c60e678
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ae2bfd10",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:absl:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "🛑 Exit triggered.\n",
      "✅ Releasing camera and closing windows.\n"
     ]
    }
   ],
   "source": [
    "# Real-time face-emotion demo setup: load a Haar face detector and a\n",
    "# Keras emotion classifier, then open the default webcam for capture.\n",
    "import cv2\n",
    "import numpy as np\n",
    "import threading\n",
    "from tensorflow.keras.models import load_model\n",
    "from tensorflow.keras.preprocessing.image import img_to_array\n",
    "\n",
    "\n",
    "# Limit OpenCV's internal thread pool; per-face work is threaded explicitly later.\n",
    "cv2.setNumThreads(1)\n",
    "\n",
    "\n",
    "\n",
    "# Frontal-face Haar cascade shipped with the OpenCV install.\n",
    "face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + \"haarcascade_frontalface_default.xml\")\n",
    "if face_cascade.empty():\n",
    "    print(\"Error: Could not load face cascade.\")\n",
    "    exit()\n",
    "\n",
    "\n",
    "# NOTE(review): hard-coded machine-specific Windows path -- adjust per machine.\n",
    "model_path = \"G:/aman office/practise/face_emotion_detection/face_emotion_detection.h5\"  \n",
    "try:\n",
    "    model = load_model(model_path)\n",
    "except Exception as e:\n",
    "    print(f\"Error loading model: {e}\")\n",
    "    exit()\n",
    "\n",
    "# Label order must match the model's output layer -- TODO confirm against training.\n",
    "class_names = [\"Angry\", \"Disgust\", \"Fear\", \"Happy\", \"Neutral\", \"Sad\", \"Surprise\"]\n",
    "\n",
    "\n",
    "# Device 0 with the DirectShow backend (Windows); fails fast if unavailable.\n",
    "cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)  \n",
    "if not cap.isOpened():\n",
    "    print(\"Error: Could not access the webcam. Try restarting or using a different device ID.\")\n",
    "    exit()\n",
    "\n",
    "cv2.namedWindow(\"Real-time Emotion Detection\", cv2.WINDOW_NORMAL)\n",
    "cv2.resizeWindow(\"Real-time Emotion Detection\", 800, 600)\n",
    "\n",
    "def process_face(face_gray, output_list, idx):\n",
    "    \"\"\"Classify one grayscale face crop and store its prediction vector in output_list[idx].\n",
    "\n",
    "    On any failure an all-zero vector is stored instead, so the caller can\n",
    "    always read output_list[idx] after joining the worker thread.\n",
    "    \"\"\"\n",
    "    try:\n",
    "        face = cv2.resize(face_gray, (48, 48))\n",
    "        # img_to_array appends the channel axis for 2-D input -> (48, 48, 1),\n",
    "        # so only the batch axis is still missing. The previous extra\n",
    "        # expand_dims(axis=-1) produced a (1, 48, 48, 1, 1) tensor, which a\n",
    "        # (48, 48, 1)-input model rejects and every face fell into the except\n",
    "        # branch with a zero vector.\n",
    "        face = img_to_array(face) / 255.0\n",
    "        face = np.expand_dims(face, axis=0)  # -> (1, 48, 48, 1)\n",
    "        predictions = model.predict(face, verbose=0)[0]\n",
    "        output_list[idx] = predictions\n",
    "    except Exception as e:\n",
    "        # Best-effort: report and fall back to an all-zero distribution.\n",
    "        print(f\"Prediction error: {e}\")\n",
    "        output_list[idx] = np.zeros(len(class_names))\n",
    "\n",
    "try:\n",
    "    # Main capture loop: detect faces, classify each in its own thread,\n",
    "    # draw the annotated boxes, and exit on 'q' or when the window closes.\n",
    "    while True:\n",
    "        ret, frame = cap.read()\n",
    "        if not ret:\n",
    "            print(\"Could not read frame.\")\n",
    "            break\n",
    "\n",
    "        # Haar cascades operate on grayscale input.\n",
    "        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n",
    "        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5, minSize=(30, 30))\n",
    "\n",
    "        # One worker thread per detected face; each writes only its own\n",
    "        # results slot, so no lock is needed for the list itself.\n",
    "        # NOTE(review): concurrent model.predict() calls from several threads\n",
    "        # may not be thread-safe for every Keras backend -- confirm.\n",
    "        threads = []\n",
    "        results = [None] * len(faces)\n",
    "\n",
    "        for i, (x, y, w, h) in enumerate(faces):\n",
    "            face = gray[y:y + h, x:x + w]\n",
    "            thread = threading.Thread(target=process_face, args=(face, results, i))\n",
    "            thread.start()\n",
    "            threads.append((thread, x, y, w, h))\n",
    "\n",
    "        # Join in order and draw a label for every face that produced a result.\n",
    "        for i, (thread, x, y, w, h) in enumerate(threads):\n",
    "            thread.join()\n",
    "            predictions = results[i]\n",
    "            if predictions is None:\n",
    "                continue\n",
    "\n",
    "            predicted_index = np.argmax(predictions)\n",
    "            predicted_class = class_names[predicted_index]\n",
    "            confidence = round(predictions[predicted_index] * 100, 2)\n",
    "\n",
    "            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n",
    "            cv2.putText(frame, f\"{predicted_class}: {confidence}%\", (x, y - 10),\n",
    "                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)\n",
    "\n",
    "        cv2.imshow(\"Real-time Emotion Detection\", frame)\n",
    "\n",
    "        # Exit on 'q' or when the user closes the window (visibility < 1).\n",
    "        key = cv2.waitKey(10) & 0xFF\n",
    "        if key == ord(\"q\") or cv2.getWindowProperty(\"Real-time Emotion Detection\", cv2.WND_PROP_VISIBLE) < 1:\n",
    "            print(\"Exit triggered.\")\n",
    "            break\n",
    "\n",
    "finally:\n",
    "    # Always release the camera and destroy windows, even on error/interrupt.\n",
    "    print(\"Releasing camera and closing windows.\")\n",
    "    if cap.isOpened():\n",
    "        cap.release()\n",
    "    cv2.destroyAllWindows()\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}