{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ed90912e-37c1-4a50-b864-53843c862470",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "import cv2          # OpenCV: image decoding, resizing, encoding\n",
    "import dlib         # dlib: face detection\n",
    "import numpy as np  # numpy: array handling\n",
    "\n",
    "# dlib frontal-face detector and 68-point landmark predictor\n",
    "detector = dlib.get_frontal_face_detector()\n",
    "predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')\n",
    "\n",
    "# Input image directory. Forward slashes work on Windows as well and avoid\n",
    "# the invalid '\\I' escape sequence the old backslash literal produced.\n",
    "path_read = \"./ImageFiles/files\"\n",
    "# Output directory for the cropped single-face images.\n",
    "path_save = \"./ImageFiles/files1\"\n",
    "os.makedirs(path_save, exist_ok=True)  # don't crash if it already exists\n",
    "\n",
    "num = 0\n",
    "for file_name in os.listdir(path_read):\n",
    "    # Full path; np.fromfile + cv2.imdecode handles non-ASCII paths.\n",
    "    full_path = path_read + \"/\" + file_name\n",
    "    img = cv2.imdecode(np.fromfile(full_path, dtype=np.uint8), cv2.IMREAD_UNCHANGED)\n",
    "    if img is None:\n",
    "        continue  # skip unreadable / non-image files\n",
    "    img_height, img_width = img.shape[:2]\n",
    "\n",
    "    # dlib detection; 1 = upsample the image once for better recall\n",
    "    dets = detector(img, 1)\n",
    "    print(\"人脸数：\", len(dets))\n",
    "    # Keep only images containing exactly one face (same effect as the\n",
    "    # original per-iteration 'if len(dets) > 1: continue' guard).\n",
    "    if len(dets) != 1:\n",
    "        continue\n",
    "    for d in dets:\n",
    "        num = num + 1\n",
    "        # Clamp the rectangle to the image bounds: dlib may return\n",
    "        # coordinates outside the frame, and negative values would\n",
    "        # silently wrap around in the old per-pixel copy loop.\n",
    "        top = max(d.top(), 0)\n",
    "        left = max(d.left(), 0)\n",
    "        bottom = min(d.bottom(), img_height)\n",
    "        right = min(d.right(), img_width)\n",
    "        if bottom <= top or right <= left:\n",
    "            continue  # degenerate rectangle, nothing to crop\n",
    "        # Vectorized crop instead of the O(h*w) per-pixel copy loop.\n",
    "        img_blank = img[top:bottom, left:right].copy()\n",
    "        img_blank = cv2.resize(img_blank, (200, 200), interpolation=cv2.INTER_CUBIC)\n",
    "        # imencode + tofile writes correctly to non-ASCII paths.\n",
    "        cv2.imencode('.jpg', img_blank)[1].tofile(path_save + \"/file\" + str(num) + \".jpg\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3846e479-5a15-4107-b6a2-feac16957da4",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os, shutil\n",
    "\n",
    "# Source directory holding the cropped face images from the previous step.\n",
    "original_dataset_dir = './ImageFiles/files1'\n",
    "\n",
    "# Root of the new train / validation / test split.\n",
    "base_dir = './ImageFiles/files2'\n",
    "\n",
    "# train / validation / test directories, each with 'smile' and 'unsmile'\n",
    "# class sub-directories (the layout flow_from_directory expects).\n",
    "train_dir = os.path.join(base_dir, 'train')\n",
    "validation_dir = os.path.join(base_dir, 'validation')\n",
    "test_dir = os.path.join(base_dir, 'test')\n",
    "\n",
    "train_smile_dir = os.path.join(train_dir, 'smile')\n",
    "train_unsmile_dir = os.path.join(train_dir, 'unsmile')\n",
    "validation_smile_dir = os.path.join(validation_dir, 'smile')\n",
    "validation_unsmile_dir = os.path.join(validation_dir, 'unsmile')\n",
    "test_smile_dir = os.path.join(test_dir, 'smile')\n",
    "test_unsmile_dir = os.path.join(test_dir, 'unsmile')\n",
    "\n",
    "# makedirs(..., exist_ok=True) creates missing parents and does not crash\n",
    "# when the cell is re-run (os.mkdir raised FileExistsError on re-runs).\n",
    "for directory in (train_smile_dir, train_unsmile_dir,\n",
    "                  validation_smile_dir, validation_unsmile_dir,\n",
    "                  test_smile_dir, test_unsmile_dir):\n",
    "    os.makedirs(directory, exist_ok=True)\n",
    "\n",
    "def copy_range(start, stop, dst_dir):\n",
    "    \"\"\"Copy file<i>.jpg for i in [start, stop) from the source dir to dst_dir.\"\"\"\n",
    "    for i in range(start, stop):\n",
    "        fname = 'file{}.jpg'.format(i)\n",
    "        shutil.copyfile(os.path.join(original_dataset_dir, fname),\n",
    "                        os.path.join(dst_dir, fname))\n",
    "\n",
    "# Smile images: 1-899 train, 900-1349 validation, 1350-1799 test.\n",
    "copy_range(1, 900, train_smile_dir)\n",
    "copy_range(900, 1350, validation_smile_dir)\n",
    "copy_range(1350, 1800, test_smile_dir)\n",
    "# Unsmile images: 2127-2999 train, 3000-3303 validation.\n",
    "copy_range(2127, 3000, train_unsmile_dir)\n",
    "copy_range(3000, 3304, validation_unsmile_dir)\n",
    "\n",
    "# Build the CNN: four Conv2D + MaxPooling stages, then a dense classifier\n",
    "# ending in a single sigmoid unit for the binary smile / unsmile decision.\n",
    "from keras import layers\n",
    "from keras import models\n",
    "\n",
    "model = models.Sequential()\n",
    "model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))\n",
    "model.add(layers.MaxPooling2D((2, 2)))\n",
    "model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n",
    "model.add(layers.MaxPooling2D((2, 2)))\n",
    "model.add(layers.Conv2D(128, (3, 3), activation='relu'))\n",
    "model.add(layers.MaxPooling2D((2, 2)))\n",
    "model.add(layers.Conv2D(128, (3, 3), activation='relu'))\n",
    "model.add(layers.MaxPooling2D((2, 2)))\n",
    "model.add(layers.Flatten())\n",
    "model.add(layers.Dense(512, activation='relu'))\n",
    "model.add(layers.Dense(1, activation='sigmoid'))\n",
    "model.summary()  # print the architecture\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e92da101-b77f-4c18-89aa-4cceef68e42b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Compile the model and build rescaled (1/255) data generators.\n",
    "from keras import optimizers\n",
    "from keras.preprocessing.image import ImageDataGenerator\n",
    "\n",
    "model.compile(loss='binary_crossentropy',\n",
    "              optimizer=optimizers.RMSprop(learning_rate=1e-4),  # 'lr' is deprecated\n",
    "              metrics=['acc'])\n",
    "\n",
    "# Rescale pixel values from [0, 255] to [0, 1].\n",
    "train_datagen = ImageDataGenerator(rescale=1./255)\n",
    "validation_datagen = ImageDataGenerator(rescale=1./255)\n",
    "test_datagen = ImageDataGenerator(rescale=1./255)\n",
    "\n",
    "train_generator = train_datagen.flow_from_directory(\n",
    "        train_dir,               # target directory\n",
    "        target_size=(150, 150),  # resize every image to 150x150\n",
    "        batch_size=20,\n",
    "        class_mode='binary')     # binary labels for binary_crossentropy\n",
    "# Use validation_datagen here (the original passed test_datagen and left\n",
    "# validation_datagen unused).\n",
    "validation_generator = validation_datagen.flow_from_directory(\n",
    "        validation_dir,\n",
    "        target_size=(150, 150),\n",
    "        batch_size=20,\n",
    "        class_mode='binary')\n",
    "test_generator = test_datagen.flow_from_directory(\n",
    "        test_dir,\n",
    "        target_size=(150, 150),\n",
    "        batch_size=20,\n",
    "        class_mode='binary')\n",
    "\n",
    "# Inspect one batch. Class indices: 'smile' -> 0, 'unsmile' -> 1.\n",
    "for data_batch, labels_batch in train_generator:\n",
    "    print('data batch shape:', data_batch.shape)\n",
    "    print('labels batch shape:', labels_batch.shape)  # was printing the array itself\n",
    "    break\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a1ab3555-afed-4f09-a4a5-c194a553390e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Data augmentation: random rotations, shifts, shear, zoom and flips.\n",
    "datagen = ImageDataGenerator(\n",
    "    rotation_range=40,\n",
    "    width_shift_range=0.2,\n",
    "    height_shift_range=0.2,\n",
    "    shear_range=0.2,\n",
    "    zoom_range=0.2,\n",
    "    horizontal_flip=True,\n",
    "    fill_mode='nearest')\n",
    "\n",
    "# Visualize what the augmentation does to a single training image.\n",
    "import matplotlib.pyplot as plt\n",
    "from keras.preprocessing import image\n",
    "\n",
    "train_smile_dir = './ImageFiles//files2//train//smile/'\n",
    "fnames = [os.path.join(train_smile_dir, fname) for fname in os.listdir(train_smile_dir)]\n",
    "img_path = fnames[3]  # an arbitrary sample from the training set\n",
    "sample = image.load_img(img_path, target_size=(150, 150))\n",
    "x = image.img_to_array(sample)\n",
    "x = x.reshape((1,) + x.shape)  # the generator expects a batch dimension\n",
    "\n",
    "shown = 0\n",
    "for batch in datagen.flow(x, batch_size=1):  # loops forever; break by hand\n",
    "    plt.figure(shown)\n",
    "    plt.imshow(image.array_to_img(batch[0]))\n",
    "    shown += 1\n",
    "    if shown % 4 == 0:\n",
    "        break\n",
    "plt.show()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b8a7d8c4-380e-4cb6-89d9-cddb3f8dc1f1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Rebuild the CNN with dropout and train it on the augmented data.\n",
    "model = models.Sequential()\n",
    "model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))\n",
    "model.add(layers.MaxPooling2D((2, 2)))\n",
    "model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n",
    "model.add(layers.MaxPooling2D((2, 2)))\n",
    "model.add(layers.Conv2D(128, (3, 3), activation='relu'))\n",
    "model.add(layers.MaxPooling2D((2, 2)))\n",
    "model.add(layers.Conv2D(128, (3, 3), activation='relu'))\n",
    "model.add(layers.MaxPooling2D((2, 2)))\n",
    "model.add(layers.Flatten())\n",
    "model.add(layers.Dropout(0.5))  # regularization before the dense head\n",
    "model.add(layers.Dense(512, activation='relu'))\n",
    "model.add(layers.Dense(1, activation='sigmoid'))\n",
    "model.compile(loss='binary_crossentropy',\n",
    "              optimizer=optimizers.RMSprop(learning_rate=1e-4),  # 'lr' is deprecated\n",
    "              metrics=['acc'])\n",
    "\n",
    "# Training data: rescaled AND augmented; validation data: rescaled only.\n",
    "train_datagen = ImageDataGenerator(\n",
    "    rescale=1./255,\n",
    "    rotation_range=40,\n",
    "    width_shift_range=0.2,\n",
    "    height_shift_range=0.2,\n",
    "    shear_range=0.2,\n",
    "    zoom_range=0.2,\n",
    "    horizontal_flip=True)\n",
    "\n",
    "test_datagen = ImageDataGenerator(rescale=1./255)\n",
    "\n",
    "train_generator = train_datagen.flow_from_directory(\n",
    "        train_dir,\n",
    "        target_size=(150, 150),  # all images resized to 150x150\n",
    "        batch_size=32,\n",
    "        class_mode='binary')     # binary labels for binary_crossentropy\n",
    "\n",
    "validation_generator = test_datagen.flow_from_directory(\n",
    "        validation_dir,\n",
    "        target_size=(150, 150),\n",
    "        batch_size=32,\n",
    "        class_mode='binary')\n",
    "\n",
    "# model.fit accepts generators directly; fit_generator is deprecated and\n",
    "# removed in recent Keras releases.\n",
    "history = model.fit(\n",
    "      train_generator,\n",
    "      steps_per_epoch=100,\n",
    "      epochs=60,\n",
    "      validation_data=validation_generator,\n",
    "      validation_steps=50)\n",
    "model.save('smileAndUnsmile1.h5')\n",
    "\n",
    "# Plot training / validation accuracy and loss curves.\n",
    "acc = history.history['acc']\n",
    "val_acc = history.history['val_acc']\n",
    "loss = history.history['loss']\n",
    "val_loss = history.history['val_loss']\n",
    "\n",
    "epochs = range(len(acc))\n",
    "\n",
    "plt.plot(epochs, acc, 'bo', label='Training acc')\n",
    "plt.plot(epochs, val_acc, 'b', label='Validation acc')\n",
    "plt.title('Training and validation accuracy')\n",
    "plt.legend()\n",
    "plt.figure()\n",
    "\n",
    "plt.plot(epochs, loss, 'bo', label='Training loss')\n",
    "plt.plot(epochs, val_loss, 'b', label='Validation loss')\n",
    "plt.title('Training and validation loss')\n",
    "plt.legend()\n",
    "plt.show()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3c91f749-b119-439f-a9bb-e68b5a7ae835",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Classify a single local image as smile / non-smile.\n",
    "import cv2\n",
    "from keras.preprocessing import image\n",
    "from keras.models import load_model\n",
    "import numpy as np\n",
    "\n",
    "# Load the trained model from disk.\n",
    "model = load_model('smileAndUnsmile1.h5')\n",
    "\n",
    "# Path of the local image to classify.\n",
    "img_path = 'test.jpg'\n",
    "loaded = image.load_img(img_path, target_size=(150, 150))\n",
    "\n",
    "# Scale pixels to [0, 1] and add a leading batch dimension.\n",
    "img_tensor = np.expand_dims(image.img_to_array(loaded) / 255.0, axis=0)\n",
    "prediction = model.predict(img_tensor)\n",
    "print(prediction)\n",
    "# Sigmoid output: values above 0.5 mean class 1 ('unsmile').\n",
    "result = '非笑脸' if prediction[0][0] > 0.5 else '笑脸'\n",
    "print(result)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7c03b023-08f8-4eab-9015-feaed2cd4a6c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Detect faces in webcam video and label each one smile / unsmile.\n",
    "import cv2\n",
    "from keras.preprocessing import image\n",
    "from keras.models import load_model\n",
    "import numpy as np\n",
    "import dlib\n",
    "from PIL import Image\n",
    "\n",
    "model = load_model('smileAndUnsmile1.h5')\n",
    "detector = dlib.get_frontal_face_detector()\n",
    "video = cv2.VideoCapture(0)  # 0 = default camera\n",
    "font = cv2.FONT_HERSHEY_SIMPLEX\n",
    "\n",
    "def rec(img):\n",
    "    \"\"\"Detect faces in a BGR frame, draw boxes and smile/unsmile labels, show it.\"\"\"\n",
    "    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n",
    "    # The detector returns a (possibly empty) rectangles object, never None,\n",
    "    # so iterate directly instead of the always-true 'is not None' check.\n",
    "    dets = detector(gray, 1)\n",
    "    for face in dets:\n",
    "        # Clamp coordinates: dlib can return rectangles partly outside the\n",
    "        # frame, and an empty/invalid crop would make cv2.resize throw.\n",
    "        top = max(face.top(), 0)\n",
    "        left = max(face.left(), 0)\n",
    "        bottom = min(face.bottom(), img.shape[0])\n",
    "        right = min(face.right(), img.shape[1])\n",
    "        if bottom <= top or right <= left:\n",
    "            continue  # degenerate box, nothing to classify\n",
    "        cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0), 2)\n",
    "        face_img = cv2.resize(img[top:bottom, left:right], dsize=(150, 150))\n",
    "        face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB)\n",
    "        # Normalize to [0, 1] and add a batch dimension for the model.\n",
    "        img_tensor = (np.array(face_img) / 255.).reshape(-1, 150, 150, 3)\n",
    "        prediction = model.predict(img_tensor)\n",
    "        # Sigmoid output: values above 0.5 mean class 1 ('unsmile').\n",
    "        result = 'unsmile' if prediction[0][0] > 0.5 else 'smile'\n",
    "        cv2.putText(img, result, (left, top), font, 2, (0, 255, 0), 2, cv2.LINE_AA)\n",
    "    cv2.imshow('Video', img)\n",
    "\n",
    "while video.isOpened():\n",
    "    res, img_rd = video.read()\n",
    "    if not res:\n",
    "        break\n",
    "    rec(img_rd)\n",
    "    if cv2.waitKey(1) & 0xFF == ord('q'):  # press q to quit\n",
    "        break\n",
    "video.release()\n",
    "cv2.destroyAllWindows()\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
