{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import cv2\n",
    "import math\n",
    "import time\n",
    "import utils\n",
    "import matplotlib\n",
    "import numpy as np\n",
    "\n",
    "# CUDA_VISIBLE_DEVICES must be set BEFORE TensorFlow enumerates devices;\n",
    "# the original set it after list_physical_devices(), where it has no effect.\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n",
    "\n",
    "import tensorflow as tf\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "# Require a GPU and enable memory growth so TF does not grab all VRAM up front.\n",
    "gpus = tf.config.experimental.list_physical_devices('GPU')\n",
    "assert len(gpus) > 0, \"Not enough GPU hardware devices available\"\n",
    "tf.config.experimental.set_memory_growth(gpus[0], True)\n",
    "\n",
    "from PIL import Image\n",
    "from MTCN import mtcnn\n",
    "from crop_face import CropFace\n",
    "from insight import FaceVerifier\n",
    "from scipy.spatial import distance\n",
    "from tensorflow.keras.models import Model, Sequential\n",
    "from tensorflow.keras.layers import Conv2D, Input, PReLU, MaxPool2D, Reshape, Activation, Flatten, Dense, Permute\n",
    "\n",
    "def show_single_image(img_arr):\n",
    "    \"\"\"Display one image array at the default figure size.\"\"\"\n",
    "    plt.imshow(img_arr)\n",
    "    plt.axis('on')\n",
    "    plt.show()\n",
    "\n",
    "def show_large_image(img_arr):\n",
    "    \"\"\"Display one image array on a large (18x32 inch) figure.\"\"\"\n",
    "    fig = plt.figure(figsize=(18, 32))\n",
    "    plt.imshow(img_arr)\n",
    "    plt.axis('on')\n",
    "    plt.show()\n",
    "\n",
    "def readimg(name):\n",
    "    \"\"\"Read a JPEG file at `name` and return it as an RGB uint8 numpy array.\n",
    "\n",
    "    Note: tf.io.read_file has no file-mode parameter; the original second\n",
    "    argument 'r' was silently consumed as the op name and is dropped here.\n",
    "    \"\"\"\n",
    "    images = tf.io.read_file(name)\n",
    "    images = tf.image.decode_jpeg(images, channels=3).numpy()\n",
    "    return images"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "# Single-image face verification: detect faces with MTCNN, compare each\n",
    "# detected face to a reference face via embedding cosine similarity, and\n",
    "# draw a green box for a match, red otherwise.\n",
    "img = matplotlib.image.imread('img/timg2.jpg')\n",
    "show_large_image(img)\n",
    "img1 = readimg(\"./img/TP.jpg\")\n",
    "threshold = [0.5, 0.6, 0.7]  # MTCNN stage confidence thresholds\n",
    "# The models must actually be constructed for a fresh-kernel run; they were\n",
    "# previously commented out, which caused a NameError on detectFace below.\n",
    "mtcnn_model = mtcnn()\n",
    "insight_face = FaceVerifier()\n",
    "rectangles = mtcnn_model.detectFace(img, threshold)\n",
    "if len(rectangles) != 0:\n",
    "    rectangles = utils.rect2square(np.array(rectangles, dtype=np.int32))\n",
    "for rectangle in rectangles:\n",
    "    # Landmark coordinates relative to the cropped face, rescaled to 160 px\n",
    "    landmark = (np.reshape(rectangle[5:15], (5, 2)) - np.array([int(rectangle[0]), int(rectangle[1])])) / (rectangle[3] - rectangle[1]) * 160\n",
    "    # Crop the detected face region\n",
    "    crop_img = img[int(rectangle[1]):int(rectangle[3]), int(rectangle[0]):int(rectangle[2])]\n",
    "    crop_img = cv2.resize(crop_img, (160, 160))\n",
    "    # Align the face using the five landmarks\n",
    "    new_img, _ = utils.Alignment_1(crop_img, landmark)\n",
    "\n",
    "    # Batch of [reference, detected] faces; presumably (2, 160, 160, 3) --\n",
    "    # the original '2,128,128,3' comment contradicted the 160x160 resize.\n",
    "    X = np.asarray(tf.cast([img1, new_img], dtype=tf.int32))\n",
    "    emb = insight_face.extract_embeddings(X)\n",
    "    cos = 1 - distance.cosine(emb[0], emb[1])\n",
    "    if cos > 0.30:  # cosine-similarity acceptance threshold\n",
    "        cv2.rectangle(img, (int(rectangle[0]), int(rectangle[1])), (int(rectangle[2]), int(rectangle[3])), (0, 255, 0), 2)\n",
    "    else:\n",
    "        cv2.rectangle(img, (int(rectangle[0]), int(rectangle[1])), (int(rectangle[2]), int(rectangle[3])), (255, 0, 0), 2)\n",
    "show_large_image(img)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Video face verification: sample every 10th frame, detect faces, compare\n",
    "# each to the reference face, and write annotated frames to an output video.\n",
    "img1 = readimg(\"./img/TP.jpg\")\n",
    "threshold = [0.5, 0.6, 0.7]\n",
    "# Construct the models for a fresh-kernel run; previously commented out,\n",
    "# which produced the NameError recorded in this cell's saved output.\n",
    "mtcnn_model = mtcnn()\n",
    "insight_face = FaceVerifier()\n",
    "vid = cv2.VideoCapture(\"./img/video.mp4\")\n",
    "video = cv2.VideoWriter(\"./img/res5.avi\", cv2.VideoWriter_fourcc('X','V','I','D'), 5, (1280,720))\n",
    "\n",
    "fps = 0.0\n",
    "i = 0\n",
    "while True:\n",
    "    i += 1\n",
    "    # Read EVERY frame so the capture position advances; the original put\n",
    "    # 'continue' before vid.read(), so no frames were ever skipped -- the\n",
    "    # loop just idled 9 iterations per frame and processed the first 50\n",
    "    # frames consecutively instead of every 10th frame.\n",
    "    return_value, frame = vid.read()\n",
    "    if not return_value:\n",
    "        break  # end of stream: stop cleanly instead of raising ValueError\n",
    "    if i % 10 != 0:\n",
    "        continue  # only process every 10th frame\n",
    "    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
    "\n",
    "    tim = time.time()\n",
    "    rectangles = mtcnn_model.detectFace(frame, threshold)\n",
    "    if len(rectangles) != 0:\n",
    "        rectangles = utils.rect2square(np.array(rectangles, dtype=np.int32))\n",
    "    for rectangle in rectangles:\n",
    "        landmark = (np.reshape(rectangle[5:15], (5, 2)) - np.array([int(rectangle[0]), int(rectangle[1])])) / (rectangle[3] - rectangle[1]) * 160\n",
    "        crop_img = frame[int(rectangle[1]):int(rectangle[3]), int(rectangle[0]):int(rectangle[2])]\n",
    "        if crop_img.size == 0:\n",
    "            continue  # box clipped entirely outside the frame\n",
    "        try:\n",
    "            crop_img = cv2.resize(crop_img, (160, 160))\n",
    "            new_img, _ = utils.Alignment_1(crop_img, landmark)\n",
    "            X = np.asarray(tf.cast([img1, new_img], dtype=tf.int32))\n",
    "            emb = insight_face.extract_embeddings(X)\n",
    "            cos = 1 - distance.cosine(emb[0], emb[1])\n",
    "            if cos > 0.2:\n",
    "                cv2.rectangle(frame, (int(rectangle[0]), int(rectangle[1])), (int(rectangle[2]), int(rectangle[3])), (0, 255, 0), 2)\n",
    "            else:\n",
    "                cv2.rectangle(frame, (int(rectangle[0]), int(rectangle[1])), (int(rectangle[2]), int(rectangle[3])), (255, 0, 0), 2)\n",
    "        except Exception:\n",
    "            # Best-effort per face: a failed resize/align/verify should not\n",
    "            # abort the whole video pass.\n",
    "            pass\n",
    "    # Running average of instantaneous FPS, as in the original.\n",
    "    fps = (fps + (1. / (time.time() - tim))) / 2\n",
    "    cv2.putText(frame, \"FPS: {:.2f}\".format(fps), (0, 70), cv2.FONT_HERSHEY_COMPLEX_SMALL, 3, (255, 0, 0), 2)\n",
    "    result = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n",
    "    video.write(result)\n",
    "    print(\"写了%d帧\"%i)\n",
    "    if i == 500:\n",
    "        break\n",
    "vid.release()  # was never released in the original\n",
    "video.release()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
