{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": true
   },
   "source": [
    "### TF Record Generation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import menpo.io as mio\n",
    "from menpo.shape import PointCloud\n",
    "from menpo.visualize import print_progress\n",
    "from menpo.image import Image\n",
    "from menpo.transform import Translation, Scale\n",
    "import cv2\n",
    "\n",
    "from pathlib import Path\n",
    "import scipy.io as sio\n",
    "from io import BytesIO\n",
    "\n",
    "from scipy.spatial.distance import pdist\n",
    "import numpy as np\n",
    "\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline\n",
    "\n",
    "import tensorflow as tf\n",
    "slim = tf.contrib.slim\n",
    "\n",
    "import sys\n",
    "import detect_face\n",
    "\n",
    "from scipy import misc\n",
    "from PIL import Image as PImage"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def crop_image_bounding_box(img, bbox, res, base=256., order=1):\n",
    "    # Crop img around bbox and rescale to res.  The crop scale is the\n",
    "    # bounding-box diagonal length divided by base.\n",
    "    center = bbox.centre()\n",
    "    bmin, bmax = bbox.bounds()\n",
    "    # Diagonal length of the box sets the crop scale.\n",
    "    scale = np.linalg.norm(bmax - bmin) / base\n",
    "\n",
    "    return crop_image(img, center, scale, res, base, order=order)\n",
    "\n",
    "def crop_image(img, center, scale, res, base=256., order=1):\n",
    "    # Crop img around center at the given scale and resize to res.\n",
    "    # Returns (new_img, trans) where trans maps crop coordinates back\n",
    "    # to the original image frame.\n",
    "    h = scale\n",
    "\n",
    "    # Transform from output (res) coordinates to input-image coordinates.\n",
    "    t = Translation(\n",
    "        [\n",
    "            res[0] * (-center[0] / h + .5),\n",
    "            res[1] * (-center[1] / h + .5)\n",
    "        ]).compose_after(Scale((res[0] / h, res[1] / h))).pseudoinverse()\n",
    "\n",
    "    # Upper left point\n",
    "    ul = np.floor(t.apply([0, 0])).astype(int)\n",
    "    # Bottom right point.  The original truncated to int *before* np.ceil\n",
    "    # (a no-op on integers) and used the removed alias np.int; take the\n",
    "    # ceiling of the float coordinates first, then cast.\n",
    "    br = np.ceil(t.apply(res)).astype(int)\n",
    "\n",
    "    # crop and rescale\n",
    "    cimg, trans = img.warp_to_shape(\n",
    "        br - ul, Translation(-(br - ul) / 2 + (br + ul) / 2), return_transform=True)\n",
    "\n",
    "    c_scale = np.min(cimg.shape) / np.mean(res)\n",
    "    new_img = cimg.rescale(1 / c_scale, order=order).resize(res, order=order)\n",
    "\n",
    "    trans = trans.compose_after(Scale([c_scale, c_scale]))\n",
    "\n",
    "    return new_img, trans"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# MTCNN-style face detector set-up (TF1 graph/session API).\n",
    "minsize = 40 # minimum size of face\n",
    "threshold = [ 0.6, 0.7, 0.7 ]  # per-stage (pnet/rnet/onet) score thresholds\n",
    "factor = 0.709 # scale factor\n",
    "# NOTE(review): minsize/threshold/factor are not referenced by the cells\n",
    "# below, which call detect_face.box_regression with onet only -- confirm\n",
    "# whether they are consumed inside detect_face or are dead configuration.\n",
    "\n",
    "with tf.Graph().as_default():\n",
    "    sess = tf.Session()\n",
    "    # The session stays open after this cell; onet is reused by the\n",
    "    # box_regression calls in the generation cells below.\n",
    "    with sess.as_default():\n",
    "        pnet, rnet, onet = detect_face.create_detector(sess, './detection')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 3D_84"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "record_name = 'menpo.tfrecords'\n",
    "record_save_path = Path('./3D84/')\n",
    "image_load_path = Path('/media/jd4615/data3/alignment3D/3D84/data/helen/menpo')\n",
    "\n",
    "def get_jpg_string(im):\n",
    "    # Serialise a menpo image to an in-memory JPEG byte string.\n",
    "    fp = BytesIO()\n",
    "    mio.export_image(im, fp, extension='jpg')\n",
    "    fp.seek(0)\n",
    "    return fp.read()\n",
    "\n",
    "def _int_feature(value):\n",
    "    # Wrap a scalar int as a tf.train.Feature.\n",
    "    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n",
    "\n",
    "def _bytes_feature(value):\n",
    "    # Wrap a byte string as a tf.train.Feature.\n",
    "    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n",
    "\n",
    "def _float_feature(value):\n",
    "    # Wrap a scalar float as a tf.train.Feature (kept for parity; unused here).\n",
    "    return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n",
    "\n",
    "def face_iterator(image_load_path=image_load_path):\n",
    "    # Yield (image_name, cropped image, status, points, binary mask,\n",
    "    # mask index) for every image under image_load_path.  All 84\n",
    "    # landmarks are kept, so the mask selects every point.\n",
    "    for img in print_progress(mio.import_images(image_load_path, verbose=True)):\n",
    "        image_name = img.path.name\n",
    "\n",
    "        status = 0\n",
    "        pts = np.zeros((84, 2))\n",
    "        binary_mask = np.zeros(84)\n",
    "        mask_index = np.arange(84)\n",
    "        binary_mask[mask_index] = 1\n",
    "\n",
    "        # Ground-truth box from the annotated landmarks, in the\n",
    "        # [x_min, y_min, x_max, y_max, score] layout box_regression expects.\n",
    "        bbox = img.landmarks['LJSON'].lms.bounding_box()\n",
    "        bounding_boxes = np.array([[bbox.points[0][1], bbox.points[0][0],\n",
    "                                    bbox.points[2][1], bbox.points[2][0], 1.]])\n",
    "\n",
    "        # Refine the box with the ONet stage, then convert back to (y, x).\n",
    "        bounding_boxes, points = detect_face.box_regression(\n",
    "            img.pixels_with_channels_at_back() * 255, onet, bounding_boxes, 0.001)\n",
    "        bbox = PointCloud(bounding_boxes[0, [1, 0, 3, 2]].reshape([2, 2])).bounding_box()\n",
    "\n",
    "        crop_img, _ = crop_image_bounding_box(img, bbox, [384., 384.], base=256. / 384., order=1)\n",
    "        pts[mask_index] = crop_img.landmarks['LJSON'].lms.points\n",
    "\n",
    "        yield image_name, crop_img, status, pts, binary_mask, mask_index\n",
    "\n",
    "def generate(iterator,\n",
    "             record_save_path=record_save_path,\n",
    "             record_name=record_name,\n",
    "             store_records=True):\n",
    "    # Consume iterator and (optionally) serialise every example into a\n",
    "    # TFRecord file.  try/finally closes the writer even if iteration raises.\n",
    "    writer = tf.python_io.TFRecordWriter(str(record_save_path / record_name)) if store_records else None\n",
    "    try:\n",
    "        for image_name, img, status, pts, binary_mask, mask_index in iterator:\n",
    "            if writer is None:\n",
    "                continue\n",
    "            example = tf.train.Example(\n",
    "                features=tf.train.Features(\n",
    "                    feature={\n",
    "                        # images\n",
    "                        'image': _bytes_feature(get_jpg_string(img)),\n",
    "                        'height': _int_feature(img.shape[0]),\n",
    "                        'width': _int_feature(img.shape[1]),\n",
    "                        # landmarks\n",
    "                        'n_landmarks': _int_feature(len(pts)),\n",
    "                        'gt_pts': _bytes_feature(pts.astype(np.float32).tobytes()),\n",
    "                        'gt_mask': _bytes_feature(binary_mask.astype(np.float32).tobytes()),\n",
    "                        'mask_index': _bytes_feature(mask_index.astype(np.float32).tobytes()),\n",
    "                        # status\n",
    "                        'status': _int_feature(status),\n",
    "                    }))\n",
    "            writer.write(example.SerializeToString())\n",
    "    finally:\n",
    "        if writer is not None:\n",
    "            writer.close()\n",
    "\n",
    "generate(face_iterator(), record_save_path, record_name, store_records=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 2D_68_frontal "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "record_name = 'menpo_frontal.tfrecords'\n",
    "record_save_path = Path('./2D68/')\n",
    "image_load_path = Path('/media/jd4615/data3/alignment3D/3D84/data/menpo/refine/semifrontal')\n",
    "landmarkNum = 68\n",
    "\n",
    "def get_jpg_string(im):\n",
    "    # Serialise a menpo image to an in-memory JPEG byte string.\n",
    "    fp = BytesIO()\n",
    "    mio.export_image(im, fp, extension='jpg')\n",
    "    fp.seek(0)\n",
    "    return fp.read()\n",
    "\n",
    "def _int_feature(value):\n",
    "    # Wrap a scalar int as a tf.train.Feature.\n",
    "    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n",
    "\n",
    "def _bytes_feature(value):\n",
    "    # Wrap a byte string as a tf.train.Feature.\n",
    "    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n",
    "\n",
    "def _float_feature(value):\n",
    "    # Wrap a scalar float as a tf.train.Feature (kept for parity; unused here).\n",
    "    return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n",
    "\n",
    "def face_iterator(image_load_path=image_load_path):\n",
    "    # Yield (image_name, cropped image, status, points, binary mask,\n",
    "    # mask index) for every semi-frontal image.  All landmarkNum points\n",
    "    # are visible, so the mask selects every landmark and status is 0.\n",
    "    for img in print_progress(mio.import_images(image_load_path, verbose=True)):\n",
    "        image_name = img.path.name\n",
    "\n",
    "        status = 0\n",
    "        pts = np.zeros((landmarkNum, 2))\n",
    "        binary_mask = np.zeros(landmarkNum)\n",
    "        mask_index = np.arange(landmarkNum)\n",
    "        binary_mask[mask_index] = 1\n",
    "\n",
    "        # Ground-truth box from the annotated landmarks, in the\n",
    "        # [x_min, y_min, x_max, y_max, score] layout box_regression expects.\n",
    "        bbox = img.landmarks['PTS'].lms.bounding_box()\n",
    "        bounding_boxes = np.array([[bbox.points[0][1], bbox.points[0][0],\n",
    "                                    bbox.points[2][1], bbox.points[2][0], 1.]])\n",
    "\n",
    "        # Refine the box with the ONet stage, then convert back to (y, x).\n",
    "        bounding_boxes, points = detect_face.box_regression(\n",
    "            img.pixels_with_channels_at_back() * 255, onet, bounding_boxes, 0.000001)\n",
    "        bbox = PointCloud(bounding_boxes[0, [1, 0, 3, 2]].reshape([2, 2])).bounding_box()\n",
    "\n",
    "        crop_img, _ = crop_image_bounding_box(img, bbox, [384., 384.], base=256. / 384., order=1)\n",
    "        pts[mask_index] = crop_img.landmarks['PTS'].lms.points\n",
    "\n",
    "        yield image_name, crop_img, status, pts, binary_mask, mask_index\n",
    "\n",
    "def generate(iterator,\n",
    "             record_save_path=record_save_path,\n",
    "             record_name=record_name,\n",
    "             store_records=True):\n",
    "    # Consume iterator and (optionally) serialise every example into a\n",
    "    # TFRecord file.  try/finally closes the writer even if iteration raises.\n",
    "    writer = tf.python_io.TFRecordWriter(str(record_save_path / record_name)) if store_records else None\n",
    "    try:\n",
    "        for image_name, img, status, pts, binary_mask, mask_index in iterator:\n",
    "            if writer is None:\n",
    "                continue\n",
    "            example = tf.train.Example(\n",
    "                features=tf.train.Features(\n",
    "                    feature={\n",
    "                        # images\n",
    "                        'image': _bytes_feature(get_jpg_string(img)),\n",
    "                        'height': _int_feature(img.shape[0]),\n",
    "                        'width': _int_feature(img.shape[1]),\n",
    "                        # landmarks\n",
    "                        'n_landmarks': _int_feature(len(pts)),\n",
    "                        'gt_pts': _bytes_feature(pts.astype(np.float32).tobytes()),\n",
    "                        'gt_mask': _bytes_feature(binary_mask.astype(np.float32).tobytes()),\n",
    "                        'mask_index': _bytes_feature(mask_index.astype(np.float32).tobytes()),\n",
    "                        # status\n",
    "                        'status': _int_feature(status),\n",
    "                    }))\n",
    "            writer.write(example.SerializeToString())\n",
    "    finally:\n",
    "        if writer is not None:\n",
    "            writer.close()\n",
    "\n",
    "generate(face_iterator(), record_save_path, record_name, store_records=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 2D_68_profile_right_39"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "record_name = 'menpo_profile_right.tfrecords'\n",
    "record_save_path = Path('./2D68/')\n",
    "image_load_path = Path('/media/jd4615/data3/alignment3D/3D84/data/menpo/refine/profile_right')\n",
    "landmarkNum = 68\n",
    "\n",
    "def get_jpg_string(im):\n",
    "    # Serialise a menpo image to an in-memory JPEG byte string.\n",
    "    fp = BytesIO()\n",
    "    mio.export_image(im, fp, extension='jpg')\n",
    "    fp.seek(0)\n",
    "    return fp.read()\n",
    "\n",
    "def _int_feature(value):\n",
    "    # Wrap a scalar int as a tf.train.Feature.\n",
    "    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n",
    "\n",
    "def _bytes_feature(value):\n",
    "    # Wrap a byte string as a tf.train.Feature.\n",
    "    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n",
    "\n",
    "def _float_feature(value):\n",
    "    # Wrap a scalar float as a tf.train.Feature (kept for parity; unused here).\n",
    "    return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n",
    "\n",
    "def face_iterator(image_load_path=image_load_path):\n",
    "    # Yield (image_name, cropped image, status, points, binary mask,\n",
    "    # mask index) for every right-profile image.  Only the 39 landmarks\n",
    "    # annotated on these faces are selected by the mask.\n",
    "    for img in print_progress(mio.import_images(image_load_path, verbose=True)):\n",
    "        image_name = img.path.name\n",
    "\n",
    "        pts = np.zeros((landmarkNum, 2))\n",
    "        binary_mask = np.zeros(landmarkNum)\n",
    "\n",
    "        # NOTE(review): status appears to encode pose (1 here vs. -1 in the\n",
    "        # profile_left cell and 0 for frontal) -- confirm with the consumer.\n",
    "        status = 1\n",
    "        # 1-based landmark ids of the visible right-profile points,\n",
    "        # converted to 0-based indices.\n",
    "        mask_index = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 18, 19, 21, 22, \n",
    "                               28, 29, 30, 31, 34, 32, 39, 38, 37, 42, 41, 52, 51, 50, 49, 60, 59, 58, 63, 62, 61, 68, 67])-1\n",
    "\n",
    "        binary_mask[mask_index] = 1\n",
    "\n",
    "        # Ground-truth box from the annotated landmarks, in the\n",
    "        # [x_min, y_min, x_max, y_max, score] layout box_regression expects.\n",
    "        bbox = img.landmarks['PTS'].lms.bounding_box()\n",
    "        bounding_boxes = np.array([[bbox.points[0][1], bbox.points[0][0],\n",
    "                                    bbox.points[2][1], bbox.points[2][0], 1.]])\n",
    "\n",
    "        # Refine the box with the ONet stage, then convert back to (y, x).\n",
    "        bounding_boxes, points = detect_face.box_regression(\n",
    "            img.pixels_with_channels_at_back() * 255, onet, bounding_boxes, 0.000001)\n",
    "        bbox = PointCloud(bounding_boxes[0, [1, 0, 3, 2]].reshape([2, 2])).bounding_box()\n",
    "\n",
    "        crop_img, _ = crop_image_bounding_box(img, bbox, [384., 384.], base=256. / 384., order=1)\n",
    "        pts[mask_index] = crop_img.landmarks['PTS'].lms.points\n",
    "\n",
    "        # Pad mask_index with -1 sentinels so every record stores a\n",
    "        # fixed-length (landmarkNum) index vector.\n",
    "        mask_index = np.concatenate([mask_index, np.zeros(landmarkNum - len(mask_index)) - 1])\n",
    "\n",
    "        yield image_name, crop_img, status, pts, binary_mask, mask_index\n",
    "\n",
    "def generate(iterator,\n",
    "             record_save_path=record_save_path,\n",
    "             record_name=record_name,\n",
    "             store_records=True):\n",
    "    # Consume iterator and (optionally) serialise every example into a\n",
    "    # TFRecord file.  try/finally closes the writer even if iteration raises.\n",
    "    writer = tf.python_io.TFRecordWriter(str(record_save_path / record_name)) if store_records else None\n",
    "    try:\n",
    "        for image_name, img, status, pts, binary_mask, mask_index in iterator:\n",
    "            if writer is None:\n",
    "                continue\n",
    "            example = tf.train.Example(\n",
    "                features=tf.train.Features(\n",
    "                    feature={\n",
    "                        # images\n",
    "                        'image': _bytes_feature(get_jpg_string(img)),\n",
    "                        'height': _int_feature(img.shape[0]),\n",
    "                        'width': _int_feature(img.shape[1]),\n",
    "                        # landmarks\n",
    "                        'n_landmarks': _int_feature(len(pts)),\n",
    "                        'gt_pts': _bytes_feature(pts.astype(np.float32).tobytes()),\n",
    "                        'gt_mask': _bytes_feature(binary_mask.astype(np.float32).tobytes()),\n",
    "                        'mask_index': _bytes_feature(mask_index.astype(np.float32).tobytes()),\n",
    "                        # status\n",
    "                        'status': _int_feature(status),\n",
    "                    }))\n",
    "            writer.write(example.SerializeToString())\n",
    "    finally:\n",
    "        if writer is not None:\n",
    "            writer.close()\n",
    "\n",
    "generate(face_iterator(), record_save_path, record_name, store_records=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 2D_68_profile_left_39"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "record_name = 'menpo_profile_left.tfrecords'\n",
    "record_save_path = Path('./2D68/')\n",
    "image_load_path = Path('/media/jd4615/data3/alignment3D/3D84/data/menpo/refine/profile_left')\n",
    "landmarkNum = 68\n",
    "\n",
    "def get_jpg_string(im):\n",
    "    # Serialise a menpo image to an in-memory JPEG byte string.\n",
    "    fp = BytesIO()\n",
    "    mio.export_image(im, fp, extension='jpg')\n",
    "    fp.seek(0)\n",
    "    return fp.read()\n",
    "\n",
    "def _int_feature(value):\n",
    "    # Wrap a scalar int as a tf.train.Feature.\n",
    "    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n",
    "\n",
    "def _bytes_feature(value):\n",
    "    # Wrap a byte string as a tf.train.Feature.\n",
    "    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n",
    "\n",
    "def _float_feature(value):\n",
    "    # Wrap a scalar float as a tf.train.Feature (kept for parity; unused here).\n",
    "    return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n",
    "\n",
    "def face_iterator(image_load_path=image_load_path):\n",
    "    # Yield (image_name, cropped image, status, points, binary mask,\n",
    "    # mask index) for every left-profile image.  Only the 39 landmarks\n",
    "    # annotated on these faces are selected by the mask.\n",
    "    for img in print_progress(mio.import_images(image_load_path, verbose=True)):\n",
    "        image_name = img.path.name\n",
    "\n",
    "        pts = np.zeros((landmarkNum, 2))\n",
    "        binary_mask = np.zeros(landmarkNum)\n",
    "\n",
    "        # NOTE(review): status appears to encode pose (-1 here vs. 1 in the\n",
    "        # profile_right cell and 0 for frontal) -- confirm with the consumer.\n",
    "        status = -1\n",
    "        # 1-based landmark ids of the visible left-profile points,\n",
    "        # converted to 0-based indices.\n",
    "        mask_index = np.array([17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 27, 26, 24, 23,\n",
    "                               28, 29, 30, 31, 34, 36, 44, 45, 46, 47, 48, 52, 53, 54, 55, 56, 57, 58, 63, 64, 65, 66, 67])-1\n",
    "\n",
    "        binary_mask[mask_index] = 1\n",
    "\n",
    "        # Ground-truth box from the annotated landmarks, in the\n",
    "        # [x_min, y_min, x_max, y_max, score] layout box_regression expects.\n",
    "        bbox = img.landmarks['PTS'].lms.bounding_box()\n",
    "        bounding_boxes = np.array([[bbox.points[0][1], bbox.points[0][0],\n",
    "                                    bbox.points[2][1], bbox.points[2][0], 1.]])\n",
    "\n",
    "        # Refine the box with the ONet stage, then convert back to (y, x).\n",
    "        bounding_boxes, points = detect_face.box_regression(\n",
    "            img.pixels_with_channels_at_back() * 255, onet, bounding_boxes, 0.000001)\n",
    "        bbox = PointCloud(bounding_boxes[0, [1, 0, 3, 2]].reshape([2, 2])).bounding_box()\n",
    "\n",
    "        crop_img, _ = crop_image_bounding_box(img, bbox, [384., 384.], base=256. / 384., order=1)\n",
    "        pts[mask_index] = crop_img.landmarks['PTS'].lms.points\n",
    "\n",
    "        # Pad mask_index with -1 sentinels so every record stores a\n",
    "        # fixed-length (landmarkNum) index vector.\n",
    "        mask_index = np.concatenate([mask_index, np.zeros(landmarkNum - len(mask_index)) - 1])\n",
    "\n",
    "        yield image_name, crop_img, status, pts, binary_mask, mask_index\n",
    "\n",
    "def generate(iterator,\n",
    "             record_save_path=record_save_path,\n",
    "             record_name=record_name,\n",
    "             store_records=True):\n",
    "    # Consume iterator and (optionally) serialise every example into a\n",
    "    # TFRecord file.  try/finally closes the writer even if iteration raises.\n",
    "    writer = tf.python_io.TFRecordWriter(str(record_save_path / record_name)) if store_records else None\n",
    "    try:\n",
    "        for image_name, img, status, pts, binary_mask, mask_index in iterator:\n",
    "            if writer is None:\n",
    "                continue\n",
    "            example = tf.train.Example(\n",
    "                features=tf.train.Features(\n",
    "                    feature={\n",
    "                        # images\n",
    "                        'image': _bytes_feature(get_jpg_string(img)),\n",
    "                        'height': _int_feature(img.shape[0]),\n",
    "                        'width': _int_feature(img.shape[1]),\n",
    "                        # landmarks\n",
    "                        'n_landmarks': _int_feature(len(pts)),\n",
    "                        'gt_pts': _bytes_feature(pts.astype(np.float32).tobytes()),\n",
    "                        'gt_mask': _bytes_feature(binary_mask.astype(np.float32).tobytes()),\n",
    "                        'mask_index': _bytes_feature(mask_index.astype(np.float32).tobytes()),\n",
    "                        # status\n",
    "                        'status': _int_feature(status),\n",
    "                    }))\n",
    "            writer.write(example.SerializeToString())\n",
    "    finally:\n",
    "        if writer is not None:\n",
    "            writer.close()\n",
    "\n",
    "generate(face_iterator(), record_save_path, record_name, store_records=True)"
   ]
  }
 ],
 "metadata": {
  "anaconda-cloud": {},
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
