{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import hashlib  # SHA-256 fingerprint of each encoded image\n",
    "import io  # wrap raw bytes in file-like objects for PIL\n",
    "import logging\n",
    "import os\n",
    "import random\n",
    "import re  # parse the class name out of image file names\n",
    "\n",
    "from lxml import etree  # parse PASCAL VOC XML annotations\n",
    "import numpy as np\n",
    "import PIL.Image  # decode/validate JPEG images and PNG masks\n",
    "import tensorflow as tf\n",
    "\n",
    "from object_detection.utils import dataset_util\n",
    "from object_detection.utils import label_map_util\n",
    "\n",
    "# Command-line flags: where the raw pet dataset lives, where to write\n",
    "# TFRecords, and which label map to use.\n",
    "flags = tf.app.flags\n",
    "flags.DEFINE_string('data_dir', '', 'Root directory to raw pet dataset.')\n",
    "flags.DEFINE_string('output_dir', '', 'Path to directory to output TFRecords.')\n",
    "flags.DEFINE_string('label_map_path', 'data/pet_label_map.pbtxt',\n",
    "                    'Path to label map proto')\n",
    "# Restored: only the DEFINE_boolean line had been commented out, leaving its\n",
    "# string-continuation lines behind (a syntax error) while later code still\n",
    "# reads faces_only.\n",
    "flags.DEFINE_boolean('faces_only', True, 'If True, generates bounding boxes '\n",
    "                     'for pet faces.  Otherwise generates bounding boxes (as '\n",
    "                     'well as segmentations for full pet bodies).  Note that '\n",
    "                     'in the latter case, the resulting files are much larger.')\n",
    "FLAGS = flags.FLAGS  # parsed command-line arguments\n",
    "\n",
    "\n",
    "def get_class_name_from_filename(file_name):\n",
    "  \"\"\"Gets the class name from a file.\n",
    "\n",
    "  Args:\n",
    "    file_name: The file name to get the class name from.\n",
    "               ie. \"american_pit_bull_terrier_105.jpg\"\n",
    "\n",
    "  Returns:\n",
    "    A string of the class name.\n",
    "  \"\"\"\n",
    "  # Restored: this helper had been commented out, but dict_to_tf_example\n",
    "  # still calls it, which raised NameError at runtime.\n",
    "  match = re.match(r'([A-Za-z_]+)(_[0-9]+\\\\.jpg)', file_name, re.I)\n",
    "  return match.groups()[0]\n",
    "\n",
    "\n",
    "def dict_to_tf_example(data,\n",
    "                       mask_path,\n",
    "                       label_map_dict,\n",
    "                       image_subdirectory,\n",
    "                       ignore_difficult_instances=False,\n",
    "                       faces_only=True):\n",
    "  \"\"\"Convert XML derived dict to tf.Example proto.\n",
    "\n",
    "  Notice that this function normalizes the bounding box coordinates provided\n",
    "  by the raw data.\n",
    "\n",
    "  Args:\n",
    "    data: dict holding PASCAL XML fields for a single image (obtained by\n",
    "      running dataset_util.recursive_parse_xml_to_dict)\n",
    "    mask_path: String path to PNG encoded mask.\n",
    "    label_map_dict: A map from string label names to integers ids.\n",
    "    image_subdirectory: String specifying subdirectory within the\n",
    "      Pascal dataset directory holding the actual image data.\n",
    "    ignore_difficult_instances: Whether to skip difficult instances in the\n",
    "      dataset  (default: False).\n",
    "    faces_only: If True, generates bounding boxes for pet faces.  Otherwise\n",
    "      generates bounding boxes (as well as segmentations for full pet bodies).\n",
    "\n",
    "  Returns:\n",
    "    example: The converted tf.Example.\n",
    "\n",
    "  Raises:\n",
    "    ValueError: if the image pointed to by data['filename'] is not a valid JPEG\n",
    "  \"\"\"\n",
    "  # Read and validate the JPEG image bytes.\n",
    "  img_path = os.path.join(image_subdirectory, data['filename'])\n",
    "  with tf.gfile.GFile(img_path, 'rb') as fid:\n",
    "    encoded_jpg = fid.read()\n",
    "  encoded_jpg_io = io.BytesIO(encoded_jpg)\n",
    "  image = PIL.Image.open(encoded_jpg_io)\n",
    "  if image.format != 'JPEG':\n",
    "    raise ValueError('Image format not JPEG')\n",
    "  # SHA-256 of the raw JPEG bytes, stored as a stable per-image key.\n",
    "  key = hashlib.sha256(encoded_jpg).hexdigest()\n",
    "\n",
    "  # Read and validate the PNG trimap mask.\n",
    "  with tf.gfile.GFile(mask_path, 'rb') as fid:\n",
    "    encoded_mask_png = fid.read()\n",
    "  encoded_png_io = io.BytesIO(encoded_mask_png)\n",
    "  mask = PIL.Image.open(encoded_png_io)\n",
    "  if mask.format != 'PNG':\n",
    "    raise ValueError('Mask format not PNG')\n",
    "\n",
    "  # In the pet trimaps, pixel value 2 is background; any other value is pet.\n",
    "  mask_np = np.asarray(mask)\n",
    "  nonbackground_indices_x = np.any(mask_np != 2, axis=0)\n",
    "  nonbackground_indices_y = np.any(mask_np != 2, axis=1)\n",
    "  nonzero_x_indices = np.where(nonbackground_indices_x)\n",
    "  nonzero_y_indices = np.where(nonbackground_indices_y)\n",
    "\n",
    "  width = int(data['size']['width'])\n",
    "  height = int(data['size']['height'])\n",
    "\n",
    "  xmins = []\n",
    "  ymins = []\n",
    "  xmaxs = []\n",
    "  ymaxs = []\n",
    "  classes = []\n",
    "  classes_text = []\n",
    "  truncated = []\n",
    "  poses = []\n",
    "  difficult_obj = []\n",
    "  masks = []\n",
    "  for obj in data['object']:\n",
    "    difficult = bool(int(obj['difficult']))\n",
    "    if ignore_difficult_instances and difficult:\n",
    "      continue\n",
    "    difficult_obj.append(int(difficult))\n",
    "\n",
    "    # Restored faces_only branch: the original `if !faces_only:` was invalid\n",
    "    # Python, and the bndbox branch had been commented out, which left\n",
    "    # xmin/xmax/ymin/ymax undefined in the faces_only case.\n",
    "    if faces_only:\n",
    "      xmin = float(obj['bndbox']['xmin'])\n",
    "      xmax = float(obj['bndbox']['xmax'])\n",
    "      ymin = float(obj['bndbox']['ymin'])\n",
    "      ymax = float(obj['bndbox']['ymax'])\n",
    "    else:\n",
    "      # Tight box around all non-background mask pixels.\n",
    "      xmin = float(np.min(nonzero_x_indices))\n",
    "      xmax = float(np.max(nonzero_x_indices))\n",
    "      ymin = float(np.min(nonzero_y_indices))\n",
    "      ymax = float(np.max(nonzero_y_indices))\n",
    "\n",
    "    # Normalize coordinates to [0, 1] as the TF example format expects.\n",
    "    xmins.append(xmin / width)\n",
    "    ymins.append(ymin / height)\n",
    "    xmaxs.append(xmax / width)\n",
    "    ymaxs.append(ymax / height)\n",
    "    class_name = get_class_name_from_filename(data['filename'])\n",
    "    classes_text.append(class_name.encode('utf8'))\n",
    "    classes.append(label_map_dict[class_name])\n",
    "    truncated.append(int(obj['truncated']))\n",
    "    poses.append(obj['pose'].encode('utf8'))\n",
    "    if not faces_only:\n",
    "      # Binary foreground mask (True where the pixel is not background).\n",
    "      mask_remapped = mask_np != 2\n",
    "      masks.append(mask_remapped)\n",
    "\n",
    "  feature_dict = {\n",
    "      'image/height': dataset_util.int64_feature(height),\n",
    "      'image/width': dataset_util.int64_feature(width),\n",
    "      'image/filename': dataset_util.bytes_feature(\n",
    "          data['filename'].encode('utf8')),\n",
    "      'image/source_id': dataset_util.bytes_feature(\n",
    "          data['filename'].encode('utf8')),\n",
    "      'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n",
    "      'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n",
    "      'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),\n",
    "      'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),\n",
    "      'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),\n",
    "      'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),\n",
    "      'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),\n",
    "      'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n",
    "      'image/object/class/label': dataset_util.int64_list_feature(classes),\n",
    "      'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),\n",
    "      'image/object/truncated': dataset_util.int64_list_feature(truncated),\n",
    "      'image/object/view': dataset_util.bytes_list_feature(poses),\n",
    "  }\n",
    "  if not faces_only:\n",
    "    # Stack per-object masks and flatten into one float list feature.\n",
    "    mask_stack = np.stack(masks).astype(np.float32)\n",
    "    masks_flattened = np.reshape(mask_stack, [-1])\n",
    "    feature_dict['image/object/mask'] = (\n",
    "        dataset_util.float_list_feature(masks_flattened.tolist()))\n",
    "\n",
    "  example = tf.train.Example(features=tf.train.Features(feature=feature_dict))\n",
    "  return example\n",
    "\n",
    "\n",
    "def create_tf_record(output_filename,\n",
    "                     label_map_dict,\n",
    "                     annotations_dir,\n",
    "                     image_dir,\n",
    "                     examples,\n",
    "                     faces_only=True):\n",
    "  \"\"\"Creates a TFRecord file from examples.\n",
    "\n",
    "  Args:\n",
    "    output_filename: Path to where output file is saved.\n",
    "    label_map_dict: The label map dictionary.\n",
    "    annotations_dir: Directory where annotation files are stored.\n",
    "    image_dir: Directory where image files are stored.\n",
    "    examples: Examples to parse and save to tf record.\n",
    "    faces_only: If True, generates bounding boxes for pet faces.  Otherwise\n",
    "      generates bounding boxes (as well as segmentations for full pet bodies).\n",
    "  \"\"\"\n",
    "  writer = tf.python_io.TFRecordWriter(output_filename)\n",
    "  for idx, example in enumerate(examples):\n",
    "    if idx % 100 == 0:\n",
    "      logging.info('On image %d of %d', idx, len(examples))\n",
    "    xml_path = os.path.join(annotations_dir, 'xmls', example + '.xml')\n",
    "    mask_path = os.path.join(annotations_dir, 'trimaps', example + '.png')\n",
    "\n",
    "    if not os.path.exists(xml_path):\n",
    "      logging.warning('Could not find %s, ignoring example.', xml_path)\n",
    "      continue\n",
    "    with tf.gfile.GFile(xml_path, 'r') as fid:\n",
    "      xml_str = fid.read()\n",
    "    xml = etree.fromstring(xml_str)\n",
    "    data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']\n",
    "\n",
    "    try:\n",
    "      # faces_only is now forwarded; the docstring documented it but the\n",
    "      # parameter had been dropped from the signature and the call.\n",
    "      tf_example = dict_to_tf_example(\n",
    "          data, mask_path, label_map_dict, image_dir,\n",
    "          faces_only=faces_only)\n",
    "      writer.write(tf_example.SerializeToString())\n",
    "    except ValueError:\n",
    "      # Skip images with invalid JPEG/PNG payloads instead of aborting.\n",
    "      logging.warning('Invalid example: %s, ignoring.', xml_path)\n",
    "\n",
    "  writer.close()\n",
    "\n",
    "\n",
    "# TODO(derekjchow): Add test for pet/PASCAL main files.\n",
    "def main(_):\n",
    "  \"\"\"Builds train/val TFRecords from the pet dataset pointed to by FLAGS.\"\"\"\n",
    "  data_dir = FLAGS.data_dir\n",
    "  label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)\n",
    "\n",
    "  logging.info('Reading from Pet dataset.')\n",
    "  image_dir = os.path.join(data_dir, 'images')\n",
    "  annotations_dir = os.path.join(data_dir, 'annotations')\n",
    "  examples_path = os.path.join(annotations_dir, 'trainval.txt')\n",
    "  examples_list = dataset_util.read_examples_list(examples_path)\n",
    "\n",
    "  # Test images are not included in the downloaded data set, so we shall perform\n",
    "  # our own split.  Fixed seed keeps the split reproducible across runs.\n",
    "  random.seed(42)\n",
    "  random.shuffle(examples_list)\n",
    "  num_examples = len(examples_list)\n",
    "  num_train = int(0.7 * num_examples)\n",
    "  train_examples = examples_list[:num_train]\n",
    "  val_examples = examples_list[num_train:]\n",
    "  logging.info('%d training and %d validation examples.',\n",
    "               len(train_examples), len(val_examples))\n",
    "\n",
    "  train_output_path = os.path.join(FLAGS.output_dir, 'pet_train.record')\n",
    "  val_output_path = os.path.join(FLAGS.output_dir, 'pet_val.record')\n",
    "  if not FLAGS.faces_only:\n",
    "    # Full-body records carry per-object masks and are much larger, so they\n",
    "    # get distinct file names.\n",
    "    train_output_path = os.path.join(FLAGS.output_dir,\n",
    "                                     'pet_train_with_masks.record')\n",
    "    val_output_path = os.path.join(FLAGS.output_dir,\n",
    "                                   'pet_val_with_masks.record')\n",
    "  # Restored: these calls had been commented out, so main() produced no\n",
    "  # output at all.\n",
    "  create_tf_record(train_output_path, label_map_dict, annotations_dir,\n",
    "                   image_dir, train_examples, faces_only=FLAGS.faces_only)\n",
    "  create_tf_record(val_output_path, label_map_dict, annotations_dir,\n",
    "                   image_dir, val_examples, faces_only=FLAGS.faces_only)\n",
    "\n",
    "\n",
    "if __name__ == '__main__':\n",
    "  tf.app.run()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
