{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Select GPU 0 (PCI bus order) before TensorFlow is imported.\n",
    "import os\n",
    "os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n",
    "import tensorflow as tf\n",
    "# TF1-style session; allow_growth stops TF from reserving all GPU memory up front.\n",
    "gpu_options = tf.GPUOptions(allow_growth=True)\n",
    "sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n",
    "\n",
    "from PIL import Image\n",
    "import os.path as osp\n",
    "import glob  \n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "from tqdm import tqdm\n",
    "import cv2\n",
    "from keras.models import load_model\n",
    "# NOTE(review): star import - presumably supplies get_light_map_single,\n",
    "# normalize_pic, resize_img_512_3d and show_active_img_and_save_denoise\n",
    "# used in edge_detecton() below; confirm against helper.py.\n",
    "from helper import *\n",
    "\n",
    "# sketchKeras model used by edge_detecton() below.\n",
    "mod = load_model('mod.h5')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def RandomCenterCrop(path, min_size, max_size):\n",
    "    '''\n",
    "    Simulate dataset step 1: cut a square patch of random size at a\n",
    "    random position from the image at `path`.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    path : str\n",
    "        Image file readable by cv2.imread (BGR).\n",
    "    min_size, max_size : int\n",
    "        Inclusive lower / exclusive upper bound for the square side.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    numpy.ndarray\n",
    "        The cropped (size, size, 3) patch.\n",
    "\n",
    "    Raises\n",
    "    ------\n",
    "    ValueError\n",
    "        If the image cannot be read.\n",
    "    '''\n",
    "    size = np.random.randint(min_size, max_size)\n",
    "\n",
    "    img = cv2.imread(path)\n",
    "    if img is None:  # cv2.imread returns None instead of raising\n",
    "        raise ValueError('cannot read image: ' + path)\n",
    "    h, w, _ = img.shape\n",
    "\n",
    "    # Clamp the side so the crop always fits inside the image; the\n",
    "    # original code raised ValueError from randint (low >= high)\n",
    "    # whenever size >= h or size >= w.\n",
    "    size = min(size, h, w)\n",
    "\n",
    "    top = np.random.randint(0, h - size + 1)\n",
    "    left = np.random.randint(0, w - size + 1)\n",
    "\n",
    "    return img[top:size+top, left:size+left, :]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_patch(path, min_patch_size, max_patch_size):\n",
    "    '''\n",
    "    Cut a square patch of random size from the centre of the image\n",
    "    (samples a texture patch from the clothes).\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    path : str\n",
    "        Image file readable by cv2.imread (BGR).\n",
    "    min_patch_size, max_patch_size : int\n",
    "        Inclusive lower / exclusive upper bound for the square side.\n",
    "\n",
    "    Raises\n",
    "    ------\n",
    "    ValueError\n",
    "        If the image cannot be read.\n",
    "    '''\n",
    "    patch_size = np.random.randint(min_patch_size, max_patch_size)\n",
    "\n",
    "    img = cv2.imread(path)\n",
    "    if img is None:  # cv2.imread returns None instead of raising\n",
    "        raise ValueError('cannot read image: ' + path)\n",
    "    h, w, _ = img.shape\n",
    "\n",
    "    # Clamp so the patch stays inside the image; slicing past the edge\n",
    "    # would silently return a smaller, non-square patch.\n",
    "    patch_size = min(patch_size, h, w)\n",
    "\n",
    "    center_h = h/2\n",
    "    center_w = w/2\n",
    "    half = patch_size/2\n",
    "\n",
    "    return img[int(center_h - half):int(center_h + half), int(center_w - half):int(center_w + half), :]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def edge_detecton(path):\n",
    "    '''\n",
    "    Extract a sketch (line drawing) from the image at `path` with the\n",
    "    sketchKeras model `mod` loaded above; also writes the result to\n",
    "    'sketchKeras.jpg' via show_active_img_and_save_denoise.\n",
    "\n",
    "    Returns the sketch image produced by show_active_img_and_save_denoise.\n",
    "    '''\n",
    "    from_mat = cv2.imread(path)\n",
    "    width = float(from_mat.shape[1])\n",
    "    height = float(from_mat.shape[0])\n",
    "    # Resize so the longer side becomes 512 px, keeping the aspect ratio.\n",
    "    if (width > height):\n",
    "        new_width = 512\n",
    "        new_height = int(512 / width * height)\n",
    "    else:\n",
    "        new_width = int(512 / height * width)\n",
    "        new_height = 512\n",
    "    from_mat = cv2.resize(from_mat, (new_width, new_height), interpolation=cv2.INTER_AREA)\n",
    "    from_mat = from_mat.transpose((2, 0, 1))  # HWC -> CHW\n",
    "    # np.float was removed in NumPy 1.20+; the builtin float is the same dtype.\n",
    "    light_map = np.zeros(from_mat.shape, dtype=float)\n",
    "    for channel in range(3):\n",
    "        light_map[channel] = get_light_map_single(from_mat[channel])\n",
    "    light_map = normalize_pic(light_map)\n",
    "    light_map = resize_img_512_3d(light_map)\n",
    "    line_mat = mod.predict(light_map, batch_size=1)\n",
    "    line_mat = line_mat.transpose((3, 1, 2, 0))[0]\n",
    "    # Crop the padded 512x512 model output back to the resized extent.\n",
    "    line_mat = line_mat[0:int(new_height), 0:int(new_width), :]\n",
    "\n",
    "    # Collapse the channel axis to a single-channel sketch.\n",
    "    line_mat = np.amax(line_mat, 2)\n",
    "\n",
    "    sketchKeras = show_active_img_and_save_denoise('sketchKeras', line_mat, 'sketchKeras.jpg')\n",
    "\n",
    "    return sketchKeras"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_mask(path):\n",
    "    '''\n",
    "    Extract the clothes mask from the sketch image at `path`.\n",
    "\n",
    "    The image is binarized and segmented with the trapped-ball /\n",
    "    flood-fill pipeline; region label 1 is treated as background and\n",
    "    mapped to 0, every other label to 1.\n",
    "\n",
    "    Returns a numpy array with 1 on the clothes region, 0 elsewhere.\n",
    "    '''\n",
    "    from linefiller.trappedball_fill import trapped_ball_fill_multi, flood_fill_multi, mark_fill, build_fill_map, merge_fill, \\\n",
    "    show_fill_map\n",
    "    from linefiller.thinning import thinning\n",
    "\n",
    "    im = cv2.imread(path, cv2.IMREAD_GRAYSCALE)\n",
    "    ret, binary = cv2.threshold(im, 220, 255, cv2.THRESH_BINARY)\n",
    "\n",
    "    fills = []\n",
    "    result = binary\n",
    "\n",
    "    # Fill with progressively smaller trapped balls, then flood-fill the rest.\n",
    "    fill = trapped_ball_fill_multi(result, 3, method='max')\n",
    "    fills += fill\n",
    "    result = mark_fill(result, fill)\n",
    "\n",
    "    fill = trapped_ball_fill_multi(result, 2, method=None)\n",
    "    fills += fill\n",
    "    result = mark_fill(result, fill)\n",
    "\n",
    "    fill = trapped_ball_fill_multi(result, 1, method=None)\n",
    "    fills += fill\n",
    "    result = mark_fill(result, fill)\n",
    "\n",
    "    fill = flood_fill_multi(result)\n",
    "    fills += fill\n",
    "\n",
    "    fillmap = build_fill_map(result, fills)\n",
    "\n",
    "    fillmap = merge_fill(fillmap)\n",
    "\n",
    "    # Vectorized form of the original per-pixel double loop:\n",
    "    # label 1 -> 0 (background), every other label -> 1 (foreground).\n",
    "    fillmap = (fillmap != 1).astype(fillmap.dtype)\n",
    "\n",
    "    return fillmap"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "source_data_path = \"original image path\"#\"/data4/wangpengxiao/danbooru2017/original\"\n",
    "# Gather every *.jpg and *.png one directory level below the data root,\n",
    "# in a deterministic (sorted) order.\n",
    "jpg_paths = glob.glob(osp.join(source_data_path, '*/*.jpg'))\n",
    "png_paths = glob.glob(osp.join(source_data_path, '*/*.png'))\n",
    "source_img_path = sorted(jpg_paths + png_paths)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# simulate step 1 of the paper, part 1: random crops and center patches"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "random_crop_path = \"random crop save path\"#\"/data4/wangpengxiao/zalando_random_crop\"\n",
    "patch_path = \"small path save path\"#\"/data4/wangpengxiao/zalando_center_patch\"\n",
    "for path in tqdm(source_img_path):\n",
    "    try:\n",
    "        # step1_1: randomly positioned square crop\n",
    "        r_im = RandomCenterCrop(path, 64, 256)\n",
    "        cv2.imwrite(osp.join(random_crop_path, osp.basename(path)), r_im)\n",
    "        # step1_2: square patch cut from the image centre\n",
    "        p_im = get_patch(path, 64, 256)\n",
    "        cv2.imwrite(osp.join(patch_path, osp.basename(path)), p_im)\n",
    "    except Exception:\n",
    "        # Unreadable/broken images are dropped from the dataset.\n",
    "        # os.remove replaces os.system(\"rm \" + path), which was a\n",
    "        # shell-injection risk on unquoted paths; a narrowed except\n",
    "        # no longer swallows KeyboardInterrupt/SystemExit.\n",
    "        os.remove(path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# simulate step 1 of the paper, part 2: irregular region pictures"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from linefiller.trappedball_fill import trapped_ball_fill_multi, flood_fill_multi, mark_fill, build_fill_map, merge_fill, \\\n",
    "    show_fill_map\n",
    "from linefiller.thinning import thinning\n",
    "\n",
    "def get_region_picture(path):\n",
    "    '''\n",
    "    Extract the irregularly shaped foreground region of the image at\n",
    "    `path`; the background is zeroed (black) so the result can be\n",
    "    rotated later without artifacts.\n",
    "\n",
    "    Returns a uint8 BGR image with the background set to 0.\n",
    "    '''\n",
    "    im = cv2.imread(path, cv2.IMREAD_GRAYSCALE)\n",
    "    ret, binary = cv2.threshold(im, 200, 255, cv2.THRESH_BINARY)\n",
    "\n",
    "    fills = []\n",
    "    result = binary\n",
    "\n",
    "    # Fill with progressively smaller trapped balls, then flood-fill the rest.\n",
    "    fill = trapped_ball_fill_multi(result, 3, method='max')\n",
    "    fills += fill\n",
    "    result = mark_fill(result, fill)\n",
    "\n",
    "    fill = trapped_ball_fill_multi(result, 2, method=None)\n",
    "    fills += fill\n",
    "    result = mark_fill(result, fill)\n",
    "\n",
    "    fill = trapped_ball_fill_multi(result, 1, method=None)\n",
    "    fills += fill\n",
    "    result = mark_fill(result, fill)\n",
    "\n",
    "    fill = flood_fill_multi(result)\n",
    "    fills += fill\n",
    "\n",
    "    fillmap = build_fill_map(result, fills)\n",
    "\n",
    "    fillmap = merge_fill(fillmap)\n",
    "\n",
    "    fillmap = thinning(fillmap)\n",
    "\n",
    "    # Region mask, vectorized form of the original per-pixel double loop:\n",
    "    # label 0 -> 1 (keep), every other label -> 0 (drop).\n",
    "    fillmap = (fillmap == 0).astype(fillmap.dtype)\n",
    "\n",
    "    # Broadcast the mask over all three channels to get the region picture\n",
    "    # (replaces building an explicit 3-channel copy of the mask).\n",
    "    im = cv2.imread(path)\n",
    "    im = im * fillmap[:, :, np.newaxis]\n",
    "\n",
    "    return im.astype('uint8')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "region_picture_path = \"region patch save path\"#\"/data4/wangpengxiao/danbooru2017/original_region_picture\"\n",
    "# Write the masked-out region picture for every source image.\n",
    "for path in tqdm(source_img_path):\n",
    "    save_to = osp.join(region_picture_path, osp.basename(path))\n",
    "    cv2.imwrite(save_to, get_region_picture(path))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
