{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import sys\n",
    "import cv2\n",
    "import matplotlib\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "from tensorflow.keras.optimizers import Adam,SGD\n",
    "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
    "from tensorflow.keras.layers import Lambda, Conv2D, Conv2DTranspose, Input, Dense, Dropout, multiply, Dot, Concatenate,\\\n",
    "    Add, GlobalAveragePooling2D,BatchNormalization, Activation, AveragePooling2D, UpSampling2D ,MaxPooling2D\n",
    "from tensorflow.keras.applications import ResNet50,MobileNetV2,Xception,NASNetLarge,InceptionResNetV2\n",
    "# Pin TF to the first visible GPU and enable memory growth so the process\n",
    "# does not reserve all GPU memory up front. The loop is a no-op on CPU-only\n",
    "# machines (the original gpus[0] access crashed there with IndexError).\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n",
    "gpus = tf.config.experimental.list_physical_devices('GPU')\n",
    "for gpu in gpus:\n",
    "    tf.config.experimental.set_memory_growth(gpu, True)\n",
    "\n",
    "def show_image(img_arr):\n",
    "    \"\"\"Display an image array at a large figure size with axes hidden.\"\"\"\n",
    "    plt.figure(figsize=(16, 32))\n",
    "    plt.imshow(img_arr)\n",
    "    plt.axis('off')\n",
    "    plt.show()\n",
    "\n",
    "def load_image(path):\n",
    "    \"\"\"Read the image file at ``path`` into a numpy array.\"\"\"\n",
    "    return matplotlib.image.imread(path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# HED building blocks\n",
    "def side_branch(x, factor):\n",
    "    \"\"\"HED side output: 1x1-conv `x` down to a single channel, then upsample\n",
    "    it by `factor` with a bias-free learned transposed convolution.\"\"\"\n",
    "    score = Conv2D(1, (1, 1), activation=None, padding='same')(x)\n",
    "    upsampled = Conv2DTranspose(1, (2 * factor, 2 * factor), strides=factor,\n",
    "                                padding='same', use_bias=False, activation=None)(score)\n",
    "    return upsampled\n",
    "def _to_tensor(x, dtype):\n",
    "    \"\"\"Convert `x` to a TensorFlow tensor, casting to `dtype` only if needed.\"\"\"\n",
    "    tensor = tf.convert_to_tensor(x)\n",
    "    if tensor.dtype == dtype:\n",
    "        return tensor\n",
    "    return tf.cast(tensor, dtype)\n",
    "def ofuse_pixel_error(y_true, y_pred):\n",
    "    \"\"\"Mean pixel disagreement after thresholding predictions at 0.5.\"\"\"\n",
    "    hard_pred = tf.cast(tf.greater(y_pred, 0.5), tf.int32)\n",
    "    mismatch = tf.not_equal(hard_pred, tf.cast(y_true, tf.int32))\n",
    "    return tf.reduce_mean(tf.cast(mismatch, tf.float32))\n",
    "def cross_entropy_balanced(y_true, y_pred):\n",
    "    \"\"\"Class-balanced sigmoid cross entropy (the 'Equation [2]' loss used by HED).\n",
    "\n",
    "    `y_pred` is assumed to be a sigmoid probability map; it is converted\n",
    "    back to logits here so tf.nn.weighted_cross_entropy_with_logits can be\n",
    "    used. Positive (edge) pixels are up-weighted by beta/(1-beta) and the\n",
    "    total cost is rescaled by (1-beta), balancing the heavily skewed\n",
    "    edge vs. non-edge pixel counts. Returns 0 when the batch has no edges.\n",
    "    \"\"\"\n",
    "    # Clip probabilities away from {0, 1}, then invert the sigmoid to recover logits.\n",
    "    _epsilon = _to_tensor(tf.keras.backend.epsilon(), y_pred.dtype.base_dtype)\n",
    "    y_pred   = tf.clip_by_value(y_pred, _epsilon, 1 - _epsilon)\n",
    "    y_pred   = tf.math.log(y_pred/ (1 - y_pred))\n",
    "\n",
    "    y_true = tf.cast(y_true, tf.float32)\n",
    "\n",
    "    # Per-batch counts of background (neg) and edge (pos) pixels.\n",
    "    count_neg = tf.reduce_sum(1. - y_true)\n",
    "    count_pos = tf.reduce_sum(y_true)\n",
    "\n",
    "    # Equation [2]: beta is the fraction of negative (non-edge) pixels.\n",
    "    beta = count_neg / (count_neg + count_pos)\n",
    "\n",
    "    # Equation [2] divide by 1 - beta\n",
    "    pos_weight = beta / (1 - beta)\n",
    "\n",
    "    cost = tf.nn.weighted_cross_entropy_with_logits(labels=y_true, logits=y_pred, pos_weight=pos_weight)\n",
    "\n",
    "    # Multiply by 1 - beta\n",
    "    cost = tf.reduce_mean(cost * (1 - beta))\n",
    "\n",
    "    # check if image has no edge pixels return 0 else return complete error function\n",
    "    return tf.where(tf.equal(count_pos, 0.0), 0.0, cost)\n",
    "def hed(W, H, C, learning_rate=1e-5):\n",
    "    \"\"\"Build and compile a HED (Holistically-Nested Edge Detection) model.\n",
    "\n",
    "    A VGG16-style backbone emits a side output after each conv block; the\n",
    "    five side outputs are upsampled to input resolution, concatenated and\n",
    "    fused by a learned 1x1 convolution. All six maps pass through sigmoids\n",
    "    and are trained with the class-balanced cross entropy loss.\n",
    "\n",
    "    Args:\n",
    "        W, H, C: dimensions passed to Input(shape=(W, H, C)), e.g. 480, 640, 3.\n",
    "        learning_rate: Adam learning rate (default keeps the original 1e-5).\n",
    "\n",
    "    Returns:\n",
    "        A compiled tf.keras.Model with outputs [o1, o2, o3, o4, o5, ofuse].\n",
    "    \"\"\"\n",
    "    img_input = Input(shape=(W, H, C), name='input')\n",
    "\n",
    "    # Block 1 (full resolution)\n",
    "    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)\n",
    "    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)\n",
    "    b1 = side_branch(x, 1)  # W x H x 1\n",
    "    # NOTE: pooling could be replaced by dilated convolution to keep resolution.\n",
    "    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block1_pool')(x)\n",
    "\n",
    "    # Block 2 (1/2 resolution)\n",
    "    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)\n",
    "    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)\n",
    "    b2 = side_branch(x, 2)  # upsampled back to W x H x 1\n",
    "    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block2_pool')(x)\n",
    "\n",
    "    # Block 3 (1/4 resolution)\n",
    "    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)\n",
    "    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)\n",
    "    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)\n",
    "    b3 = side_branch(x, 4)  # upsampled back to W x H x 1\n",
    "    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block3_pool')(x)\n",
    "\n",
    "    # Block 4 (1/8 resolution)\n",
    "    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)\n",
    "    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)\n",
    "    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)\n",
    "    b4 = side_branch(x, 8)  # upsampled back to W x H x 1\n",
    "    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block4_pool')(x)\n",
    "\n",
    "    # Block 5 (1/16 resolution, no pooling afterwards)\n",
    "    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)\n",
    "    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)\n",
    "    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)\n",
    "    b5 = side_branch(x, 16)  # upsampled back to W x H x 1\n",
    "\n",
    "    # Fuse the five side outputs with a learned (bias-free) 1x1 convolution.\n",
    "    fuse = Concatenate(axis=-1)([b1, b2, b3, b4, b5])\n",
    "    fuse = Conv2D(1, (1, 1), padding='same', use_bias=False, activation=None)(fuse)\n",
    "\n",
    "    # Sigmoid heads; the names are referenced by the loss/metric dicts below.\n",
    "    o1    = Activation('sigmoid', name='o1')(b1)\n",
    "    o2    = Activation('sigmoid', name='o2')(b2)\n",
    "    o3    = Activation('sigmoid', name='o3')(b3)\n",
    "    o4    = Activation('sigmoid', name='o4')(b4)\n",
    "    o5    = Activation('sigmoid', name='o5')(b5)\n",
    "    ofuse = Activation('sigmoid', name='ofuse')(fuse)\n",
    "\n",
    "    model = tf.keras.Model(inputs=[img_input], outputs=[o1, o2, o3, o4, o5, ofuse])\n",
    "    # Deep supervision: every side output and the fused map use the same loss.\n",
    "    losses = {name: cross_entropy_balanced\n",
    "              for name in ('o1', 'o2', 'o3', 'o4', 'o5', 'ofuse')}\n",
    "    model.compile(loss=losses,\n",
    "                  metrics={'ofuse': ofuse_pixel_error},\n",
    "                  # `learning_rate` replaces the deprecated `lr` keyword,\n",
    "                  # which newer Keras versions reject with a TypeError.\n",
    "                  optimizer=tf.optimizers.Adam(learning_rate=learning_rate))\n",
    "\n",
    "    return model\n",
    "Hed = hed(480, 640, 3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "\n",
    "###########################train_set\n",
    "\n",
    "def trainGenerator(batch_size, data_dir=\"F:/dataset/0_stone\",\n",
    "                   image_folder=\"img_test\", mask_folder=\"edge_test\",\n",
    "                   target_size=(480, 640), seed=1):\n",
    "    \"\"\"Yield (image, [mask]*6) batches for the six-output HED model.\n",
    "\n",
    "    Images and masks come from parallel class folders under `data_dir`;\n",
    "    the shared `seed` keeps the two augmented streams aligned. Images are\n",
    "    scaled to [0, 1]; masks are binarized at 0.1 and cast to int32, then\n",
    "    repeated six times (one target per output: o1..o5 and ofuse).\n",
    "    \"\"\"\n",
    "    # Only horizontal flips are applied; both generators share the same\n",
    "    # augmentation config and seed so image/mask pairs stay in sync.\n",
    "    aug_dict = dict(horizontal_flip=True, fill_mode='nearest')\n",
    "\n",
    "    image_datagen = ImageDataGenerator(**aug_dict)\n",
    "    mask_datagen = ImageDataGenerator(**aug_dict)\n",
    "\n",
    "    image_generator = image_datagen.flow_from_directory(\n",
    "        data_dir,\n",
    "        classes=[image_folder],\n",
    "        color_mode=\"rgb\",\n",
    "        target_size=target_size,\n",
    "        class_mode=None,\n",
    "        batch_size=batch_size, seed=seed)\n",
    "\n",
    "    mask_generator = mask_datagen.flow_from_directory(\n",
    "        data_dir,\n",
    "        classes=[mask_folder],\n",
    "        color_mode=\"grayscale\",\n",
    "        target_size=target_size,\n",
    "        class_mode=None,\n",
    "        batch_size=batch_size, seed=seed)\n",
    "\n",
    "    for img, mask in zip(image_generator, mask_generator):\n",
    "        img = img / 255.\n",
    "        mask = mask / 255.\n",
    "        # Binarize: anything above the 0.1 threshold counts as an edge pixel.\n",
    "        mask[mask > 0.1] = 1\n",
    "        mask[mask <= 0.1] = 0\n",
    "        mask = tf.cast(mask, dtype=tf.int32)\n",
    "        # One mask copy per model output (deep supervision).\n",
    "        yield (img, [mask] * 6)\n",
    "\n",
    "trainset = trainGenerator(batch_size=1)\n",
    "# history = Hed.fit(trainset, steps_per_epoch=1300, epochs=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load previously trained weights (expects 'hed.h5' in the working directory).\n",
    "Hed.load_weights('hed.h5')\n",
    "# history = Hed.fit(trainset,steps_per_epoch=100,epochs=1)\n",
    "# Hed.save_weights('hed.h5')\n",
    "# Grab one batch; the six mask copies are identical, so keep only the first.\n",
    "a,[b,_,_,_,_,_] = next(trainset)\n",
    "# Five side outputs plus the fused edge map.\n",
    "o1, o2, o3, o4, o5, ofuse = Hed.predict(a)\n",
    "ofuse  = tf.squeeze(ofuse)\n",
    "# Peak response -- quick sanity check that the network produces a non-trivial map.\n",
    "print(tf.keras.backend.max(ofuse))\n",
    "show_image(tf.squeeze(a))\n",
    "show_image(tf.squeeze(ofuse))\n",
    "# Scale the fused map to 0-255 (presumably for image export; unused below).\n",
    "res = np.array(tf.squeeze(ofuse))*255"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
