{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "11387225",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n",
      "/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n",
      "/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n",
      "/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:528: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n",
      "/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:529: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n",
      "/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:530: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n",
      "/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:535: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n"
     ]
    }
   ],
   "source": [
    "from gernerate_data import split_train_and_test, load_SEG_data, load_SEG_data_for_test\n",
    "from sklearn.metrics import classification_report, auc, roc_curve\n",
    "# from models.Unet import UNet, UNet_IBA\n",
    "# from models.att_Unet import att_UNet\n",
    "# from models.r2_unet import r2_UNet\n",
    "from keras.losses import categorical_crossentropy, sparse_categorical_crossentropy, binary_crossentropy\n",
    "from keras.callbacks import LearningRateScheduler\n",
    "from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping, CSVLogger\n",
    "from keras.optimizers import sgd, Adam\n",
    "from utils.losses import dice_coef_loss, dice_coef, AUC, focal_tversky\n",
    "from sklearn.model_selection import train_test_split\n",
    "import tensorflow as tf\n",
    "import cv2 as cv\n",
    "import numpy as np\n",
    "import os\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "713e3dcd",
   "metadata": {},
   "outputs": [],
   "source": [
    "from keras.applications import VGG16\n",
    "from keras.layers import *\n",
    "from keras import Model, Sequential\n",
    "def UNet(input_width, input_height, nClasses):\n",
    "    \"\"\"Build a U-Net segmentation model on a VGG16 encoder.\n",
    "\n",
    "    Args:\n",
    "        input_width: image width in pixels; must be a multiple of 32.\n",
    "        input_height: image height in pixels; must be a multiple of 32.\n",
    "        nClasses: number of output classes (channels of the softmax head).\n",
    "\n",
    "    Returns:\n",
    "        keras Model mapping (input_height, input_width, 3) images to\n",
    "        per-pixel class probabilities with nClasses channels.\n",
    "    \"\"\"\n",
    "    assert input_height % 32 == 0\n",
    "    assert input_width % 32 == 0\n",
    "\n",
    "    # BUG FIX: previously read the notebook globals `height`/`width` instead\n",
    "    # of the function parameters, silently ignoring the arguments.\n",
    "    inputShape = (input_height, input_width, 3)\n",
    "\n",
    "    # VGG16 encoder pretrained on ImageNet; all layers are fine-tuned.\n",
    "    model = VGG16(include_top=False, weights='imagenet', input_shape=inputShape)\n",
    "    for layer in model.layers:\n",
    "        layer.trainable = True\n",
    "\n",
    "    # Decoder: repeatedly upsample, fuse with the matching encoder stage\n",
    "    # (skip connection), then refine with conv + batch norm.\n",
    "    o = UpSampling2D((2, 2))(model.get_layer(name=\"block5_conv3\").output)\n",
    "    o = concatenate([model.get_layer(name=\"block4_conv3\").output, o], axis=-1)\n",
    "    o = Conv2D(512, (3, 3), padding=\"same\")(o)\n",
    "    o = BatchNormalization()(o)\n",
    "\n",
    "    o = UpSampling2D((2, 2))(o)\n",
    "    o = concatenate([model.get_layer(name=\"block3_conv3\").output, o], axis=-1)\n",
    "    o = Conv2D(256, (3, 3), padding=\"same\")(o)\n",
    "    o = BatchNormalization()(o)\n",
    "\n",
    "    o = UpSampling2D((2, 2))(o)\n",
    "    o = concatenate([model.get_layer(name=\"block2_conv2\").output, o], axis=-1)\n",
    "    o = Conv2D(128, (3, 3), padding=\"same\")(o)\n",
    "    o = BatchNormalization()(o)\n",
    "\n",
    "    o = UpSampling2D((2, 2))(o)\n",
    "    o = concatenate([model.get_layer(name=\"block1_conv2\").output, o], axis=-1)\n",
    "    o = Conv2D(64, (3, 3), padding=\"same\")(o)\n",
    "    o = BatchNormalization()(o)\n",
    "\n",
    "    o = Conv2D(64, (3, 3), padding=\"same\")(o)\n",
    "    o = BatchNormalization()(o)\n",
    "\n",
    "    # Per-pixel softmax over nClasses channels.\n",
    "    o = Conv2D(nClasses, (3, 3), activation='softmax', padding='same', name='segmentation_output')(o)\n",
    "\n",
    "    model = Model(inputs=model.input, outputs=o)\n",
    "    return model\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "05155c8d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Colocations handled automatically by placer.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2022-04-12 13:28:47.417333: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA\n",
      "2022-04-12 13:28:47.449320: I tensorflow/core/platform/profile_utils/cpu_utils.cc:94] CPU Frequency: 2200095000 Hz\n",
      "2022-04-12 13:28:47.452817: I tensorflow/compiler/xla/service/service.cc:150] XLA service 0x680a060 executing computations on platform Host. Devices:\n",
      "2022-04-12 13:28:47.452895: I tensorflow/compiler/xla/service/service.cc:158]   StreamExecutor device (0): <undefined>, <undefined>\n",
      "2022-04-12 13:28:47.990709: I tensorflow/compiler/xla/service/service.cc:150] XLA service 0x68b6080 executing computations on platform CUDA. Devices:\n",
      "2022-04-12 13:28:47.990767: I tensorflow/compiler/xla/service/service.cc:158]   StreamExecutor device (0): Tesla V100-PCIE-32GB, Compute Capability 7.0\n",
      "2022-04-12 13:28:47.991830: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1433] Found device 0 with properties: \n",
      "name: Tesla V100-PCIE-32GB major: 7 minor: 0 memoryClockRate(GHz): 1.38\n",
      "pciBusID: 0000:09:00.0\n",
      "totalMemory: 31.72GiB freeMemory: 28.55GiB\n",
      "2022-04-12 13:28:47.991900: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1512] Adding visible gpu devices: 0\n",
      "2022-04-12 13:28:47.998314: I tensorflow/core/common_runtime/gpu/gpu_device.cc:984] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
      "2022-04-12 13:28:47.998398: I tensorflow/core/common_runtime/gpu/gpu_device.cc:990]      0 \n",
      "2022-04-12 13:28:47.998415: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1003] 0:   N \n",
      "2022-04-12 13:28:47.999396: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 27778 MB memory) -> physical GPU (device: 0, name: Tesla V100-PCIE-32GB, pci bus id: 0000:09:00.0, compute capability: 7.0)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "__________________________________________________________________________________________________\n",
      "Layer (type)                    Output Shape         Param #     Connected to                     \n",
      "==================================================================================================\n",
      "input_1 (InputLayer)            (None, 224, 224, 3)  0                                            \n",
      "__________________________________________________________________________________________________\n",
      "block1_conv1 (Conv2D)           (None, 224, 224, 64) 1792        input_1[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "block1_conv2 (Conv2D)           (None, 224, 224, 64) 36928       block1_conv1[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "block1_pool (MaxPooling2D)      (None, 112, 112, 64) 0           block1_conv2[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "block2_conv1 (Conv2D)           (None, 112, 112, 128 73856       block1_pool[0][0]                \n",
      "__________________________________________________________________________________________________\n",
      "block2_conv2 (Conv2D)           (None, 112, 112, 128 147584      block2_conv1[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "block2_pool (MaxPooling2D)      (None, 56, 56, 128)  0           block2_conv2[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "block3_conv1 (Conv2D)           (None, 56, 56, 256)  295168      block2_pool[0][0]                \n",
      "__________________________________________________________________________________________________\n",
      "block3_conv2 (Conv2D)           (None, 56, 56, 256)  590080      block3_conv1[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "block3_conv3 (Conv2D)           (None, 56, 56, 256)  590080      block3_conv2[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "block3_pool (MaxPooling2D)      (None, 28, 28, 256)  0           block3_conv3[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "block4_conv1 (Conv2D)           (None, 28, 28, 512)  1180160     block3_pool[0][0]                \n",
      "__________________________________________________________________________________________________\n",
      "block4_conv2 (Conv2D)           (None, 28, 28, 512)  2359808     block4_conv1[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "block4_conv3 (Conv2D)           (None, 28, 28, 512)  2359808     block4_conv2[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "block4_pool (MaxPooling2D)      (None, 14, 14, 512)  0           block4_conv3[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "block5_conv1 (Conv2D)           (None, 14, 14, 512)  2359808     block4_pool[0][0]                \n",
      "__________________________________________________________________________________________________\n",
      "block5_conv2 (Conv2D)           (None, 14, 14, 512)  2359808     block5_conv1[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "block5_conv3 (Conv2D)           (None, 14, 14, 512)  2359808     block5_conv2[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "up_sampling2d_1 (UpSampling2D)  (None, 28, 28, 512)  0           block5_conv3[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "concatenate_1 (Concatenate)     (None, 28, 28, 1024) 0           block4_conv3[0][0]               \n",
      "                                                                 up_sampling2d_1[0][0]            \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_1 (Conv2D)               (None, 28, 28, 512)  4719104     concatenate_1[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_1 (BatchNor (None, 28, 28, 512)  2048        conv2d_1[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "up_sampling2d_2 (UpSampling2D)  (None, 56, 56, 512)  0           batch_normalization_1[0][0]      \n",
      "__________________________________________________________________________________________________\n",
      "concatenate_2 (Concatenate)     (None, 56, 56, 768)  0           block3_conv3[0][0]               \n",
      "                                                                 up_sampling2d_2[0][0]            \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_2 (Conv2D)               (None, 56, 56, 256)  1769728     concatenate_2[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_2 (BatchNor (None, 56, 56, 256)  1024        conv2d_2[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "up_sampling2d_3 (UpSampling2D)  (None, 112, 112, 256 0           batch_normalization_2[0][0]      \n",
      "__________________________________________________________________________________________________\n",
      "concatenate_3 (Concatenate)     (None, 112, 112, 384 0           block2_conv2[0][0]               \n",
      "                                                                 up_sampling2d_3[0][0]            \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_3 (Conv2D)               (None, 112, 112, 128 442496      concatenate_3[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_3 (BatchNor (None, 112, 112, 128 512         conv2d_3[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "up_sampling2d_4 (UpSampling2D)  (None, 224, 224, 128 0           batch_normalization_3[0][0]      \n",
      "__________________________________________________________________________________________________\n",
      "concatenate_4 (Concatenate)     (None, 224, 224, 192 0           block1_conv2[0][0]               \n",
      "                                                                 up_sampling2d_4[0][0]            \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_4 (Conv2D)               (None, 224, 224, 64) 110656      concatenate_4[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_4 (BatchNor (None, 224, 224, 64) 256         conv2d_4[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_5 (Conv2D)               (None, 224, 224, 64) 36928       batch_normalization_4[0][0]      \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_5 (BatchNor (None, 224, 224, 64) 256         conv2d_5[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "segmentation_output (Conv2D)    (None, 224, 224, 2)  1154        batch_normalization_5[0][0]      \n",
      "==================================================================================================\n",
      "Total params: 21,798,850\n",
      "Trainable params: 21,796,802\n",
      "Non-trainable params: 2,048\n",
      "__________________________________________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "# ----- hyper-parameters and run configuration -----\n",
    "INIT_LR = 1e-4  # initial learning rate for Adam\n",
    "EPOCHS = 200\n",
    "BS = 8  # batch size\n",
    "\n",
    "# input geometry\n",
    "width = 224\n",
    "height = 224\n",
    "depth = 3\n",
    "target = (width, height)\n",
    "\n",
    "shuffle = True\n",
    "num_classes = 2\n",
    "\n",
    "# run name (used for log/checkpoint files) and prediction output folder\n",
    "Name = 'UNet'\n",
    "# Name = 'UNet_IBA_DESm'\n",
    "preds_savePath = \"./seg_prediction/LE/UNet\"\n",
    "\n",
    "# Pin the job to a single GPU. NOTE(review): TensorFlow was already imported\n",
    "# in the first cell; this appears to work here because TF1 enumerates GPUs\n",
    "# lazily at session creation, but setting these before any TF import is safer.\n",
    "os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n",
    "\n",
    "import keras\n",
    "keras.backend.clear_session()\n",
    "\n",
    "# Alternative architectures kept for reference:\n",
    "# model = att_UNet(width, height, num_classes)\n",
    "# model = UNet_IBA(width, height, num_classes)\n",
    "# model = r2_UNet(width, height, num_classes)\n",
    "model = UNet(width, height, num_classes)\n",
    "# weight_path = \"UNet_IBA_DESm-031-0.9417.h5\"\n",
    "# model.load_weights(weight_path)\n",
    "model.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "69471990",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(\"------------------------------ Data loading ------------------------------------\")\n",
    "\n",
    "# Dataset roots; mask/val/test folders are derived from the train-images dir.\n",
    "# trainX_dir = 'dataset/Dataset_BUSI_AN/train/images/'\n",
    "# trainX_dir = 'dataset/DES_aug/train/images/'\n",
    "trainX_dir = 'dataset/DESm/train/images/'\n",
    "trainY_dir = trainX_dir.replace('images', 'masks')\n",
    "\n",
    "valX_dir = trainX_dir.replace('train', 'val')\n",
    "valY_dir = valX_dir.replace('images', 'masks')\n",
    "\n",
    "testX_dir = trainX_dir.replace('train', 'test')\n",
    "testY_dir = testX_dir.replace('images', 'masks')\n",
    "\n",
    "train_X, train_Y, _ = load_SEG_data(trainX_dir, target=target, shuffle=True)\n",
    "#(train_X, val_X, train_Y, val_Y) = train_test_split(train_X, train_Y, test_size=0.2)\n",
    "val_X, val_Y, _ = load_SEG_data(valX_dir, target=target, shuffle=True)\n",
    "test_X, test_Y, test_img_Names, test_orignal_images = load_SEG_data_for_test(testX_dir, target=target, shuffle=False)\n",
    "\n",
    "print(\"train_X shape:\", train_X.shape)\n",
    "# BUG FIX: the label said train_X while printing train_Y's shape.\n",
    "print(\"train_Y shape:\", train_Y.shape)\n",
    "print(\"val_X shape:\", val_X.shape)\n",
    "print(\"val_Y shape:\", val_Y.shape)\n",
    "print(\"test_X shape:\", test_X.shape)\n",
    "print(\"test_Y shape:\", test_Y.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "09ce02b5",
   "metadata": {},
   "outputs": [],
   "source": [
    "def binary_crossentropy(y_true, y_pred):\n",
    "    \"\"\"Weighted binary cross-entropy; e scales the negative-class term.\n",
    "\n",
    "    NOTE(review): this shadows keras.losses.binary_crossentropy imported in\n",
    "    the first cell; rename it if the stock Keras loss is ever needed.\n",
    "    \"\"\"\n",
    "    e = 1.0\n",
    "    return K.mean(-(y_true*K.log(y_pred+K.epsilon())+\n",
    "                    e*(1-y_true)*K.log(1-y_pred+K.epsilon())),\n",
    "                  axis=-1)\n",
    "\n",
    "def tversky(y_true, y_pred, smooth=1):\n",
    "    \"\"\"Tversky index; with alpha=0.5 this equals the Dice coefficient.\"\"\"\n",
    "    y_true_pos = K.flatten(y_true)\n",
    "    y_pred_pos = K.flatten(y_pred)\n",
    "    true_pos = K.sum(y_true_pos * y_pred_pos)\n",
    "    false_neg = K.sum(y_true_pos * (1 - y_pred_pos))\n",
    "    false_pos = K.sum((1 - y_true_pos) * y_pred_pos)\n",
    "    alpha = 0.5\n",
    "    return (true_pos + smooth) / (true_pos + alpha * false_neg + (1 - alpha) * false_pos + smooth)\n",
    "\n",
    "def tversky_loss(y_true, y_pred):\n",
    "    \"\"\"Loss form of the Tversky index (1 - tversky).\n",
    "\n",
    "    NOTE(review): K.flatten requires y_true and y_pred to contain the same\n",
    "    number of elements; with the model's 2-channel softmax head the masks must\n",
    "    be one-hot encoded, otherwise fit() fails with an incompatible-shapes\n",
    "    error ([401408] vs. [802816], as seen in the recorded run) -- confirm the\n",
    "    mask format produced by load_SEG_data.\n",
    "    \"\"\"\n",
    "    return 1 - tversky(y_true, y_pred)\n",
    "\n",
    "\n",
    "# --- callbacks ---\n",
    "csv_logger = CSVLogger(Name+'.log')\n",
    "\n",
    "# BUG FIX: dice_coef should be maximised. mode='auto' resolves to 'min' for\n",
    "# metric names Keras does not recognise, which would cut the LR in the wrong\n",
    "# direction; use mode='max' explicitly.\n",
    "reduce_lr = ReduceLROnPlateau(monitor='dice_coef', factor=0.1, patience=30, min_lr=1e-8, mode='max', verbose=1)\n",
    "\n",
    "# Best-so-far checkpoint. BUG FIX: save_best_only was the string 'True'\n",
    "# (truthy, so it happened to work) -- pass the boolean the API expects.\n",
    "checkpoint_period1 = ModelCheckpoint(Name + '-{epoch:03d}-{val_acc:.4f}.h5',\n",
    "                                     monitor='val_acc', mode='auto', save_best_only=True)\n",
    "\n",
    "# Periodic checkpoint every 20 epochs; note it shares the filename pattern\n",
    "# above, so files from the two callbacks are interleaved.\n",
    "checkpoint_period2 = ModelCheckpoint(Name + '-{epoch:03d}-{val_acc:.4f}.h5',\n",
    "                                     monitor='val_acc', mode='auto', period=20)\n",
    "\n",
    "# model.compile(loss=[tversky_dice_loss], optimizer=sgd, metrics=['accuracy', dice_coef])\n",
    "# model.compile(optimizer=Adam(lr=INIT_LR), loss=binary_crossentropy, metrics=['accuracy', dice_coef, AUC])\n",
    "model.compile(optimizer=Adam(lr=INIT_LR), loss=tversky_loss, metrics=['accuracy', dice_coef])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "629d9f45",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "================================================ begin training ================================================\n",
      "WARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use tf.cast instead.\n",
      "WARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/tensorflow/python/ops/math_grad.py:102: div (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Deprecated in favor of operator or tf.math.divide.\n",
      "Train on 678 samples, validate on 101 samples\n",
      "Epoch 1/200\n"
     ]
    },
    {
     "ename": "InvalidArgumentError",
     "evalue": "Incompatible shapes: [401408] vs. [802816]\n\t [[{{node loss/segmentation_output_loss/mul}}]]\n\t [[{{node loss/mul}}]]",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mInvalidArgumentError\u001b[0m                      Traceback (most recent call last)",
      "\u001b[0;32m/tmp/ipykernel_36615/2408717691.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m      8\u001b[0m                     \u001b[0mverbose\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      9\u001b[0m                     \u001b[0mcallbacks\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mcheckpoint_period1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcheckpoint_period2\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mreduce_lr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcsv_logger\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 10\u001b[0;31m                     shuffle=True)\n\u001b[0m\u001b[1;32m     11\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/usr/local/lib/python3.7/site-packages/keras/engine/training.py\u001b[0m in \u001b[0;36mfit\u001b[0;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)\u001b[0m\n\u001b[1;32m   1037\u001b[0m                                         \u001b[0minitial_epoch\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minitial_epoch\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1038\u001b[0m                                         \u001b[0msteps_per_epoch\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msteps_per_epoch\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1039\u001b[0;31m                                         validation_steps=validation_steps)\n\u001b[0m\u001b[1;32m   1040\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1041\u001b[0m     def evaluate(self, x=None, y=None,\n",
      "\u001b[0;32m/usr/local/lib/python3.7/site-packages/keras/engine/training_arrays.py\u001b[0m in \u001b[0;36mfit_loop\u001b[0;34m(model, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch, steps_per_epoch, validation_steps)\u001b[0m\n\u001b[1;32m    197\u001b[0m                     \u001b[0mins_batch\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mins_batch\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtoarray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    198\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 199\u001b[0;31m                 \u001b[0mouts\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mins_batch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    200\u001b[0m                 \u001b[0mouts\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mto_list\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mouts\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    201\u001b[0m                 \u001b[0;32mfor\u001b[0m \u001b[0ml\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mo\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mzip\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mout_labels\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mouts\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/usr/local/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, inputs)\u001b[0m\n\u001b[1;32m   2713\u001b[0m                 \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_legacy_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   2714\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2715\u001b[0;31m             \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   2716\u001b[0m         \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   2717\u001b[0m             \u001b[0;32mif\u001b[0m \u001b[0mpy_any\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mis_tensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mx\u001b[0m \u001b[0;32min\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/usr/local/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py\u001b[0m in \u001b[0;36m_call\u001b[0;34m(self, inputs)\u001b[0m\n\u001b[1;32m   2673\u001b[0m             \u001b[0mfetched\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_callable_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0marray_vals\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun_metadata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   2674\u001b[0m         \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2675\u001b[0;31m             \u001b[0mfetched\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_callable_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0marray_vals\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   2676\u001b[0m         \u001b[0;32mreturn\u001b[0m \u001b[0mfetched\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   2677\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/usr/local/lib/python3.7/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1437\u001b[0m           ret = tf_session.TF_SessionRunCallable(\n\u001b[1;32m   1438\u001b[0m               \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_handle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstatus\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1439\u001b[0;31m               run_metadata_ptr)\n\u001b[0m\u001b[1;32m   1440\u001b[0m         \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1441\u001b[0m           \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/errors_impl.py\u001b[0m in \u001b[0;36m__exit__\u001b[0;34m(self, type_arg, value_arg, traceback_arg)\u001b[0m\n\u001b[1;32m    526\u001b[0m             \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    527\u001b[0m             \u001b[0mcompat\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_text\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mc_api\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_Message\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstatus\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstatus\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 528\u001b[0;31m             c_api.TF_GetCode(self.status.status))\n\u001b[0m\u001b[1;32m    529\u001b[0m     \u001b[0;31m# Delete the underlying status object from memory otherwise it stays alive\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    530\u001b[0m     \u001b[0;31m# as there is a reference to status from this from the traceback due to\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mInvalidArgumentError\u001b[0m: Incompatible shapes: [401408] vs. [802816]\n\t [[{{node loss/segmentation_output_loss/mul}}]]\n\t [[{{node loss/mul}}]]"
     ]
    }
   ],
   "source": [
    "print(\"================================================ begin training ================================================\")\n",
    "\n",
    "# Train with checkpointing, LR scheduling and CSV logging.\n",
    "training_callbacks = [checkpoint_period1, checkpoint_period2, reduce_lr, csv_logger]\n",
    "\n",
    "history = model.fit(train_X, train_Y,\n",
    "                    # initial_epoch=60,\n",
    "                    validation_data=(val_X, val_Y),\n",
    "                    batch_size=BS,\n",
    "                    epochs=EPOCHS,\n",
    "                    shuffle=True,\n",
    "                    verbose=1,\n",
    "                    callbacks=training_callbacks)\n",
    "# NOTE(review): the recorded run crashed here with incompatible shapes\n",
    "# ([401408] vs. [802816]) -- the masks appear to have one channel while the\n",
    "# softmax head has num_classes=2; the labels presumably need one-hot\n",
    "# encoding (or the head a single sigmoid channel). Confirm before re-running.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1a377996",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(\"================================================ Saving model ================================================\")\n",
    "\n",
    "# save the model\n",
    "# NOTE(review): both the save and the weight-load below are commented out,\n",
    "# so nothing is persisted by this cell beyond whatever the checkpoint\n",
    "# callbacks wrote during training - confirm this is intended.\n",
    "# model.save( Name + '.h5')\n",
    "# model.load_weights('U-net_BUSI_224x224.h5', by_name = True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fec34cab",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Evaluate the trained model on the held-out test set, then compute a\n",
    "# pixel-wise ROC / AUC over the flattened ground-truth and predicted masks.\n",
    "loss, acc, dice_coef = model.evaluate(test_X, test_Y, verbose=0)\n",
    "print('Test loss:', loss)\n",
    "print('Test accuracy:', acc)\n",
    "print('Test dice_coef:', dice_coef)\n",
    "\n",
    "# test the model and generate results\n",
    "# (single forward pass: the original ran model.predict twice over the same\n",
    "#  test_X - once at batch_size=4 and again at batch_size=8 - doubling\n",
    "#  inference time for identical outputs)\n",
    "preds = model.predict(test_X, batch_size=4, verbose=1)\n",
    "\n",
    "test_mask = test_Y.flatten()\n",
    "pred_mask = preds.flatten()\n",
    "fpr, tpr, thresholds = roc_curve(test_mask, pred_mask, pos_label=1)\n",
    "roc_auc = auc(fpr, tpr)\n",
    "print(\"AUC:\", roc_auc)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1a47c439",
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "\n",
    "# Plot training curves. The x-axis length is derived from the recorded\n",
    "# history rather than hardcoded (the original used np.arange(0, 2), which\n",
    "# raises a dimension mismatch in plt.plot whenever EPOCHS != 2 or training\n",
    "# stops early).\n",
    "N = np.arange(0, len(history.history[\"loss\"]))\n",
    "plt.style.use(\"ggplot\")\n",
    "plt.figure()\n",
    "plt.plot(N, history.history[\"loss\"], label=\"train_loss\")\n",
    "plt.plot(N, history.history[\"val_loss\"], label=\"val_loss\")\n",
    "plt.plot(N, history.history[\"acc\"], label=\"train_acc\")\n",
    "plt.plot(N, history.history[\"val_acc\"], label=\"val_acc\")\n",
    "# plt.title(\"Training Loss and Accuracy\")\n",
    "plt.xlabel(\"Epoch #\")\n",
    "plt.ylabel(\"Loss/Accuracy\")\n",
    "plt.legend()\n",
    "plt.savefig('./output/' + Name + '_plot.png')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "832a72c7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# def mask2gray(mask, input_type=None):\n",
    "#     if input_type == 'pred':  # NOTE(review): was `is 'pred'` - identity on str literals is unreliable; also the second loop below passes 'pred_mask', which would skip this branch\n",
    "#         mask0 = mask[:, :, 0]\n",
    "#         mask1 = mask[:, :, 1]\n",
    "#         mask = mask1 - mask0\n",
    "#         print(\"mask.shape: \", mask.shape)\n",
    "#         #mask = np.argmax(mask, axis=-1)\n",
    "#         # print( mask[mask > 0.5] )\n",
    "#         mask[mask > 0] = 255\n",
    "#         mask[mask <= 0] = 0\n",
    "#     mask = mask.astype(dtype=np.uint8)\n",
    "#     rst = mask.copy()\n",
    "#     cv.normalize(mask, rst, alpha=0, beta=255, norm_type=cv.NORM_MINMAX)\n",
    "#     return rst\n",
    "\n",
    "# print(\"------------------------------ Saving predictions ------------------------------------\")\n",
    "\n",
    "# img_size = (width, height)\n",
    "# for i in range(test_X.shape[0]):\n",
    "#     img = test_orignal_images[i]\n",
    "\n",
    "#     gt = test_Y[i, :, :, 1]\n",
    "#     gt = mask2gray(gt)\n",
    "\n",
    "#     pred = preds[i]\n",
    "#     prediction = mask2gray(pred, input_type='pred')\n",
    "\n",
    "#     save_img = np.zeros([img_size[0], img_size[1]*3, 3], dtype=np.uint8)\n",
    "#     save_img[:, 0:img_size[0], :] = img[:, :, ::-1]\n",
    "#     save_img[:, img_size[0]:img_size[1] * 2, :] = cv.cvtColor(gt, cv.COLOR_GRAY2RGB)\n",
    "#     save_img[:, img_size[0] * 2:img_size[1] * 3, :] = cv.cvtColor(prediction, cv.COLOR_GRAY2RGB)\n",
    "\n",
    "#     savePath = os.path.join(preds_savePath, \"joint\")\n",
    "\n",
    "#     if not os.path.exists(savePath):\n",
    "#         os.makedirs(savePath)\n",
    "\n",
    "#     cv.imwrite(savePath + \"{0}\".format(test_img_Names[i]), save_img)\n",
    "\n",
    "# for i in range(test_X.shape[0]):\n",
    "\n",
    "#     img = test_orignal_images[i]\n",
    "\n",
    "#     gt = test_Y[i, :, :, 1]\n",
    "#     gt = mask2gray(gt)\n",
    "#     gt = cv.cvtColor(gt, cv.COLOR_GRAY2RGB)\n",
    "\n",
    "#     pred_mask = preds[i]\n",
    "#     pred_mask = mask2gray(pred_mask, input_type='pred_mask')\n",
    "\n",
    "#     savePath = os.path.join(preds_savePath, \"Imgs/\")\n",
    "#     saveMaskPath = os.path.join(preds_savePath, \"Mask/\")\n",
    "#     savePredPath = os.path.join(preds_savePath, \"Pred/\")\n",
    "\n",
    "#     if not os.path.exists(savePath):\n",
    "#         os.makedirs(savePath)\n",
    "#     if not os.path.exists(saveMaskPath):\n",
    "#         os.makedirs(saveMaskPath)\n",
    "#     if not os.path.exists(savePredPath):\n",
    "#         os.makedirs(savePredPath)\n",
    "\n",
    "#     cv.imwrite(os.path.join(savePath, \"{0}\".format(test_img_Names[i])), img)\n",
    "#     cv.imwrite(os.path.join(saveMaskPath, \"{0}\".format(test_img_Names[i])), gt)\n",
    "#     cv.imwrite(os.path.join(savePredPath, \"{0}\".format(test_img_Names[i])), pred_mask)  # NOTE(review): was `pred`, a stale variable from the first loop\n",
    "\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
