{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "f355316a",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n",
      "/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n",
      "/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n",
      "/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:528: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n",
      "/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:529: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n",
      "/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:530: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n",
      "/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:535: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "WARNING: The TensorFlow contrib module will not be included in TensorFlow 2.0.\n",
      "For more information, please see:\n",
      "  * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n",
      "  * https://github.com/tensorflow/addons\n",
      "If you depend on functionality not listed there, please file an issue.\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from gernerate_data import load_clas_seg_data, load_clas_seg_data_for_test, load_SEG_data_for_test\n",
    "import tensorflow as tf\n",
    "from sklearn.metrics import classification_report, auc, roc_curve\n",
    "from keras.utils.np_utils import *\n",
    "from keras.callbacks import LearningRateScheduler\n",
    "from models.MTL_IBA import MTL_IBA, MTL_IBA_h3, MTL_IBA_cross3,MTL_IBA_s2c\n",
    "from models.MTL_Attention import MTL_Attention_model\n",
    "# from models.shareLayer import create_pair_model\n",
    "from sklearn.preprocessing import LabelBinarizer, label_binarize\n",
    "# from utils.losses import dice_coef_loss, dice_coef, dice_p_bce, dice_p_focal, tversky_loss, focal_loss, focal_tversky, \\\n",
    "#     p_r_f1_iou, generalized_dice_coeff, generalized_dice_loss\n",
    "from keras.losses import categorical_crossentropy, mean_squared_error, binary_crossentropy\n",
    "from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping, CSVLogger\n",
    "from keras.optimizers import Adam\n",
    "import matplotlib.pyplot as plt\n",
    "import keras.backend as K\n",
    "from keras import Model\n",
    "import utils_paths\n",
    "import numpy as np\n",
    "import pickle\n",
    "import os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "3ed814ad",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "------------------------------------------------ Preparing model ------------------------------------------------\n",
      "WARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Colocations handled automatically by placer.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2022-08-15 09:27:44.078512: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA\n",
      "2022-08-15 09:27:44.125220: I tensorflow/core/platform/profile_utils/cpu_utils.cc:94] CPU Frequency: 2200095000 Hz\n",
      "2022-08-15 09:27:44.130025: I tensorflow/compiler/xla/service/service.cc:150] XLA service 0x68b78d0 executing computations on platform Host. Devices:\n",
      "2022-08-15 09:27:44.130113: I tensorflow/compiler/xla/service/service.cc:158]   StreamExecutor device (0): <undefined>, <undefined>\n",
      "2022-08-15 09:27:44.862045: I tensorflow/compiler/xla/service/service.cc:150] XLA service 0x6963900 executing computations on platform CUDA. Devices:\n",
      "2022-08-15 09:27:44.862131: I tensorflow/compiler/xla/service/service.cc:158]   StreamExecutor device (0): Tesla V100-PCIE-32GB, Compute Capability 7.0\n",
      "2022-08-15 09:27:44.863726: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1433] Found device 0 with properties: \n",
      "name: Tesla V100-PCIE-32GB major: 7 minor: 0 memoryClockRate(GHz): 1.38\n",
      "pciBusID: 0000:05:00.0\n",
      "totalMemory: 31.72GiB freeMemory: 31.31GiB\n",
      "2022-08-15 09:27:44.863784: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1512] Adding visible gpu devices: 0\n",
      "2022-08-15 09:27:44.868849: I tensorflow/core/common_runtime/gpu/gpu_device.cc:984] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
      "2022-08-15 09:27:44.868944: I tensorflow/core/common_runtime/gpu/gpu_device.cc:990]      0 \n",
      "2022-08-15 09:27:44.868998: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1003] 0:   N \n",
      "2022-08-15 09:27:44.870384: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 30458 MB memory) -> physical GPU (device: 0, name: Tesla V100-PCIE-32GB, pci bus id: 0000:05:00.0, compute capability: 7.0)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n",
      "__________________________________________________________________________________________________\n",
      "Layer (type)                    Output Shape         Param #     Connected to                     \n",
      "==================================================================================================\n",
      "input_1 (InputLayer)            (None, 512, 512, 3)  0                                            \n",
      "__________________________________________________________________________________________________\n",
      "block1_conv1 (Conv2D)           (None, 512, 512, 64) 1792        input_1[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "block1_conv2 (Conv2D)           (None, 512, 512, 64) 36928       block1_conv1[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "block1_pool (MaxPooling2D)      (None, 256, 256, 64) 0           block1_conv2[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "block2_conv1 (Conv2D)           (None, 256, 256, 128 73856       block1_pool[0][0]                \n",
      "__________________________________________________________________________________________________\n",
      "block2_conv2 (Conv2D)           (None, 256, 256, 128 147584      block2_conv1[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "block2_pool (MaxPooling2D)      (None, 128, 128, 128 0           block2_conv2[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "block3_conv1 (Conv2D)           (None, 128, 128, 256 295168      block2_pool[0][0]                \n",
      "__________________________________________________________________________________________________\n",
      "block3_conv2 (Conv2D)           (None, 128, 128, 256 590080      block3_conv1[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "block3_conv3 (Conv2D)           (None, 128, 128, 256 590080      block3_conv2[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "block3_pool (MaxPooling2D)      (None, 64, 64, 256)  0           block3_conv3[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "block4_c_conv1 (Conv2D)         (None, 64, 64, 256)  590080      block3_pool[0][0]                \n",
      "__________________________________________________________________________________________________\n",
      "block4_s_conv1 (Conv2D)         (None, 64, 64, 256)  590080      block3_pool[0][0]                \n",
      "__________________________________________________________________________________________________\n",
      "block4_c_conv2 (Conv2D)         (None, 64, 64, 256)  590080      block4_c_conv1[0][0]             \n",
      "__________________________________________________________________________________________________\n",
      "block4_s_conv2 (Conv2D)         (None, 64, 64, 256)  590080      block4_s_conv1[0][0]             \n",
      "__________________________________________________________________________________________________\n",
      "block4_c_conv3 (Conv2D)         (None, 64, 64, 256)  590080      block4_c_conv2[0][0]             \n",
      "__________________________________________________________________________________________________\n",
      "block4_s_conv3 (Conv2D)         (None, 64, 64, 256)  590080      block4_s_conv2[0][0]             \n",
      "__________________________________________________________________________________________________\n",
      "add_1 (Add)                     (None, 64, 64, 256)  0           block4_s_conv3[0][0]             \n",
      "                                                                 block4_s_conv2[0][0]             \n",
      "                                                                 block4_c_conv3[0][0]             \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_1 (Conv2D)               (None, 64, 64, 256)  65792       add_1[0][0]                      \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_2 (Conv2D)               (None, 64, 64, 256)  65792       conv2d_1[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "iba (IBALayer)                  (None, 64, 64, 256)  0           block4_c_conv3[0][0]             \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_3 (Conv2D)               (None, 64, 64, 256)  65792       conv2d_2[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "multiply_1 (Multiply)           (None, 64, 64, 256)  0           conv2d_3[0][0]                   \n",
      "                                                                 iba[0][0]                        \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_3 (BatchNor (None, 64, 64, 256)  1024        multiply_1[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "activation_1 (Activation)       (None, 64, 64, 256)  0           batch_normalization_3[0][0]      \n",
      "__________________________________________________________________________________________________\n",
      "block4_s_pool (MaxPooling2D)    (None, 32, 32, 256)  0           activation_1[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "block5_s_conv1 (Conv2D)         (None, 32, 32, 512)  1180160     block4_s_pool[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "block5_s_conv2 (Conv2D)         (None, 32, 32, 512)  2359808     block5_s_conv1[0][0]             \n",
      "__________________________________________________________________________________________________\n",
      "block5_s_conv3 (Conv2D)         (None, 32, 32, 512)  2359808     block5_s_conv2[0][0]             \n",
      "__________________________________________________________________________________________________\n",
      "up_sampling2d_1 (UpSampling2D)  (None, 64, 64, 512)  0           block5_s_conv3[0][0]             \n",
      "__________________________________________________________________________________________________\n",
      "concatenate_2 (Concatenate)     (None, 64, 64, 768)  0           block4_s_conv3[0][0]             \n",
      "                                                                 up_sampling2d_1[0][0]            \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_7 (Conv2D)               (None, 64, 64, 512)  3539456     concatenate_2[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_5 (BatchNor (None, 64, 64, 512)  2048        conv2d_7[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "activation_3 (Activation)       (None, 64, 64, 512)  0           batch_normalization_5[0][0]      \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_8 (Conv2D)               (None, 64, 64, 512)  2359808     activation_3[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_6 (BatchNor (None, 64, 64, 512)  2048        conv2d_8[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "activation_4 (Activation)       (None, 64, 64, 512)  0           batch_normalization_6[0][0]      \n",
      "__________________________________________________________________________________________________\n",
      "up_sampling2d_2 (UpSampling2D)  (None, 128, 128, 512 0           activation_4[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "concatenate_3 (Concatenate)     (None, 128, 128, 768 0           block3_conv3[0][0]               \n",
      "                                                                 up_sampling2d_2[0][0]            \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_9 (Conv2D)               (None, 128, 128, 256 1769728     concatenate_3[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_7 (BatchNor (None, 128, 128, 256 1024        conv2d_9[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "activation_5 (Activation)       (None, 128, 128, 256 0           batch_normalization_7[0][0]      \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_10 (Conv2D)              (None, 128, 128, 256 590080      activation_5[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_8 (BatchNor (None, 128, 128, 256 1024        conv2d_10[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "activation_6 (Activation)       (None, 128, 128, 256 0           batch_normalization_8[0][0]      \n",
      "__________________________________________________________________________________________________\n",
      "up_sampling2d_3 (UpSampling2D)  (None, 256, 256, 256 0           activation_6[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "concatenate_4 (Concatenate)     (None, 256, 256, 384 0           block2_conv2[0][0]               \n",
      "                                                                 up_sampling2d_3[0][0]            \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_11 (Conv2D)              (None, 256, 256, 128 442496      concatenate_4[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_9 (BatchNor (None, 256, 256, 128 512         conv2d_11[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_1 (BatchNor (None, 64, 64, 256)  1024        iba[0][0]                        \n",
      "__________________________________________________________________________________________________\n",
      "activation_7 (Activation)       (None, 256, 256, 128 0           batch_normalization_9[0][0]      \n",
      "__________________________________________________________________________________________________\n",
      "block4_c_pool (MaxPooling2D)    (None, 32, 32, 256)  0           batch_normalization_1[0][0]      \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_12 (Conv2D)              (None, 256, 256, 128 147584      activation_7[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "block5_c_conv1 (Conv2D)         (None, 32, 32, 512)  1180160     block4_c_pool[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_10 (BatchNo (None, 256, 256, 128 512         conv2d_12[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "block5_c_conv2 (Conv2D)         (None, 32, 32, 512)  2359808     block5_c_conv1[0][0]             \n",
      "__________________________________________________________________________________________________\n",
      "activation_8 (Activation)       (None, 256, 256, 128 0           batch_normalization_10[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "block5_c_conv3 (Conv2D)         (None, 32, 32, 512)  2359808     block5_c_conv2[0][0]             \n",
      "__________________________________________________________________________________________________\n",
      "up_sampling2d_4 (UpSampling2D)  (None, 512, 512, 128 0           activation_8[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_2 (BatchNor (None, 32, 32, 512)  2048        block5_c_conv3[0][0]             \n",
      "__________________________________________________________________________________________________\n",
      "concatenate_5 (Concatenate)     (None, 512, 512, 192 0           block1_conv2[0][0]               \n",
      "                                                                 up_sampling2d_4[0][0]            \n",
      "__________________________________________________________________________________________________\n",
      "block5_c_pool (MaxPooling2D)    (None, 16, 16, 512)  0           batch_normalization_2[0][0]      \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_13 (Conv2D)              (None, 512, 512, 64) 110656      concatenate_5[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "cls_flatten (Flatten)           (None, 131072)       0           block5_c_pool[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_11 (BatchNo (None, 512, 512, 64) 256         conv2d_13[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "cls_dense_0 (Dense)             (None, 64)           8388672     cls_flatten[0][0]                \n",
      "__________________________________________________________________________________________________\n",
      "activation_9 (Activation)       (None, 512, 512, 64) 0           batch_normalization_11[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "cls_act_3 (Activation)          (None, 64)           0           cls_dense_0[0][0]                \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_14 (Conv2D)              (None, 512, 512, 64) 36928       activation_9[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "cls_dropout (Dropout)           (None, 64)           0           cls_act_3[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_12 (BatchNo (None, 512, 512, 64) 256         conv2d_14[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "cls_dense_out (Dense)           (None, 2)            130         cls_dropout[0][0]                \n",
      "__________________________________________________________________________________________________\n",
      "activation_10 (Activation)      (None, 512, 512, 64) 0           batch_normalization_12[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "classification_output (Activati (None, 2)            0           cls_dense_out[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "segmentation_output (Conv2D)    (None, 512, 512, 2)  1154        activation_10[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "seg-out (Conv2D)                (None, 512, 512, 2)  1154        activation_10[0][0]              \n",
      "==================================================================================================\n",
      "Total params: 34,672,518\n",
      "Trainable params: 34,666,630\n",
      "Non-trainable params: 5,888\n",
      "__________________________________________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "INIT_LR = 2e-4\n",
    "EPOCHS =200\n",
    "batch_size = 8\n",
    "depth = 3\n",
    "img_size = 512\n",
    "Name = \"MTL_IBA_cross3_512_2e-4\"\n",
    "GPU = True\n",
    "target = (img_size, img_size)\n",
    "\n",
    "if GPU:\n",
    "    os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n",
    "    os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n",
    "\n",
    "\n",
    "    \n",
    "print(\"------------------------------------------------ Preparing model ------------------------------------------------\")\n",
    "model = MTL_IBA_cross3(img_size, img_size, depth, nClasses=2)\n",
    "# model = MTL_Attention_model(img_size, img_size, depth, nClasses=2)\n",
    "model.summary()\n",
    "# model.load_weights(\"MTL_IBA_cross3_2.h5\", by_name=True)\n",
    "\n",
    "model.load_weights(\"MTL_IBA_cross3_512.h5\", by_name=True)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "f5fcbaf7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# trainX_dir = 'dataset/LE/test/images/'\n",
    "# trainX_dir = 'dataset/LE-NEW/train/images'\n",
    "# trainX_dir = 'dataset/Dataset_BUSI_AN/train/images'\n",
    "trainX_dir = 'dataset/Dataset_BUSI_AN/val/images'\n",
    "testX_dir = trainX_dir.replace('val', 'test')\n",
    "\n",
    "\n",
    "train_x,  train_c_y, train_s_y = load_clas_seg_data(trainX_dir, target)\n",
    "# val_x, val_c_y, val_s_y = load_clas_seg_data(valX_dir, target)\n",
    "test_x, test_c_y, test_s_y = load_clas_seg_data(testX_dir, target)\n",
    "\n",
    "\n",
    "lb = LabelBinarizer()\n",
    "train_c_y = lb.fit_transform(train_c_y)\n",
    "# val_c_y = lb.fit_transform(val_c_y)\n",
    "test_c_y = lb.fit_transform(test_c_y)\n",
    "train_c_y = to_categorical(train_c_y, 2)\n",
    "# val_c_y = to_categorical(val_c_y, 2)\n",
    "test_c_y = to_categorical(test_c_y, 2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "c6931d5e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# from models.shareLayer import create_pair_model\n",
    "# from models.MLT_net import MTL_classic, MTL_3\n",
    "\n",
    "# model = MTL_classic(img_size, img_size, depth, nClasses=2)\n",
    "# model = MTL_3(img_size, img_size, depth, nClasses=2)\n",
    "# model = create_pair_model(img_size, img_size, depth, nClasses=2)\n",
    "# model.summary()\n",
    "\n",
    "\n",
    "# define callbacks\n",
    "csv_logger = CSVLogger(Name+'.log')\n",
    "\n",
    "reduce_lr = ReduceLROnPlateau(monitor='val_segmentation_output_loss', factor=0.2, patience=20, min_lr=1e-8, mode='auto', verbose=1)\n",
    "\n",
    "checkpoint_period1 = ModelCheckpoint(Name + '-{epoch:03d}-{val_classification_output_acc:.4f}.h5',\n",
    "                                     monitor='val_classification_output_acc', mode='auto', save_best_only='True')\n",
    "\n",
    "\n",
    "\n",
    "# define loss and compile the model\n",
    "\n",
    "opt = Adam(lr=INIT_LR, beta_1=0.9, beta_2=0.99, epsilon=1e-08, decay=0.01)\n",
    "\n",
    "def binary_crossentropy(y_true, y_pred):\n",
    "    e=1.0\n",
    "    return K.mean(-(y_true*K.log(y_pred+K.epsilon())+\n",
    "                    e*(1-y_true)*K.log(1-y_pred+K.epsilon())),\n",
    "                  axis=-1)\n",
    "\n",
    "def tversky(y_true, y_pred, smooth=1):\n",
    "    y_true_pos = K.flatten(y_true)\n",
    "    y_pred_pos = K.flatten(y_pred)\n",
    "    true_pos = K.sum(y_true_pos * y_pred_pos)\n",
    "    false_neg = K.sum(y_true_pos * (1 - y_pred_pos))\n",
    "    false_pos = K.sum((1 - y_true_pos) * y_pred_pos)\n",
    "    alpha = 0.5\n",
    "    return (true_pos + smooth) / (true_pos + alpha * false_neg + (1 - alpha) * false_pos + smooth)\n",
    "\n",
    "def tversky_loss(y_true, y_pred):\n",
    "    return 1 - tversky(y_true, y_pred)\n",
    "\n",
    "# smooth 参数防止分母为0\n",
    "def dice_coef(y_true, y_pred, smooth=1):\n",
    "    smooth = 0.0005\n",
    "    y_true_f = K.flatten(y_true)\n",
    "    y_pred_f = K.flatten(y_pred)\n",
    "    intersection = K.sum(y_true_f * y_pred_f)\n",
    "    return K.mean((2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth))\n",
    "\n",
    "\n",
    "from keras.losses import categorical_crossentropy\n",
    "\n",
    "model.compile(loss={'segmentation_output': categorical_crossentropy, 'seg-out': tversky_loss, \"classification_output\": binary_crossentropy},\n",
    "              loss_weights={'segmentation_output': 0.35,'seg-out': 0.35, \"classification_output\": 0.3},\n",
    "              optimizer=opt, \n",
    "              metrics={'segmentation_output':'accuracy', 'seg-out':dice_coef, 'classification_output':'accuracy'})\n",
    "\n",
    "# model.compile(loss={'segmentation_output': categorical_crossentropy, 'seg-out': tversky_loss, \"classification_output\": binary_crossentropy},\n",
    "#               loss_weights={'segmentation_output': 0.15,'seg-out': 0.15, \"classification_output\": 0.7},\n",
    "#               optimizer=opt, \n",
    "#               metrics={'segmentation_output':'accuracy', 'seg-out':dice_coef, 'classification_output':'accuracy'})\n",
    "\n",
    "# # model.compile(loss={'segmentation_output': focal_tversky, \"classification_output\": binary_crossentropy},\n",
    "# model.compile(loss={'segmentation_output': 'categorical_crossentropy', \"classification_output\": binary_crossentropy},\n",
    "#               loss_weights={'segmentation_output': 0.5, \"classification_output\": 0.5},\n",
    "#               optimizer=Adam(lr=1e-3), \n",
    "#               metrics={'segmentation_output':['accuracy', dice_coef], 'classification_output':'accuracy'})\n",
    "\n",
    "# model.compile(loss={'seg_out': 'categorical_crossentropy', 'cls_out': binary_crossentropy},\n",
    "#               optimizer=Adam(lr=1e-4), metrics={'seg_out':dice_coef, 'cls_out':'accuracy'},\n",
    "#               loss_weights={'seg_out':0.5, 'cls_out':0.5})\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "a3eb4e1c",
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'val_x' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[0;32m/tmp/ipykernel_11349/1429888672.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m      5\u001b[0m                  \u001b[0mbatch_size\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      6\u001b[0m                  \u001b[0mepochs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m200\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 7\u001b[0;31m                  \u001b[0mvalidation_data\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mval_x\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mval_c_y\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mval_s_y\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mval_s_y\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      8\u001b[0m                  \u001b[0mverbose\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      9\u001b[0m                  \u001b[0mcallbacks\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mreduce_lr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcsv_logger\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mNameError\u001b[0m: name 'val_x' is not defined"
     ]
    }
   ],
   "source": [
    "# Train the multi-task model: one classification head plus two segmentation\n",
    "# heads (the segmentation target is supplied twice, once per head).\n",
    "# NOTE(review): this assumes train_x/val_x and the label arrays were built by\n",
    "# the data-loading cells above -- confirm they run first on a fresh kernel.\n",
    "hist = model.fit(train_x,\n",
    "                 [train_c_y, train_s_y, train_s_y],\n",
    "                 batch_size=batch_size,\n",
    "                 epochs=200,\n",
    "                 validation_data=(val_x, [val_c_y, val_s_y, val_s_y]),\n",
    "                 verbose=1,\n",
    "                 callbacks=[reduce_lr, csv_logger],\n",
    "                 shuffle=True)\n",
    "\n",
    "print(\"================================================ Saving model ================================================\")\n",
    "model_filename = Name + \".h5\"\n",
    "model.save_weights(model_filename)\n",
    "print('model saved to:', model_filename)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "28078269",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plot per-epoch training / validation loss, accuracy, and Dice curves.\n",
    "# NOTE(review): EPOCHS must equal the epochs=200 passed to model.fit above,\n",
    "# otherwise N will not match the length of the history arrays -- confirm.\n",
    "# NOTE(review): the history keys below are derived from the model's\n",
    "# output-layer names and compiled metrics -- verify they match this model\n",
    "# (e.g. 'seg-out_dice_coef' requires a layer literally named 'seg-out').\n",
    "N = np.arange(0, EPOCHS)\n",
    "plt.style.use(\"ggplot\")\n",
    "plt.figure()\n",
    "# plt.plot(N, hist.history[\"loss\"], label=\"train_loss\")\n",
    "plt.plot(N, hist.history[\"classification_output_loss\"], label=\"train_c_loss\")\n",
    "plt.plot(N, hist.history[\"segmentation_output_loss\"], label=\"train_s_loss\")\n",
    "\n",
    "plt.plot(N, hist.history[\"classification_output_acc\"], label=\"train_c_acc\")\n",
    "plt.plot(N, hist.history[\"segmentation_output_acc\"], label=\"train_s_acc\")\n",
    "plt.plot(N, hist.history[\"seg-out_dice_coef\"], label=\"train_s_dice\")\n",
    "\n",
    "# plt.plot(N, hist.history[\"val_loss\"], label=\"val_loss\")\n",
    "plt.plot(N, hist.history[\"val_classification_output_loss\"], label=\"val_c_loss\")\n",
    "plt.plot(N, hist.history[\"val_segmentation_output_loss\"], label=\"val_s_loss\")\n",
    "\n",
    "plt.plot(N, hist.history[\"val_classification_output_acc\"], label=\"val_c_acc\")\n",
    "plt.plot(N, hist.history[\"val_segmentation_output_acc\"], label=\"val_s_acc\")\n",
    "plt.plot(N, hist.history[\"val_seg-out_dice_coef\"], label=\"val_s_dice\")\n",
    "\n",
    "plt.title(\"Training Loss and Accuracy\")\n",
    "plt.xlabel(\"Epoch #\")\n",
    "plt.ylabel(\"Loss/Accuracy/DSC\")\n",
    "plt.legend()\n",
    "plt.savefig(os.path.join('output/', Name+'.png'))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "452674d3",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "------Start predicting------\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2022-08-05 10:19:51.385369: I tensorflow/stream_executor/dso_loader.cc:152] successfully opened CUDA library libcublas.so.10.0 locally\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "------------------------------------------------ Classification testing ------------------------------------------------\n",
      "                  precision    recall  f1-score   support\n",
      "\n",
      "   bengin_images   0.933333  0.976744  0.954545        43\n",
      "malignant_images   0.947368  0.857143  0.900000        21\n",
      "\n",
      "        accuracy                       0.937500        64\n",
      "       macro avg   0.940351  0.916944  0.927273        64\n",
      "    weighted avg   0.937939  0.937500  0.936648        64\n",
      "\n",
      "------------------------------------------------ Segmentation testing ------------------------------------------------\n",
      "Test total loss: 0.2971489503979683\n",
      "Test classification loss: 0.5242930594831705\n",
      "Test segmentation loss: 0.2942817658185959\n",
      "Test classification accuracy: 0.9375\n",
      "Test segmentation accuracy: 0.9075498580932617\n",
      "Test segmentation dice_coef: 0.8946788311004639\n",
      "64/64 [==============================] - 11s 167ms/step\n",
      "AUC: 0.9536124352660287\n"
     ]
    }
   ],
   "source": [
    "print(\"------Start predicting------\")\n",
    "\n",
    "# Single forward pass over the test set; the model has three outputs:\n",
    "# classification head plus two segmentation heads.\n",
    "predictions_c, predictions_s, predictions_s2 = model.predict(test_x, batch_size=32)\n",
    "\n",
    "print(\"------------------------------------------------ Classification testing ------------------------------------------------\")\n",
    "\n",
    "print(classification_report(test_c_y.argmax(axis=1), predictions_c.argmax(axis=1), target_names=lb.classes_, digits=6))\n",
    "\n",
    "print(\"------------------------------------------------ Segmentation testing ------------------------------------------------\")\n",
    "\n",
    "# Evaluate all heads at once and unpack per-head losses and metrics\n",
    "# (order follows the model's output order and compiled metrics).\n",
    "loss, cla_loss, seg_loss, seg_loss2, cla_acc, seg_acc, seg_dice_coef = model.evaluate(test_x, [test_c_y, test_s_y, test_s_y], verbose=0)\n",
    "print('Test total loss:', loss)\n",
    "print('Test classification loss:', cla_loss)\n",
    "print('Test segmentation loss:', seg_loss)\n",
    "\n",
    "print('Test classification accuracy:', cla_acc)\n",
    "print('Test segmentation accuracy:', seg_acc)\n",
    "print('Test segmentation dice_coef:', seg_dice_coef)\n",
    "\n",
    "# FIX: reuse the predictions computed above instead of running a second,\n",
    "# redundant model.predict over the same test set.\n",
    "test_mask = test_s_y.flatten()\n",
    "pred_mask = predictions_s2.flatten()\n",
    "fpr, tpr, thresholds = roc_curve(test_mask, pred_mask, pos_label=1)\n",
    "roc_auc = auc(fpr, tpr)\n",
    "print(\"AUC:\", roc_auc)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "0066f861",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      " 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1]\n",
      "[0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      " 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 0 1 0 1 1 0 1 1 1 1 1]\n"
     ]
    }
   ],
   "source": [
    "# Show ground-truth vs. predicted class indices, then dump per-image\n",
    "# classification results to a CSV file.\n",
    "print(test_c_y.argmax(axis=1))\n",
    "print(predictions_c.argmax(axis=1))\n",
    "\n",
    "prediction = predictions_c.argmax(axis=1)\n",
    "label = test_c_y.argmax(axis=1)\n",
    "\n",
    "import csv\n",
    "\n",
    "# FIX: use a with-block so the file is closed even if a row write fails,\n",
    "# and newline='' so the csv module does not emit blank rows on Windows.\n",
    "with open(Name + \".csv\", \"w\", newline=\"\") as csvFile:\n",
    "    writer = csv.writer(csvFile)\n",
    "    # Header row first.\n",
    "    writer.writerow([\"id\",\"label\",\"prediction\",\"prediction probability\",\"bengin\",\"malignant\"])\n",
    "\n",
    "    for i in range(len(predictions_c)):\n",
    "        # Probability of the predicted class, plus both raw class probabilities.\n",
    "        writer.writerow([i, label[i], prediction[i], predictions_c[i][prediction[i]], predictions_c[i][0], predictions_c[i][1]])\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "18827d69",
   "metadata": {},
   "source": [
    "## 只加载分割数据测试每个类别的准确率"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7de48f22",
   "metadata": {},
   "outputs": [],
   "source": [
    "import cv2\n",
    "\n",
    "testX_dir=\"dataset/Dataset_BUSI_AN/test_M/images\"\n",
    "\n",
    "# BUG FIX: the original unpacked the last two return values into the same\n",
    "# name (test_img_Names, test_img_Names), so orignal_imgs -- used by the\n",
    "# prediction-saving cell below -- was never defined. The loader's 4th return\n",
    "# value is the original images (see the commented-out call in that cell).\n",
    "test_X, test_Y, test_img_Names, orignal_imgs = load_SEG_data_for_test(testX_dir, target=target, shuffle=False)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8fdd8583",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build a segmentation-only sub-model from the trained multi-task model and\n",
    "# evaluate it on the segmentation-only test set loaded above.\n",
    "\n",
    "opt = Adam(lr=INIT_LR, beta_1=0.9, beta_2=0.99, epsilon=1e-08, decay=0.01)\n",
    "\n",
    "def binary_crossentropy(y_true, y_pred):\n",
    "    \"\"\"Binary cross-entropy with a tunable negative-class weight e.\"\"\"\n",
    "    e=1.0\n",
    "    return K.mean(-(y_true*K.log(y_pred+K.epsilon())+\n",
    "                    e*(1-y_true)*K.log(1-y_pred+K.epsilon())),\n",
    "                  axis=-1)\n",
    "\n",
    "def tversky(y_true, y_pred, smooth=1):\n",
    "    \"\"\"Tversky index; with alpha=0.5 this reduces to the Dice coefficient.\"\"\"\n",
    "    y_true_pos = K.flatten(y_true)\n",
    "    y_pred_pos = K.flatten(y_pred)\n",
    "    true_pos = K.sum(y_true_pos * y_pred_pos)\n",
    "    false_neg = K.sum(y_true_pos * (1 - y_pred_pos))\n",
    "    false_pos = K.sum((1 - y_true_pos) * y_pred_pos)\n",
    "    alpha = 0.5\n",
    "    return (true_pos + smooth) / (true_pos + alpha * false_neg + (1 - alpha) * false_pos + smooth)\n",
    "\n",
    "def tversky_loss(y_true, y_pred):\n",
    "    \"\"\"Loss form of the Tversky index (1 - tversky).\"\"\"\n",
    "    return 1 - tversky(y_true, y_pred)\n",
    "\n",
    "# Sub-model exposing only the two segmentation heads of the trained model.\n",
    "model2=Model(inputs=model.inputs, outputs=[model.get_layer('segmentation_output').output, model.get_layer('seg-out').output])\n",
    "\n",
    "model2.compile(loss={'segmentation_output': categorical_crossentropy, 'seg-out': tversky_loss},\n",
    "              loss_weights={'segmentation_output': 0.5,'seg-out': 0.5},\n",
    "              optimizer=opt, \n",
    "              metrics={'segmentation_output':'accuracy', 'seg-out':dice_coef})\n",
    "\n",
    "predictions_s, predictions_s2 = model2.predict(test_X, batch_size=8)\n",
    "loss, seg_loss, seg_loss2, seg_acc, seg_dice_coef = model2.evaluate(test_X, [test_Y, test_Y], verbose=0)\n",
    "\n",
    "preds_c, preds_s, preds_s2 = model.predict(test_X, batch_size=8, verbose=1)\n",
    "# Collapse the two-channel one-hot masks into a single signed foreground map.\n",
    "# NOTE(review): assumes channel 0 is background and channel 1 foreground --\n",
    "# confirm against the data loader.\n",
    "test_mask = test_Y[:,:,:,0] - test_Y[:,:,:,1]\n",
    "print(test_mask.shape)\n",
    "print(preds_s.shape)\n",
    "pred_mask = preds_s[:,:,:,0] - preds_s[:,:,:,1]\n",
    "print(pred_mask.shape)\n",
    "\n",
    "def dice_ratio(y_true, y_pred):\n",
    "    \"\"\"Compute the Dice ratio between two masks.\n",
    "\n",
    "    Both inputs are flattened (copies, so the callers' arrays are untouched)\n",
    "    and binarized at 0.5 before comparison.\n",
    "    :param y_pred: segmentation result\n",
    "    :param y_true: ground truth\n",
    "    :return: Dice score in [0, 1]\n",
    "    \"\"\"\n",
    "    y_pred = y_pred.flatten()\n",
    "    y_pred[y_pred > 0.5] = 1\n",
    "    y_pred[y_pred <= 0.5] = 0\n",
    "\n",
    "    y_true = y_true.flatten()\n",
    "    y_true[y_true > 0.5] = 1\n",
    "    y_true[y_true <= 0.5] = 0\n",
    "\n",
    "    same = (y_pred * y_true).sum()\n",
    "\n",
    "    # BUG FIX: guard against division by zero when both masks are empty;\n",
    "    # two empty masks agree perfectly, so return 1.0 in that case.\n",
    "    denom = float(y_true.sum() + y_pred.sum())\n",
    "    if denom == 0:\n",
    "        return 1.0\n",
    "    return 2 * float(same) / denom\n",
    "\n",
    "dice = dice_ratio(test_mask, pred_mask)\n",
    "\n",
    "print('Test segmentation accuracy:', seg_acc)\n",
    "print('Test segmentation dice_coef:', seg_dice_coef)\n",
    "print(\"Dice:\", dice)\n",
    "test_mask = test_Y.flatten()\n",
    "pred_mask = preds_s.flatten()\n",
    "fpr, tpr, thresholds = roc_curve(test_mask, pred_mask, pos_label=1)\n",
    "roc_auc = auc(fpr, tpr)\n",
    "print(\"AUC:\", roc_auc)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "93e8192f",
   "metadata": {},
   "source": [
    "## 保存预测结果"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2959c28a",
   "metadata": {},
   "outputs": [],
   "source": [
    "from gernerate_data import load_SEG_data_for_test\n",
    "import cv2\n",
    "\n",
    "def mask2gray(mask, input_type=None):\n",
    "    \"\"\"Convert a mask to an 8-bit grayscale image.\n",
    "\n",
    "    For predicted masks (input_type='pred') the two channels are combined\n",
    "    (channel 1 minus channel 0) and thresholded at 0.5 into a binary image.\n",
    "    The result is min-max normalized to the 0-255 range.\n",
    "    \"\"\"\n",
    "    # BUG FIX: the original used \"input_type is 'pred'\", which tests object\n",
    "    # identity, not string equality, and is not guaranteed to match; use ==.\n",
    "    if input_type == 'pred':\n",
    "        mask0 = mask[:, :, 0]\n",
    "        mask1 = mask[:, :, 1]\n",
    "        mask = mask1 - mask0\n",
    "        print(\"mask.shape: \", mask.shape)\n",
    "        mask[mask > 0.5] = 255\n",
    "        mask[mask <= 0.5] = 0\n",
    "    mask = mask.astype(dtype=np.uint8)\n",
    "    rst = mask.copy()\n",
    "    cv2.normalize(mask, rst, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)\n",
    "    return rst\n",
    "\n",
    "print(\"------------------------------ Saving predictions ------------------------------------\")\n",
    "\n",
    "preds = preds_s\n",
    "preds_savePath = \"predcition/BUSI/ECA_s_0.5\" # + Name\n",
    "img_size = (224, 224)\n",
    "\n",
    "# Save the original image, ground-truth mask, and predicted mask for every\n",
    "# test sample into separate sub-directories.\n",
    "for i in range(test_X.shape[0]):\n",
    "\n",
    "    img = orignal_imgs[i]\n",
    "\n",
    "    gt = test_Y[i, :, :, 1]\n",
    "    gt = mask2gray(gt)\n",
    "    gt = cv2.cvtColor(gt, cv2.COLOR_GRAY2RGB)\n",
    "\n",
    "    pred = preds[i]\n",
    "    pred = mask2gray(pred, input_type='pred')\n",
    "    pred = cv2.cvtColor(pred, cv2.COLOR_GRAY2RGB)\n",
    "\n",
    "    savePath = os.path.join(preds_savePath, 'Images')\n",
    "    saveMaskPath = os.path.join(preds_savePath, 'Masks')\n",
    "    savePredPath = os.path.join(preds_savePath, 'Predictions')\n",
    "\n",
    "    # exist_ok=True replaces the manual os.path.exists checks (race-free).\n",
    "    os.makedirs(savePath, exist_ok=True)\n",
    "    os.makedirs(saveMaskPath, exist_ok=True)\n",
    "    os.makedirs(savePredPath, exist_ok=True)\n",
    "\n",
    "    cv2.imwrite(savePath + \"/{0}\".format(test_img_Names[i]), img)\n",
    "    cv2.imwrite(saveMaskPath + \"/{0}\".format(test_img_Names[i]), gt)\n",
    "    cv2.imwrite(savePredPath + \"/{0}\".format(test_img_Names[i]), pred)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "695c4f86",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
