{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "7715f3fd",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n",
      "/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n",
      "/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n",
      "/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:528: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n",
      "/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:529: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n",
      "/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:530: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n",
      "/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:535: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "WARNING: The TensorFlow contrib module will not be included in TensorFlow 2.0.\n",
      "For more information, please see:\n",
      "  * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n",
      "  * https://github.com/tensorflow/addons\n",
      "If you depend on functionality not listed there, please file an issue.\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Imports for the VGG16-IBA classification training pipeline.\n",
    "from keras.preprocessing.image import ImageDataGenerator\n",
    "import tensorflow as tf\n",
    "# from IBA.tensorflow_v1 import IBALayer\n",
    "import warnings\n",
    "import matplotlib.cbook\n",
    "# Silence matplotlib deprecation noise in the notebook output.\n",
    "warnings.filterwarnings(\"ignore\", category=matplotlib.cbook.mplDeprecation)\n",
    "from sklearn.metrics import classification_report\n",
    "# Explicit import instead of the former star-import\n",
    "# (`from keras.utils.np_utils import *`): only to_categorical is used below,\n",
    "# and wildcard imports hide where names come from.\n",
    "from keras.utils.np_utils import to_categorical\n",
    "from keras import backend as K  # used by the LR scheduler defined below\n",
    "from keras.callbacks import LearningRateScheduler\n",
    "from sklearn.preprocessing import LabelBinarizer, label_binarize\n",
    "from keras.losses import categorical_crossentropy, mean_squared_error, binary_crossentropy\n",
    "from keras.utils.training_utils import multi_gpu_model\n",
    "from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping, CSVLogger\n",
    "from keras.optimizers import SGD, Adam\n",
    "from models.models import VGG16Net, VGG19Net, ResNet50Net\n",
    "from models.VGG import VGG16Net_IBA\n",
    "import matplotlib.pyplot as plt\n",
    "import utils_paths\n",
    "import numpy as np\n",
    "import random\n",
    "import pickle\n",
    "import cv2\n",
    "import os\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "c6761528",
   "metadata": {},
   "outputs": [],
   "source": [
    "def scheduler(epoch):\n",
    "    \"\"\"Step learning-rate schedule for keras.callbacks.LearningRateScheduler.\n",
    "\n",
    "    Every 100 epochs (except epoch 0), multiplies the learning rate of the\n",
    "    global `model`'s optimizer by 0.1 and returns the current learning rate.\n",
    "    \"\"\"\n",
    "    # Local import: `K` was never imported at the top of the notebook, so the\n",
    "    # original raised NameError the moment this callback fired.\n",
    "    from keras import backend as K\n",
    "    if epoch % 100 == 0 and epoch != 0:\n",
    "        lr = K.get_value(model.optimizer.lr)\n",
    "        K.set_value(model.optimizer.lr, lr * 0.1)\n",
    "        print(\"lr changed to {}\".format(lr * 0.1))\n",
    "    return K.get_value(model.optimizer.lr)\n",
    "\n",
    "def load_cla_data(train_imagePaths, target):\n",
    "    \"\"\"Load images and derive class labels from the parent directory name.\n",
    "\n",
    "    Args:\n",
    "        train_imagePaths: iterable of image file paths.\n",
    "        target: (width, height) tuple passed to cv2.resize.\n",
    "\n",
    "    Returns:\n",
    "        (data, labels): float image array scaled to [0, 1] and an array of\n",
    "        string labels (the name of each image's containing folder).\n",
    "    \"\"\"\n",
    "    data = []\n",
    "    labels = []\n",
    "\n",
    "    for imagePath in train_imagePaths:\n",
    "        image = cv2.imread(imagePath)\n",
    "        if image is None:\n",
    "            # cv2.imread returns None for missing/corrupt files; the original\n",
    "            # then crashed inside cv2.resize. Skip the file with a warning.\n",
    "            print(\"WARNING: could not read\", imagePath)\n",
    "            continue\n",
    "        data.append(cv2.resize(image, target))\n",
    "        # Label = name of the class folder containing the image.\n",
    "        labels.append(imagePath.split(os.path.sep)[-2])\n",
    "\n",
    "    data = np.array(data, dtype=\"float\") / 255.0\n",
    "    labels = np.array(labels)\n",
    "    # Summary instead of one print per image, which flooded the output.\n",
    "    print(\"loaded {} images\".format(len(data)))\n",
    "\n",
    "    return data, labels"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "5c06b538",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Colocations handled automatically by placer.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2022-05-09 06:34:04.191995: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA\n",
      "2022-05-09 06:34:04.229321: I tensorflow/core/platform/profile_utils/cpu_utils.cc:94] CPU Frequency: 2200095000 Hz\n",
      "2022-05-09 06:34:04.233623: I tensorflow/compiler/xla/service/service.cc:150] XLA service 0x5afe740 executing computations on platform Host. Devices:\n",
      "2022-05-09 06:34:04.233712: I tensorflow/compiler/xla/service/service.cc:158]   StreamExecutor device (0): <undefined>, <undefined>\n",
      "2022-05-09 06:34:04.715648: I tensorflow/compiler/xla/service/service.cc:150] XLA service 0x5baa7a0 executing computations on platform CUDA. Devices:\n",
      "2022-05-09 06:34:04.715739: I tensorflow/compiler/xla/service/service.cc:158]   StreamExecutor device (0): Tesla V100-PCIE-32GB, Compute Capability 7.0\n",
      "2022-05-09 06:34:04.716899: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1433] Found device 0 with properties: \n",
      "name: Tesla V100-PCIE-32GB major: 7 minor: 0 memoryClockRate(GHz): 1.38\n",
      "pciBusID: 0000:09:00.0\n",
      "totalMemory: 31.72GiB freeMemory: 12.62GiB\n",
      "2022-05-09 06:34:04.717008: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1512] Adding visible gpu devices: 0\n",
      "2022-05-09 06:34:04.720568: I tensorflow/core/common_runtime/gpu/gpu_device.cc:984] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
      "2022-05-09 06:34:04.720644: I tensorflow/core/common_runtime/gpu/gpu_device.cc:990]      0 \n",
      "2022-05-09 06:34:04.720667: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1003] 0:   N \n",
      "2022-05-09 06:34:04.721504: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 12279 MB memory) -> physical GPU (device: 0, name: Tesla V100-PCIE-32GB, pci bus id: 0000:09:00.0, compute capability: 7.0)\n"
     ]
    },
    {
     "ename": "IndexError",
     "evalue": "list index out of range",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mIndexError\u001b[0m                                Traceback (most recent call last)",
      "\u001b[0;32m/tmp/ipykernel_8932/3431579177.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m     14\u001b[0m \u001b[0;31m# 建立卷积神经网络\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     15\u001b[0m \u001b[0;31m# model = ResNet50Net.build(width=width, height=height, depth=depth, classes=2)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 16\u001b[0;31m \u001b[0mmodel\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mVGG16Net_IBA\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbuild\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mwidth\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mwidth\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mheight\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mheight\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdepth\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdepth\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mclasses\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     17\u001b[0m \u001b[0;31m# model = VGG16Net.build(width=width, height=height, depth=depth, classes=2)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     18\u001b[0m \u001b[0;31m# model = VGG19Net.build(width=width, height=height, depth=depth, classes=2)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/workspace/MTL-IBA/models/VGG.py\u001b[0m in \u001b[0;36mbuild\u001b[0;34m(width, height, depth, classes)\u001b[0m\n\u001b[1;32m     29\u001b[0m \u001b[0;31m#         output = IBALayer()(model.output)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     30\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 31\u001b[0;31m         \u001b[0moutput\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mIBALayer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     32\u001b[0m         \u001b[0moutput\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mFlatten\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'flatten'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutput\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     33\u001b[0m         \u001b[0moutput\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mDense\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m1024\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mactivation\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'relu'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'fc1'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutput\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/usr/local/lib/python3.7/site-packages/keras/engine/base_layer.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, inputs, **kwargs)\u001b[0m\n\u001b[1;32m    495\u001b[0m                                    \u001b[0minput_shapes\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minput_shape\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    496\u001b[0m                                    \u001b[0moutput_shapes\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0moutput_shape\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 497\u001b[0;31m                                    arguments=user_kwargs)\n\u001b[0m\u001b[1;32m    498\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    499\u001b[0m             \u001b[0;31m# Apply activity regularizer if any:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/usr/local/lib/python3.7/site-packages/keras/engine/base_layer.py\u001b[0m in \u001b[0;36m_add_inbound_node\u001b[0;34m(self, input_tensors, output_tensors, input_masks, output_masks, input_shapes, output_shapes, arguments)\u001b[0m\n\u001b[1;32m    563\u001b[0m         \u001b[0;31m# Update tensor history, _keras_shape and _uses_learning_phase.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    564\u001b[0m         \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutput_tensors\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 565\u001b[0;31m             \u001b[0moutput_tensors\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_keras_shape\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0moutput_shapes\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    566\u001b[0m             uses_lp = any(\n\u001b[1;32m    567\u001b[0m                 [getattr(x, '_uses_learning_phase', False)\n",
      "\u001b[0;31mIndexError\u001b[0m: list index out of range"
     ]
    }
   ],
   "source": [
    "# --- Training configuration and model construction ---\n",
    "INIT_LR = 1e-3  # initial learning rate for SGD\n",
    "EPOCHS = 100    # number of training epochs\n",
    "BS = 8          # batch size\n",
    "width = 224     # input image width expected by the VGG backbones\n",
    "height = 224    # input image height\n",
    "depth = 3       # number of colour channels\n",
    "target = (width, height)  # resize target passed to load_cla_data\n",
    "Name = \"VGG16Net_IBA_DES\"  # experiment name used for log/weight filenames\n",
    "GPU = True\n",
    "if GPU:\n",
    "    # NOTE(review): TensorFlow was already imported in the first cell;\n",
    "    # CUDA_VISIBLE_DEVICES generally must be set before TF initialises its\n",
    "    # devices to have any effect -- confirm this actually selects GPU 1.\n",
    "    os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n",
    "    os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n",
    "\n",
    "# Build the convolutional neural network (alternative backbones kept for reference).\n",
    "# model = ResNet50Net.build(width=width, height=height, depth=depth, classes=2)\n",
    "model = VGG16Net_IBA.build(width=width, height=height, depth=depth, classes=2)\n",
    "# model = VGG16Net.build(width=width, height=height, depth=depth, classes=2)\n",
    "# model = VGG19Net.build(width=width, height=height, depth=depth, classes=2)\n",
    "# model = VGG16Net_IBA.build(width=width, height=height, depth=depth, classes=len(lb.classes_))\n",
    "#model = load_model('weights_VGG16_ICIS_100-0.8378.h5')\n",
    "model.summary()\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "eff31e8a",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(\"------data loading-----\")\n",
    "train_path = 'dataset/DES_aug/train/images/'  # train_path = './dataset/OCT/train/'\n",
    "test_path = train_path.replace('train', 'test')\n",
    "val_path = train_path.replace('train', 'val')\n",
    "\n",
    "train_imagePaths = sorted(list(utils_paths.list_images(train_path)))\n",
    "val_imagePaths = sorted(list(utils_paths.list_images(val_path)))\n",
    "test_imagePaths = sorted(list(utils_paths.list_images(test_path)))\n",
    "# Fixed seed so the shuffle order is reproducible across runs.\n",
    "random.seed(307)\n",
    "random.shuffle(train_imagePaths)\n",
    "random.shuffle(val_imagePaths)\n",
    "random.shuffle(test_imagePaths)\n",
    "\n",
    "train_x, train_y = load_cla_data(train_imagePaths, target)\n",
    "val_x, val_y = load_cla_data(val_imagePaths, target)\n",
    "test_x, test_y = load_cla_data(test_imagePaths, target)\n",
    "\n",
    "# Fit the binarizer on the TRAINING labels only, then reuse the same mapping\n",
    "# for val/test. The original called fit_transform on every split, which\n",
    "# re-fits the encoder per split and can silently produce inconsistent\n",
    "# encodings when a split is missing a class.\n",
    "lb = LabelBinarizer()\n",
    "train_y = lb.fit_transform(train_y)\n",
    "val_y = lb.transform(val_y)\n",
    "test_y = lb.transform(test_y)\n",
    "# train_y = label_binarize(train_y, np.arange(2))\n",
    "train_y = to_categorical(train_y, 2)\n",
    "val_y = to_categorical(val_y, 2)\n",
    "test_y = to_categorical(test_y, 2)\n",
    "\n",
    "# Data augmentation (currently disabled; see the commented fit_generator below).\n",
    "# aug = ImageDataGenerator(rotation_range=30,\n",
    "#                          # brightness_range=(0.5, 0.9),\n",
    "#                          # rescale=0.8,\n",
    "#                          shear_range=0.2,\n",
    "#                          zoom_range=0.1,\n",
    "#                          horizontal_flip=True,\n",
    "#                          # vertical_flip=True,\n",
    "#                          fill_mode=\"nearest\"\n",
    "#                          )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "93b51391",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(\"------begin training -----\")\n",
    "# SGD with momentum and slow decay; alternative optimizers kept for reference.\n",
    "opt = SGD(lr=INIT_LR, momentum=0.9, decay=1e-5, nesterov=True)\n",
    "#opt = SGD(lr=INIT_LR, decay=INIT_LR / EPOCHS)\n",
    "# opt = Adam(lr=INIT_LR)\n",
    "\n",
    "model.compile(loss=\"categorical_crossentropy\", optimizer=opt, metrics=[\"accuracy\"])\n",
    "\n",
    "# --- callbacks ---\n",
    "csv_logger = CSVLogger(Name + '.log')\n",
    "\n",
    "# Checkpoints are defined but intentionally NOT passed to fit() below; add\n",
    "# them to `callbacks` to save weights every 20 epochs.\n",
    "checkpoint_period1 = ModelCheckpoint(Name + '-{epoch:03d}-{val_acc:.4f}.h5',\n",
    "                                     monitor='val_acc', mode='auto', save_best_only='True', period=20)\n",
    "checkpoint_period2 = ModelCheckpoint(Name + '-{epoch:03d}-{val_acc:.4f}.h5',\n",
    "                                     monitor='val_acc', mode='auto', period=20)\n",
    "\n",
    "# earlyStopping = EarlyStopping(monitor='val_acc', patience=20, mode='auto')\n",
    "# Single ReduceLROnPlateau definition. The original assigned `reduce_lr` twice\n",
    "# (first monitoring 'val_loss', then 'loss'); only the second assignment took\n",
    "# effect, so that effective configuration is kept here.\n",
    "reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.1, patience=10, min_lr=1e-8, mode='auto', verbose=1)\n",
    "\n",
    "hist = model.fit(train_x, train_y,\n",
    "                 batch_size=BS,\n",
    "                 epochs=EPOCHS,\n",
    "                 validation_data=(val_x, val_y),\n",
    "                 class_weight=\"auto\",  # NOTE(review): 'auto' is only honoured by old Keras -- confirm\n",
    "                 verbose=1,\n",
    "                 callbacks=[reduce_lr, csv_logger])\n",
    "\n",
    "# hist = model.fit_generator(aug.flow(train_x, train_y, batch_size=BS),\n",
    "#                            validation_data=aug.flow(val_x, val_y),\n",
    "#                            steps_per_epoch=len(train_x) // BS,\n",
    "#                            class_weight={0: 0.8, 1: 1.2},\n",
    "#                            validation_steps=5,\n",
    "#                            workers=6,\n",
    "#                            epochs=EPOCHS, callbacks=[checkpoint_period1, checkpoint_period2, csv_logger, reduce_lr])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "eac38aee",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the trained weights, then evaluate on the held-out test split.\n",
    "print(\"------Saving model------\")\n",
    "weights_file = Name + \".h5\"\n",
    "model.save_weights(weights_file)  # weights only; the architecture is rebuilt from code\n",
    "print('model saved to:', weights_file)\n",
    "\n",
    "print(\"------ testing ------\")\n",
    "test_preds = model.predict(test_x, batch_size=8)\n",
    "# Per-class precision/recall/F1 on the argmax predictions.\n",
    "true_idx = test_y.argmax(axis=1)\n",
    "pred_idx = test_preds.argmax(axis=1)\n",
    "report = classification_report(true_idx, pred_idx, target_names=lb.classes_, digits=6)\n",
    "print(report)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "427a431d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plot training curves. Use the number of epochs actually recorded in the\n",
    "# history (not the EPOCHS constant) so the plot still works if training was\n",
    "# interrupted or stopped early -- otherwise the x/y lengths mismatch.\n",
    "N = np.arange(0, len(hist.history[\"loss\"]))\n",
    "plt.style.use(\"ggplot\")\n",
    "plt.figure()\n",
    "plt.plot(N, hist.history[\"loss\"], label=\"train_loss\")\n",
    "plt.plot(N, hist.history[\"val_loss\"], label=\"val_loss\")\n",
    "# NOTE(review): old Keras (TF1.x) records accuracy under 'acc'/'val_acc';\n",
    "# newer versions use 'accuracy'/'val_accuracy' -- adjust keys if upgrading.\n",
    "plt.plot(N, hist.history[\"acc\"], label=\"train_acc\")\n",
    "plt.plot(N, hist.history[\"val_acc\"], label=\"val_acc\")\n",
    "plt.xlabel(\"Epoch #\")\n",
    "plt.ylabel(\"Loss/Accuracy\")\n",
    "plt.legend()\n",
    "os.makedirs('./output', exist_ok=True)  # savefig fails if the directory is missing\n",
    "plt.savefig('./output/' + Name + '_plot.png')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f96932f7",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
