{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Imports. NOTE: duplicate `import numpy as np` / `import tensorflow as tf`\n",
    "# lines were removed; everything else keeps its original order because the\n",
    "# later `from keras...` imports intentionally rebind names (e.g. `layers`,\n",
    "# `Dense`, `Conv2D`) first imported from tensorflow.keras.\n",
    "import tensorflow as tf \n",
    "import pathlib\n",
    "from tensorflow import keras\n",
    "from tensorflow.keras import layers\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import os,math\n",
    "import random\n",
    "from tqdm import tqdm\n",
    "AUTOTUNE = tf.data.experimental.AUTOTUNE\n",
    "from sklearn.model_selection import train_test_split\n",
    "from keras import layers\n",
    "from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Flatten, Dense, Concatenate\n",
    "from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D\n",
    "from keras.models import Model, load_model\n",
    "from keras.preprocessing import image\n",
    "from keras.utils import layer_utils\n",
    "from keras.utils.data_utils import get_file\n",
    "from keras.applications.imagenet_utils import preprocess_input\n",
    "# import pydot\n",
    "# from IPython.display import SVG\n",
    "from keras.utils.vis_utils import model_to_dot\n",
    "from keras.initializers import glorot_uniform\n",
    "import scipy.misc\n",
    "from matplotlib.pyplot import imshow\n",
    "%matplotlib inline\n",
    "from keras.models import Sequential\n",
    "import keras.backend as K\n",
    "K.set_image_data_format('channels_last')  # NHWC tensor layout\n",
    "K.set_learning_phase(1)  # deprecated in recent TF; kept for parity with the recorded run"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "#数据所在文件夹\n",
    "data_path = '../data/'\n",
    "data_root = pathlib.Path(data_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# 获取图片路径\n",
    "all_image_path = list(data_root.glob('*/*'))\n",
    "all_image_path = [str(item) for item in all_image_path]\n",
    "random.shuffle(all_image_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['../data/melanoma/ISIC_0000172.jpg',\n",
       " '../data/vascular lesion/ISIC_0025425.jpg',\n",
       " '../data/melanoma/ISIC_0010396.jpg',\n",
       " '../data/vascular lesion/ISIC_0033608.jpg',\n",
       " '../data/basal cell carcinoma/ISIC_0026442.jpg']"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "all_image_path[:5]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 获取类标签\n",
    "image_label = []\n",
    "for item in data_root.glob('*/'):\n",
    "    if str(item)[-3:] !=  'txt':\n",
    "        image_label.append(item.name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 为标签添加索引\n",
    "label_dict = dict()\n",
    "for name,index in enumerate(image_label):\n",
    "    label_dict[index] = name"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 图片数据对应的标签\n",
    "all_image_label = []\n",
    "for path in all_image_path:\n",
    "    all_image_label.append(int(label_dict[pathlib.Path(path).parent.name]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 图片数据处理\n",
    "def preprocess_image(image):\n",
    "    image = tf.image.decode_jpeg(image, channels=3)\n",
    "    image = tf.image.resize(image, [224,224])\n",
    "    image /= 255.0 \n",
    "    return image"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 导入数据\n",
    "def load_data (path):\n",
    "    data = tf.io.read_file(path)\n",
    "    return preprocess_image(data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): `math.fabs(2)` is the constant 2.0 — the author probably\n",
    "# meant `math.fabs(i)` (clamp out-of-range values). For labels in 0..3 the\n",
    "# condition is never true, so in practice this returns the input unchanged.\n",
    "def get_data(datas):\n",
    "    return [math.fabs(2) if math.fabs(i) >3 else i for i in datas ]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 0/1067 [00:00<?, ?it/s]2023-03-13 13:50:39.258268: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations:  SSE3 SSE4.1 SSE4.2 AVX AVX2 FMA\n",
      "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
      "2023-03-13 13:50:39.261027: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 15609 MB memory:  -> device: 0, name: Z100SM, pci bus id: 0000:04:00.0\n",
      "2023-03-13 13:50:39.269121: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 15609 MB memory:  -> device: 1, name: Z100SM, pci bus id: 0000:26:00.0\n",
      "100%|██████████| 1067/1067 [00:12<00:00, 83.91it/s] \n"
     ]
    }
   ],
   "source": [
    "# Decode and preprocess every image up front (paths were shuffled earlier).\n",
    "image_array = [load_data(image_path) for image_path in tqdm(all_image_path)]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Stack the list of (224, 224, 3) tensors into a single numpy array.\n",
    "image_array = np.array(image_array)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 80/20 train/test split; fixed random_state makes the split reproducible.\n",
    "train_X,test_X, train_y, test_y = train_test_split(image_array,\n",
    "                                                   all_image_label,\n",
    "                                                   test_size = 0.2,\n",
    "                                                   random_state = 0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Wrap both splits in tf.data pipelines, batched in groups of 30.\n",
    "image_label_ds = tf.data.Dataset.from_tensor_slices((train_X,train_y)).batch(30)\n",
    "test_image_label_ds = tf.data.Dataset.from_tensor_slices((test_X, test_y)).batch(30)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(30, 224, 224, 3) tf.Tensor([2 1 2 1 1 1 3 3 1 2 2 2 2 2 2 2 1 1 2 2 1 2 1 2 2 2 0 0 1 2], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([1 3 0 2 2 2 0 2 2 2 1 2 2 1 1 2 1 1 3 1 2 1 1 2 3 1 0 1 2 2], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([1 2 1 0 0 2 0 2 1 0 2 3 1 1 2 3 1 2 2 2 1 2 1 2 2 3 1 1 2 3], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([3 0 1 1 1 2 0 1 3 2 1 0 2 1 2 0 3 2 2 2 2 2 1 2 2 2 0 3 1 0], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([2 2 3 2 2 2 2 1 1 2 0 0 2 2 2 2 2 3 1 2 3 2 2 3 3 2 2 2 2 1], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([1 2 2 2 2 0 2 2 1 1 2 2 0 1 2 1 0 1 0 1 3 1 0 2 0 2 2 1 2 2], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([1 2 1 1 3 2 1 1 0 1 2 2 2 1 2 1 2 1 1 1 1 2 1 0 1 1 1 3 3 1], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([2 1 1 1 2 0 1 0 1 2 2 1 2 1 2 0 3 1 2 2 1 1 3 1 2 2 3 1 0 3], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([3 1 1 0 2 3 2 1 2 2 1 1 1 2 1 1 2 0 0 0 1 3 1 1 2 2 2 1 1 3], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([2 3 1 0 3 1 2 2 2 2 1 1 2 1 1 2 2 2 2 1 0 2 0 2 2 1 1 3 0 2], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([1 0 1 2 1 1 2 2 2 1 3 2 2 2 3 3 1 2 1 2 1 1 3 2 2 1 2 1 1 3], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([1 2 2 1 1 1 2 2 2 2 2 2 0 2 1 2 0 1 3 2 2 2 1 2 2 2 2 2 1 2], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([0 1 1 1 1 1 2 1 3 1 1 3 3 1 0 2 1 1 1 2 3 3 1 2 1 3 2 2 1 2], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([2 1 2 2 1 0 1 1 2 1 1 2 0 1 2 3 1 1 3 2 1 2 3 2 2 2 1 3 1 2], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([1 1 2 0 1 0 2 1 2 2 2 3 2 0 3 2 0 2 0 2 1 2 2 3 0 3 2 0 2 2], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([2 3 0 2 3 2 3 1 1 2 1 1 1 1 2 2 2 1 2 0 2 1 1 2 2 2 2 1 1 0], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([1 2 3 1 2 1 3 2 2 3 2 1 2 2 3 2 2 1 2 0 2 1 1 3 0 0 2 2 0 2], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([2 1 1 1 3 2 2 2 2 1 2 2 2 0 1 1 0 2 0 1 1 1 2 1 2 1 1 2 3 0], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([3 2 1 2 2 1 2 2 1 2 1 2 3 2 3 1 1 3 2 2 2 0 2 2 0 0 1 2 1 2], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([1 2 2 3 2 2 2 3 1 1 2 1 1 2 1 3 1 3 0 2 0 2 3 2 2 2 2 3 2 3], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([2 1 1 1 2 2 2 3 2 1 0 2 1 2 1 2 1 1 2 0 3 3 1 1 1 3 2 1 0 2], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([2 3 3 2 2 2 2 0 3 2 2 2 2 1 0 1 2 3 3 2 2 2 2 1 1 2 1 2 1 1], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([1 2 3 1 1 2 3 3 1 1 0 2 2 0 1 2 2 3 2 3 3 2 0 3 3 0 0 1 1 0], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([0 1 3 1 1 1 1 2 2 2 2 2 3 1 1 1 3 0 3 2 2 3 1 2 2 2 3 2 2 1], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([2 0 2 3 2 3 3 2 2 0 2 2 1 3 3 3 2 2 1 0 3 2 1 1 0 1 3 3 1 2], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([1 1 2 1 1 1 2 2 2 0 3 2 2 1 1 2 2 1 3 3 1 2 1 3 2 1 2 2 2 1], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([2 2 3 1 3 1 0 1 1 1 1 0 1 1 1 1 2 2 2 3 1 3 2 2 2 3 2 1 1 1], shape=(30,), dtype=int32)\n",
      "(30, 224, 224, 3) tf.Tensor([1 0 3 1 2 2 2 2 1 1 1 2 2 0 1 0 3 2 1 1 1 1 3 1 0 0 1 1 1 1], shape=(30,), dtype=int32)\n",
      "(13, 224, 224, 3) tf.Tensor([1 2 1 2 0 0 1 1 2 2 1 1 2], shape=(13,), dtype=int32)\n"
     ]
    }
   ],
   "source": [
    "# Sanity check: print each batch's image shape and label tensor.\n",
    "for i in image_label_ds:\n",
    "    print(i[0].shape,i[1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{0: 114, 1: 376, 2: 438, 3: 139}\n"
     ]
    }
   ],
   "source": [
    "# Class distribution of the full dataset (label index -> image count).\n",
    "list1 = all_image_label\n",
    "set1 = set(list1)\n",
    "dict1 = {label: list1.count(label) for label in set1}\n",
    "print(dict1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# VGG16"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 163,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the VGG16 model (subclassed keras Model).\n",
    "class VGG16(Model):\n",
    "    \"\"\"VGG16-style network: 13 conv layers in five stages (each conv\n",
    "    followed by BatchNorm + ReLU), max-pooling and dropout after every\n",
    "    stage, then Flatten + three Dense layers ending in a 4-way softmax.\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super(VGG16, self).__init__()\n",
    "        # Stage 1: two 64-filter convs\n",
    "        self.c1 = Conv2D(filters=64, kernel_size=(3, 3), padding='same')  # conv layer\n",
    "        self.b1 = BatchNormalization()  # BN layer\n",
    "        self.a1 = Activation('relu')  # activation\n",
    "        self.c2 = Conv2D(filters=64, kernel_size=(3, 3), padding='same', )\n",
    "        self.b2 = BatchNormalization()  # BN layer\n",
    "        self.a2 = Activation('relu')  # activation\n",
    "        self.p1 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')\n",
    "        self.d1 = Dropout(0.2)  # dropout layer\n",
    "\n",
    "        # Stage 2: two 128-filter convs\n",
    "        self.c3 = Conv2D(filters=128, kernel_size=(3, 3), padding='same')\n",
    "        self.b3 = BatchNormalization()  # BN layer\n",
    "        self.a3 = Activation('relu')  # activation\n",
    "        self.c4 = Conv2D(filters=128, kernel_size=(3, 3), padding='same')\n",
    "        self.b4 = BatchNormalization()  # BN layer\n",
    "        self.a4 = Activation('relu')  # activation\n",
    "        self.p2 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')\n",
    "        self.d2 = Dropout(0.2)  # dropout layer\n",
    "\n",
    "        # Stage 3: three 256-filter convs\n",
    "        self.c5 = Conv2D(filters=256, kernel_size=(3, 3), padding='same')\n",
    "        self.b5 = BatchNormalization()  # BN layer\n",
    "        self.a5 = Activation('relu')  # activation\n",
    "        self.c6 = Conv2D(filters=256, kernel_size=(3, 3), padding='same')\n",
    "        self.b6 = BatchNormalization()  # BN layer\n",
    "        self.a6 = Activation('relu')  # activation\n",
    "        self.c7 = Conv2D(filters=256, kernel_size=(3, 3), padding='same')\n",
    "        self.b7 = BatchNormalization()\n",
    "        self.a7 = Activation('relu')\n",
    "        self.p3 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')\n",
    "        self.d3 = Dropout(0.2)\n",
    "\n",
    "        # Stage 4: three 512-filter convs\n",
    "        self.c8 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')\n",
    "        self.b8 = BatchNormalization()  # BN layer\n",
    "        self.a8 = Activation('relu')  # activation\n",
    "        self.c9 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')\n",
    "        self.b9 = BatchNormalization()  # BN layer\n",
    "        self.a9 = Activation('relu')  # activation\n",
    "        self.c10 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')\n",
    "        self.b10 = BatchNormalization()\n",
    "        self.a10 = Activation('relu')\n",
    "        self.p4 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')\n",
    "        self.d4 = Dropout(0.2)\n",
    "\n",
    "        # Stage 5: three 512-filter convs\n",
    "        self.c11 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')\n",
    "        self.b11 = BatchNormalization()  # BN layer\n",
    "        self.a11 = Activation('relu')  # activation\n",
    "        self.c12 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')\n",
    "        self.b12 = BatchNormalization()  # BN layer\n",
    "        self.a12 = Activation('relu')  # activation\n",
    "        self.c13 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')\n",
    "        self.b13 = BatchNormalization()\n",
    "        self.a13 = Activation('relu')\n",
    "        self.p5 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')\n",
    "        self.d5 = Dropout(0.2)\n",
    "\n",
    "        # Classifier head\n",
    "        self.flatten = Flatten()\n",
    "        self.f1 = Dense(512, activation='relu')\n",
    "        self.d6 = Dropout(0.2)\n",
    "        self.f2 = Dense(512, activation='relu')\n",
    "        self.d7 = Dropout(0.2)\n",
    "        self.f3 = Dense(4, activation='softmax')  # 4-class probability output\n",
    "\n",
    "    def call(self, x):\n",
    "        \"\"\"Forward pass: five conv stages, then the dense classifier head.\"\"\"\n",
    "        x = self.c1(x)\n",
    "        x = self.b1(x)\n",
    "        x = self.a1(x)\n",
    "        x = self.c2(x)\n",
    "        x = self.b2(x)\n",
    "        x = self.a2(x)\n",
    "        x = self.p1(x)\n",
    "        x = self.d1(x)\n",
    "\n",
    "        x = self.c3(x)\n",
    "        x = self.b3(x)\n",
    "        x = self.a3(x)\n",
    "        x = self.c4(x)\n",
    "        x = self.b4(x)\n",
    "        x = self.a4(x)\n",
    "        x = self.p2(x)\n",
    "        x = self.d2(x)\n",
    "\n",
    "        x = self.c5(x)\n",
    "        x = self.b5(x)\n",
    "        x = self.a5(x)\n",
    "        x = self.c6(x)\n",
    "        x = self.b6(x)\n",
    "        x = self.a6(x)\n",
    "        x = self.c7(x)\n",
    "        x = self.b7(x)\n",
    "        x = self.a7(x)\n",
    "        x = self.p3(x)\n",
    "        x = self.d3(x)\n",
    "\n",
    "        x = self.c8(x)\n",
    "        x = self.b8(x)\n",
    "        x = self.a8(x)\n",
    "        x = self.c9(x)\n",
    "        x = self.b9(x)\n",
    "        x = self.a9(x)\n",
    "        x = self.c10(x)\n",
    "        x = self.b10(x)\n",
    "        x = self.a10(x)\n",
    "        x = self.p4(x)\n",
    "        x = self.d4(x)\n",
    "\n",
    "        x = self.c11(x)\n",
    "        x = self.b11(x)\n",
    "        x = self.a11(x)\n",
    "        x = self.c12(x)\n",
    "        x = self.b12(x)\n",
    "        x = self.a12(x)\n",
    "        x = self.c13(x)\n",
    "        x = self.b13(x)\n",
    "        x = self.a13(x)\n",
    "        x = self.p5(x)\n",
    "        x = self.d5(x)\n",
    "\n",
    "        x = self.flatten(x)\n",
    "        x = self.f1(x)\n",
    "        x = self.d6(x)\n",
    "        x = self.f2(x)\n",
    "        x = self.d7(x)\n",
    "        y = self.f3(x)\n",
    "        return y"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 164,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Instantiate the VGG16 model defined above.\n",
    "model = VGG16()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 165,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Compile. The model's final layer already applies softmax, so the loss must\n",
    "# use from_logits=False — from_logits=True on softmax probabilities is a\n",
    "# misconfiguration (Keras only emits a warning and second-guesses it).\n",
    "model.compile(optimizer='adam', # optimizer\n",
    "              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), # loss on softmax probabilities\n",
    "              metrics=['accuracy']) # metrics"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 167,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/10\n",
      "29/29 [==============================] - 7s 243ms/step - loss: 1.3804 - accuracy: 0.4478 - lr: 0.0100\n",
      "Epoch 2/10\n",
      "29/29 [==============================] - 7s 243ms/step - loss: 1.5610 - accuracy: 0.4619 - lr: 0.0090\n",
      "Epoch 3/10\n",
      "29/29 [==============================] - 7s 243ms/step - loss: 1.7181 - accuracy: 0.4513 - lr: 0.0081\n",
      "Epoch 4/10\n",
      "29/29 [==============================] - 7s 244ms/step - loss: 1.8266 - accuracy: 0.4596 - lr: 0.0073\n",
      "Epoch 5/10\n",
      "29/29 [==============================] - 7s 243ms/step - loss: 1.8031 - accuracy: 0.4619 - lr: 0.0066\n",
      "Epoch 6/10\n",
      "29/29 [==============================] - 7s 243ms/step - loss: 1.5513 - accuracy: 0.4795 - lr: 0.0059\n",
      "Epoch 7/10\n",
      "29/29 [==============================] - 7s 243ms/step - loss: 1.4311 - accuracy: 0.4455 - lr: 0.0053\n",
      "Epoch 8/10\n",
      "29/29 [==============================] - 7s 244ms/step - loss: 1.1940 - accuracy: 0.4607 - lr: 0.0048\n",
      "Epoch 9/10\n",
      "29/29 [==============================] - 7s 243ms/step - loss: 1.3134 - accuracy: 0.4736 - lr: 0.0043\n",
      "Epoch 10/10\n",
      "29/29 [==============================] - 7s 244ms/step - loss: 1.2945 - accuracy: 0.4572 - lr: 0.0039\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<keras.callbacks.History at 0x7f80e5cd4290>"
      ]
     },
     "execution_count": 167,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 训练数据导入\n",
    "model.fit(\n",
    "        image_label_ds, epochs=10,\n",
    "        callbacks=[tf.keras.callbacks.LearningRateScheduler(lambda epoch: 1e-2 * (0.9 ** epoch))],# 学习率训练, epochs=5代表训练5次\n",
    "        )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2023-03-08 10:38:29.356199: W tensorflow/python/util/util.cc:368] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: ./Vgg16/assets\n"
     ]
    }
   ],
   "source": [
    "# Persist the trained model in TensorFlow SavedModel format.\n",
    "save_path = './Vgg16'\n",
    "model.save(save_path)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# resnet50"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [],
   "source": [
    "def identity_block(X, f, filters, stage, block):\n",
    "    \"\"\"\n",
    "    ResNet identity block: three-conv main path added to an unchanged shortcut.\n",
    "    \n",
    "    Arguments:\n",
    "    X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)\n",
    "    f -- integer, specifying the shape of the middle CONV's window for the main path\n",
    "    filters -- python list of integers, defining the number of filters in the CONV layers of the main path\n",
    "    stage -- integer, used to name the layers, depending on their position in the network\n",
    "    block -- string/character, used to name the layers, depending on their position in the network\n",
    "    \n",
    "    Returns:\n",
    "    X -- output of the identity block, tensor of shape (n_H, n_W, n_C)\n",
    "    \"\"\"\n",
    "    \n",
    "    # defining name basis\n",
    "    conv_name_base = \"res\" + str(stage) + block + \"_branch\"\n",
    "    bn_name_base   = \"bn\"  + str(stage) + block + \"_branch\"\n",
    "    \n",
    "    # Retrieve Filters\n",
    "    F1, F2, F3 = filters\n",
    "    \n",
    "    # Save the input value to add back to the main path after the convs. \n",
    "    X_shortcut = X\n",
    "    \n",
    "    # First component of main path: 1x1 conv\n",
    "    X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(1, 1), padding=\"valid\", \n",
    "               name=conv_name_base+\"2a\", kernel_initializer=glorot_uniform(seed=0))(X)\n",
    "    # 'valid' means no padding; glorot_uniform is Xavier initialization\n",
    "    \n",
    "    X = BatchNormalization(axis=3, name=bn_name_base + \"2a\")(X)\n",
    "    X = Activation(\"relu\")(X)\n",
    "    \n",
    "    # Second component of main path: fxf conv with 'same' padding\n",
    "    X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding=\"same\",\n",
    "               name=conv_name_base+\"2b\", kernel_initializer=glorot_uniform(seed=0))(X)\n",
    "    X = BatchNormalization(axis=3, name=bn_name_base+\"2b\")(X)\n",
    "    X = Activation(\"relu\")(X)\n",
    "    # Third component of main path: 1x1 conv, no activation before the add\n",
    "\n",
    "\n",
    "    X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding=\"valid\",\n",
    "               name=conv_name_base+\"2c\", kernel_initializer=glorot_uniform(seed=0))(X)\n",
    "    X = BatchNormalization(axis=3, name=bn_name_base+\"2c\")(X)\n",
    "    \n",
    "    # Final step: add the shortcut to the main path, then apply ReLU\n",
    "    X = Add()([X, X_shortcut])\n",
    "    X = Activation(\"relu\")(X)\n",
    "    \n",
    "    return X\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):\n",
    "    \"\"\"ResNet convolutional block: bottleneck main path plus a strided 1x1\n",
    "    conv on the shortcut so its shape matches the main path before the add.\"\"\"\n",
    "\n",
    "    # e.g. 64, 64, 256\n",
    "    filters1, filters2, filters3 = filters\n",
    "\n",
    "    conv_name_base = 'res' + str(stage) + block + '_branch'\n",
    "    bn_name_base = 'bn' + str(stage) + block + '_branch'\n",
    "\n",
    "    # 1x1 conv: reduce channels (and downsample via strides)\n",
    "    x = Conv2D(filters1, (1, 1), strides=strides,\n",
    "               name=conv_name_base + '2a')(input_tensor)\n",
    "    x = BatchNormalization(name=bn_name_base + '2a')(x)\n",
    "    x = Activation('relu')(x)\n",
    "\n",
    "    # 3x3 conv\n",
    "    x = Conv2D(filters2, kernel_size, padding='same',\n",
    "               name=conv_name_base + '2b')(x)\n",
    "    x = BatchNormalization(name=bn_name_base + '2b')(x)\n",
    "    x = Activation('relu')(x)\n",
    "\n",
    "    # 1x1 conv: expand channels back up\n",
    "    x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)\n",
    "    x = BatchNormalization(name=bn_name_base + '2c')(x)\n",
    "\n",
    "    # Shortcut branch: strided 1x1 conv so shapes match for the add\n",
    "    shortcut = Conv2D(filters3, (1, 1), strides=strides,\n",
    "                      name=conv_name_base + '1')(input_tensor)\n",
    "    shortcut = BatchNormalization(name=bn_name_base + '1')(shortcut)\n",
    "\n",
    "\n",
    "    x = layers.add([x, shortcut])\n",
    "    x = Activation('relu')(x)\n",
    "    return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 150,
   "metadata": {},
   "outputs": [],
   "source": [
    "def ResNet50(input_shape=[224,224,3],classes=1000):\n",
    "    \"\"\"Build a ResNet-50 model (debug print statements removed).\n",
    "\n",
    "    Arguments:\n",
    "    input_shape -- input image shape, default [224, 224, 3]\n",
    "    classes -- number of classes for the softmax output layer\n",
    "\n",
    "    Returns:\n",
    "    A keras Model mapping images to class probabilities.\n",
    "    \"\"\"\n",
    "    # [224,224,3]\n",
    "    img_input = Input(shape=input_shape)\n",
    "    x = ZeroPadding2D((3, 3))(img_input)   # [230,230,3]\n",
    "    # [112,112,64]\n",
    "    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)   #[112,112,64]\n",
    "    x = BatchNormalization(name='bn_conv1')(x)\n",
    "    x = Activation('relu')(x)\n",
    "\n",
    "    # [56,56,64]\n",
    "    x = MaxPooling2D((3, 3), strides=(2, 2))(x)\n",
    "\n",
    "    # [56,56,256]\n",
    "    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))\n",
    "    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')\n",
    "    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')\n",
    "\n",
    "    # [28,28,512]\n",
    "    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')\n",
    "    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')\n",
    "    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')\n",
    "    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')\n",
    "\n",
    "    # [14,14,1024]\n",
    "    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')\n",
    "    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')\n",
    "    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')\n",
    "    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')\n",
    "    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')\n",
    "    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')\n",
    "\n",
    "    # [7,7,2048]\n",
    "    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')\n",
    "    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')\n",
    "    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')\n",
    "\n",
    "    # Average pooling in place of large fully-connected layers\n",
    "    x = AveragePooling2D((7, 7), name='avg_pool')(x)\n",
    "\n",
    "    # Prediction head\n",
    "    x = Flatten()(x)\n",
    "    x = Dense(classes, activation='softmax', name='fc1000')(x)\n",
    "    model = Model(img_input, x, name='resnet50')\n",
    "\n",
    "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 151,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "KerasTensor(type_spec=TensorSpec(shape=(None, 224, 224, 3), dtype=tf.float32, name='input_2'), name='input_2', description=\"created by layer 'input_2'\")\n",
      "(None, 4)\n"
     ]
    }
   ],
   "source": [
    "# Build ResNet-50 with a 4-class softmax head.\n",
    "resnet_model = ResNet50(input_shape=(224, 224, 3), classes=4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 153,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ResNet50's head already applies softmax, so the loss must use\n",
    "# from_logits=False. The previous from_logits=True triggered the recorded\n",
    "# Keras warning about receiving softmax output instead of logits.\n",
    "resnet_model.compile(optimizer='adam', # optimizer\n",
    "              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), # loss on softmax probabilities\n",
    "              metrics=['accuracy']) # metrics"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 154,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/lib/python3.7/site-packages/tensorflow/python/util/dispatch.py:1096: UserWarning: \"`sparse_categorical_crossentropy` received `from_logits=True`, but the `output` argument was produced by a sigmoid or softmax activation and thus does not represent logits. Was this intended?\"\n",
      "  return dispatch_target(*args, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "29/29 [==============================] - 42s 585ms/step - loss: 1.7609 - accuracy: 0.5744\n",
      "Epoch 2/20\n",
      "29/29 [==============================] - 5s 161ms/step - loss: 0.8360 - accuracy: 0.6811\n",
      "Epoch 3/20\n",
      "29/29 [==============================] - 5s 161ms/step - loss: 0.7447 - accuracy: 0.6917\n",
      "Epoch 4/20\n",
      "29/29 [==============================] - 5s 161ms/step - loss: 0.9521 - accuracy: 0.6893\n",
      "Epoch 5/20\n",
      "29/29 [==============================] - 5s 161ms/step - loss: 1.1732 - accuracy: 0.6694\n",
      "Epoch 6/20\n",
      "29/29 [==============================] - 5s 162ms/step - loss: 0.7925 - accuracy: 0.6952\n",
      "Epoch 7/20\n",
      "29/29 [==============================] - 5s 161ms/step - loss: 0.8473 - accuracy: 0.6846\n",
      "Epoch 8/20\n",
      "29/29 [==============================] - 5s 161ms/step - loss: 0.7579 - accuracy: 0.6788\n",
      "Epoch 9/20\n",
      "29/29 [==============================] - 5s 161ms/step - loss: 0.7008 - accuracy: 0.7222\n",
      "Epoch 10/20\n",
      "29/29 [==============================] - 5s 161ms/step - loss: 0.6722 - accuracy: 0.7362\n",
      "Epoch 11/20\n",
      "29/29 [==============================] - 5s 161ms/step - loss: 0.6546 - accuracy: 0.7374\n",
      "Epoch 12/20\n",
      "29/29 [==============================] - 5s 161ms/step - loss: 0.6421 - accuracy: 0.7491\n",
      "Epoch 13/20\n",
      "29/29 [==============================] - 5s 161ms/step - loss: 0.6889 - accuracy: 0.7374\n",
      "Epoch 14/20\n",
      "29/29 [==============================] - 5s 161ms/step - loss: 0.6532 - accuracy: 0.7339\n",
      "Epoch 15/20\n",
      "29/29 [==============================] - 5s 161ms/step - loss: 0.6307 - accuracy: 0.7702\n",
      "Epoch 16/20\n",
      "29/29 [==============================] - 5s 161ms/step - loss: 0.6349 - accuracy: 0.7562\n",
      "Epoch 17/20\n",
      "29/29 [==============================] - 5s 161ms/step - loss: 0.6741 - accuracy: 0.7304\n",
      "Epoch 18/20\n",
      "29/29 [==============================] - 5s 161ms/step - loss: 0.5904 - accuracy: 0.7526\n",
      "Epoch 19/20\n",
      "29/29 [==============================] - 5s 161ms/step - loss: 0.5794 - accuracy: 0.7737\n",
      "Epoch 20/20\n",
      "29/29 [==============================] - 5s 161ms/step - loss: 0.6204 - accuracy: 0.7526\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<keras.callbacks.History at 0x7f80efcf0bd0>"
      ]
     },
     "execution_count": 154,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Train ResNet-50 on the batched training pipeline.\n",
    "resnet_model.fit(image_label_ds, epochs = 20)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# resnet50+vgg16"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 81,
   "metadata": {},
   "outputs": [],
   "source": [
    "# VGG16 convolutional backbone (feature extractor only — the dense head is\n",
    "# added later when this branch is merged with the ResNet branch).\n",
    "model = Sequential()\n",
    "\n",
    "# (filters, number of convs) for the five VGG16 stages\n",
    "conv_stages = [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)]\n",
    "is_first_layer = True\n",
    "for stage_filters, stage_depth in conv_stages:\n",
    "    for _ in range(stage_depth):\n",
    "        conv_kwargs = dict(strides=(1, 1), padding='same', activation='relu')\n",
    "        if is_first_layer:\n",
    "            # Only the very first layer declares the input shape.\n",
    "            conv_kwargs['input_shape'] = (224, 224, 3)\n",
    "            is_first_layer = False\n",
    "        model.add(Conv2D(stage_filters, (3, 3), **conv_kwargs))\n",
    "    model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n",
    "\n",
    "model.add(Flatten())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 94,
   "metadata": {},
   "outputs": [],
   "source": [
    "resnet50_model = keras.models.Sequential()\n",
    "resnet50_model.add(keras.applications.ResNet50(include_top = False, # drop the 1000-class classification top\n",
    "                                                   pooling = 'avg', # average-pool the final conv feature map to a vector\n",
    "                                                   weights = None)) # None = train from scratch; 'imagenet' would load pretrained weights\n",
    "resnet50_model.add(keras.layers.Dense(128, activation = 'relu')) # custom head, needed because include_top=False\n",
    "resnet50_model.layers[0].trainable = False # NOTE(review): with weights=None this freezes RANDOM weights; freezing only makes sense with weights='imagenet' — confirm intent"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 95,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Concatenate the VGG and ResNet feature vectors, then add the classifier head.\n",
    "merged = Concatenate()([model.output,resnet50_model.output])\n",
    "z = Dense(128, activation=\"relu\")(merged)\n",
    "z = Dropout(0.25)(z)\n",
    "# z = Dense(1024, activation=\"relu\")(z)\n",
    "z = Dense(4, activation=\"softmax\")(z)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 96,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Two-input model: [ResNet branch input, VGG branch input] -> softmax head.\n",
    "merged_model = Model(inputs=[resnet50_model.input, model.input], outputs=z)\n",
    "\n",
    "# The merged head ends in softmax, so the loss must use from_logits=False\n",
    "# (from_logits=True on softmax probabilities is a misconfiguration).\n",
    "merged_model.compile(optimizer='adam', # optimizer\n",
    "              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), # loss on softmax probabilities\n",
    "              metrics=['accuracy']) # metrics"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Convert the images and (get_data-filtered) labels to tensors.\n",
    "result = tf.convert_to_tensor(image_array)\n",
    "result_label = tf.convert_to_tensor(get_data(all_image_label))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# tf.config.experimental_run_functions_eagerly() is deprecated; the\n",
    "# supported replacement is tf.config.run_functions_eagerly().\n",
    "tf.config.run_functions_eagerly(True)\n",
    "# The merged model has two inputs (ResNet branch, VGG branch); feed the\n",
    "# same image tensor to both.\n",
    "merged_model.fit([result,result], result_label,epochs =10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
