{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'C:\\\\Users\\\\86137\\\\.keras\\\\datasets\\\\flower_photos\\\\tulips'"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow as tf \n",
    "import pathlib\n",
    "from tensorflow import keras\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import os\n",
    "import random\n",
    "AUTOTUNE = tf.data.experimental.AUTOTUNE"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Folder that holds the training data (one sub-directory per class).\n",
    "# NOTE(review): hardcoded absolute Windows path -- consider making this configurable.\n",
    "data_path = 'I:\\\\Skin_cancer\\\\archive\\\\Skin\\\\Train'\n",
    "data_root = pathlib.Path(data_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Collect every image path (class_dir/image_file) and shuffle them in place.\n",
    "all_image_path = [str(p) for p in data_root.glob('*/*')]\n",
    "random.shuffle(all_image_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "232.4"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Approximate batches per epoch at batch size 5 (model.fit later reports 233 steps).\n",
    "len(all_image_path)/5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Class names: one per sub-directory, ignoring any entry whose name ends in 'txt'.\n",
    "image_label = [entry.name for entry in data_root.glob('*/') if str(entry)[-3:] != 'txt']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Map each class name to its integer index; enumerate yields (index, name),\n",
    "# so the dict is {class_name: class_index}.\n",
    "label_dict = {name: index for index, name in enumerate(image_label)}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Integer label for every image, keyed by its parent directory (class) name.\n",
    "all_image_label = [int(label_dict[pathlib.Path(p).parent.name]) for p in all_image_path]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['actinic keratosis',\n",
       " 'basal cell carcinoma',\n",
       " 'dermatofibroma',\n",
       " 'melanoma',\n",
       " 'vascular lesion']"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "image_label"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def preprocess_image(image, target_size=(224, 224)):\n",
    "    \"\"\"Decode a raw JPEG byte string into a normalized float32 tensor.\n",
    "\n",
    "    Args:\n",
    "        image: serialized JPEG file contents (scalar string tensor).\n",
    "        target_size: (height, width) to resize to; defaults to the 224x224\n",
    "            input size expected by the InceptionV1 model below.\n",
    "\n",
    "    Returns:\n",
    "        A (height, width, 3) float32 tensor with values scaled to [0, 1].\n",
    "    \"\"\"\n",
    "    image = tf.image.decode_jpeg(image, channels=3)\n",
    "    image = tf.image.resize(image, list(target_size))\n",
    "    # tf.image.resize returns float32, so this division yields values in [0, 1].\n",
    "    image /= 255.0\n",
    "    return image"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_data(path):\n",
    "    \"\"\"Read the image file at `path` and return its preprocessed tensor.\"\"\"\n",
    "    raw_bytes = tf.io.read_file(path)\n",
    "    return preprocess_image(raw_bytes)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Image dataset: map each path to a decoded/resized tensor, then batch by 5.\n",
    "path_ds = tf.data.Dataset.from_tensor_slices(all_image_path)\n",
    "image_ds = path_ds.map(load_data,num_parallel_calls=AUTOTUNE).batch(5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Label dataset, batched with the same size so it stays aligned with image_ds.\n",
    "label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(all_image_label, tf.int64)).batch(5)\n",
    "# Zip into (image_batch, label_batch) pairs for model.fit.\n",
    "image_label_ds = tf.data.Dataset.zip((image_ds, label_ds))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<BatchDataset shapes: (None,), types: tf.int64>"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "label_ds"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<BatchDataset shapes: (None, 224, 224, 3), types: tf.float32>"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "image_ds"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"model\"\n",
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "input_1 (InputLayer)         [(None, 224, 224, 3)]     0         \n",
      "_________________________________________________________________\n",
      "conv1/conv (Conv2D)          (None, 112, 112, 64)      9408      \n",
      "_________________________________________________________________\n",
      "conv1/bn (BatchNormalization (None, 112, 112, 64)      256       \n",
      "_________________________________________________________________\n",
      "re_lu (ReLU)                 (None, 112, 112, 64)      0         \n",
      "_________________________________________________________________\n",
      "maxpool_1 (MaxPooling2D)     (None, 56, 56, 64)        0         \n",
      "_________________________________________________________________\n",
      "conv2/conv (Conv2D)          (None, 56, 56, 64)        4096      \n",
      "_________________________________________________________________\n",
      "conv2/bn (BatchNormalization (None, 56, 56, 64)        256       \n",
      "_________________________________________________________________\n",
      "re_lu_1 (ReLU)               (None, 56, 56, 64)        0         \n",
      "_________________________________________________________________\n",
      "conv3/conv (Conv2D)          (None, 56, 56, 192)       110592    \n",
      "_________________________________________________________________\n",
      "conv3/bn (BatchNormalization (None, 56, 56, 192)       768       \n",
      "_________________________________________________________________\n",
      "re_lu_2 (ReLU)               (None, 56, 56, 192)       0         \n",
      "_________________________________________________________________\n",
      "maxpool_2 (MaxPooling2D)     (None, 28, 28, 192)       0         \n",
      "_________________________________________________________________\n",
      "inception3a (Inception)      (None, 28, 28, 256)       156608    \n",
      "_________________________________________________________________\n",
      "inception3b (Inception)      (None, 28, 28, 480)       341504    \n",
      "_________________________________________________________________\n",
      "maxpool_3 (MaxPooling2D)     (None, 14, 14, 480)       0         \n",
      "_________________________________________________________________\n",
      "inception4a (Inception)      (None, 14, 14, 512)       365760    \n",
      "_________________________________________________________________\n",
      "inception4b (Inception)      (None, 14, 14, 512)       426528    \n",
      "_________________________________________________________________\n",
      "inception4c (Inception)      (None, 14, 14, 512)       487520    \n",
      "_________________________________________________________________\n",
      "inception4d (Inception)      (None, 14, 14, 528)       574720    \n",
      "_________________________________________________________________\n",
      "inception4e (Inception)      (None, 14, 14, 832)       805888    \n",
      "_________________________________________________________________\n",
      "maxpool_4 (MaxPooling2D)     (None, 7, 7, 832)         0         \n",
      "_________________________________________________________________\n",
      "inception5a (Inception)      (None, 7, 7, 832)         980992    \n",
      "_________________________________________________________________\n",
      "inception5b (Inception)      (None, 7, 7, 1024)        1349568   \n",
      "_________________________________________________________________\n",
      "avgpool_1 (AveragePooling2D) (None, 1, 1, 1024)        0         \n",
      "_________________________________________________________________\n",
      "output_flatten (Flatten)     (None, 1024)              0         \n",
      "_________________________________________________________________\n",
      "output_dropout (Dropout)     (None, 1024)              0         \n",
      "_________________________________________________________________\n",
      "fc (Dense)                   (None, 5)                 5125      \n",
      "_________________________________________________________________\n",
      "softmax (Softmax)            (None, 5)                 0         \n",
      "=================================================================\n",
      "Total params: 5,619,589\n",
      "Trainable params: 5,605,029\n",
      "Non-trainable params: 14,560\n",
      "_________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "from tensorflow.keras import layers, models, Model, Sequential\n",
    "\n",
    "\n",
    "def InceptionV1(im_height=224, im_width=224, class_num=5, aux_logits=False):\n",
    "    \"\"\"Build a GoogLeNet / Inception-v1 style classifier.\n",
    "\n",
    "    Args:\n",
    "        im_height, im_width: input image size (default 224x224).\n",
    "        class_num: number of output classes.\n",
    "        aux_logits: when True, the model also returns the two auxiliary\n",
    "            classifier heads (aux1, aux2).\n",
    "\n",
    "    Returns:\n",
    "        A keras Model. NOTE: the final layer is a Softmax, so the model's\n",
    "        outputs are probabilities, NOT logits.\n",
    "    \"\"\"\n",
    "    # TensorFlow tensors use channels-last (NHWC) ordering.\n",
    "    input_image = layers.Input(shape=(im_height, im_width, 3), dtype=\"float32\")\n",
    "    # (None, 224, 224, 3)\n",
    "    x = layers.Conv2D(64, kernel_size=7, strides=2, padding=\"SAME\", use_bias=False, name=\"conv1/conv\")(input_image)\n",
    "    x = layers.BatchNormalization(momentum=0.9, epsilon=1e-5, name=\"conv1/bn\")(x)\n",
    "    x = layers.ReLU()(x)\n",
    "    # (None, 112, 112, 64)\n",
    "    x = layers.MaxPool2D(pool_size=3, strides=2, padding=\"SAME\", name=\"maxpool_1\")(x)\n",
    "    # (None, 56, 56, 64)\n",
    "    x = layers.Conv2D(64, kernel_size=1, use_bias=False, name=\"conv2/conv\")(x)\n",
    "    x = layers.BatchNormalization(momentum=0.9, epsilon=1e-5, name=\"conv2/bn\")(x)\n",
    "    x = layers.ReLU()(x)\n",
    "    # (None, 56, 56, 64)\n",
    "    x = layers.Conv2D(192, kernel_size=3, padding=\"SAME\", use_bias=False, name=\"conv3/conv\")(x)\n",
    "    x = layers.BatchNormalization(momentum=0.9, epsilon=1e-5, name=\"conv3/bn\")(x)\n",
    "    x = layers.ReLU()(x)\n",
    "    # (None, 56, 56, 192)\n",
    "    x = layers.MaxPool2D(pool_size=3, strides=2, padding=\"SAME\", name=\"maxpool_2\")(x)\n",
    "\n",
    "    # (None, 28, 28, 192)\n",
    "    x = Inception(64, 96, 128, 16, 32, 32, name=\"inception3a\")(x)\n",
    "    # (None, 28, 28, 256)\n",
    "    x = Inception(128, 128, 192, 32, 96, 64, name=\"inception3b\")(x)\n",
    "\n",
    "    # (None, 28, 28, 480)\n",
    "    x = layers.MaxPool2D(pool_size=3, strides=2, padding=\"SAME\", name=\"maxpool_3\")(x)\n",
    "    # (None, 14, 14, 480)\n",
    "    x = Inception(192, 96, 208, 16, 48, 64, name=\"inception4a\")(x)\n",
    "    if aux_logits:\n",
    "        aux1 = InceptionAux(class_num, name=\"aux1\")(x)\n",
    "\n",
    "    # (None, 14, 14, 512)\n",
    "    x = Inception(160, 112, 224, 24, 64, 64, name=\"inception4b\")(x)\n",
    "    # (None, 14, 14, 512)\n",
    "    x = Inception(128, 128, 256, 24, 64, 64, name=\"inception4c\")(x)\n",
    "    # (None, 14, 14, 512)\n",
    "    x = Inception(112, 144, 288, 32, 64, 64, name=\"inception4d\")(x)\n",
    "    if aux_logits:\n",
    "        aux2 = InceptionAux(class_num, name=\"aux2\")(x)\n",
    "\n",
    "    # (None, 14, 14, 528)\n",
    "    x = Inception(256, 160, 320, 32, 128, 128, name=\"inception4e\")(x)\n",
    "    # (None, 14, 14, 832)\n",
    "    # NOTE(review): classic GoogLeNet uses pool_size=3 here; pool_size=2 still\n",
    "    # gives the 14->7 downsampling shown in the model summary.\n",
    "    x = layers.MaxPool2D(pool_size=2, strides=2, padding=\"SAME\", name=\"maxpool_4\")(x)\n",
    "\n",
    "    # (None, 7, 7, 832)\n",
    "    x = Inception(256, 160, 320, 32, 128, 128, name=\"inception5a\")(x)\n",
    "    # (None, 7, 7, 832)\n",
    "    x = Inception(384, 192, 384, 48, 128, 128, name=\"inception5b\")(x)\n",
    "    # (None, 7, 7, 1024)\n",
    "    x = layers.AvgPool2D(pool_size=7, strides=1, name=\"avgpool_1\")(x)\n",
    "\n",
    "    # (None, 1, 1, 1024)\n",
    "    x = layers.Flatten(name=\"output_flatten\")(x)\n",
    "    # (None, 1024)\n",
    "    x = layers.Dropout(rate=0.4, name=\"output_dropout\")(x)\n",
    "    x = layers.Dense(class_num, name=\"fc\")(x)\n",
    "    # (None, class_num)\n",
    "    # Softmax converts the Dense logits to probabilities; any loss applied to\n",
    "    # this model's output must therefore use from_logits=False.\n",
    "    aux3 = layers.Softmax()(x)\n",
    "\n",
    "    if aux_logits:\n",
    "        model = models.Model(inputs=input_image, outputs=[aux1, aux2, aux3])\n",
    "    else:\n",
    "        model = models.Model(inputs=input_image, outputs=aux3)\n",
    "    return model\n",
    "\n",
    "\n",
    "class Inception(layers.Layer):\n",
    "    \"\"\"Inception block: four parallel branches concatenated on the channel axis.\n",
    "\n",
    "    NOTE(review): branch3 (the ch5x5 path) uses kernel_size=3 rather than the\n",
    "    classic 5x5 -- confirm this is intentional; output channel counts are\n",
    "    unaffected either way.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj, **kwargs):\n",
    "        super(Inception, self).__init__(**kwargs)\n",
    "        # Branch 1: plain 1x1 conv.\n",
    "        self.branch1 = Sequential([\n",
    "            layers.Conv2D(ch1x1, kernel_size=1, use_bias=False, name=\"conv\"),\n",
    "            layers.BatchNormalization(momentum=0.9, epsilon=1e-5, name=\"bn\"),\n",
    "            layers.ReLU()], name=\"branch1\")\n",
    "\n",
    "        # Branch 2: 1x1 reduction followed by a 3x3 conv.\n",
    "        self.branch2 = Sequential([\n",
    "            layers.Conv2D(ch3x3red, kernel_size=1, use_bias=False, name=\"0/conv\"),\n",
    "            layers.BatchNormalization(momentum=0.9, epsilon=1e-5, name=\"0/bn\"),\n",
    "            layers.ReLU(),\n",
    "            layers.Conv2D(ch3x3, kernel_size=3, padding=\"SAME\", use_bias=False, name=\"1/conv\"),\n",
    "            layers.BatchNormalization(momentum=0.9, epsilon=1e-5, name=\"1/bn\"),\n",
    "            layers.ReLU()], name=\"branch2\")      # output_size= input_size\n",
    "\n",
    "        # Branch 3: 1x1 reduction followed by a 3x3 conv (see class docstring).\n",
    "        self.branch3 = Sequential([\n",
    "            layers.Conv2D(ch5x5red, kernel_size=1, use_bias=False, name=\"0/conv\"),\n",
    "            layers.BatchNormalization(momentum=0.9, epsilon=1e-5, name=\"0/bn\"),\n",
    "            layers.ReLU(),\n",
    "            layers.Conv2D(ch5x5, kernel_size=3, padding=\"SAME\", use_bias=False, name=\"1/conv\"),\n",
    "            layers.BatchNormalization(momentum=0.9, epsilon=1e-5, name=\"1/bn\"),\n",
    "            layers.ReLU()], name=\"branch3\")      # output_size= input_size\n",
    "\n",
    "        # Branch 4: 3x3 max-pool (stride 1) followed by a 1x1 projection.\n",
    "        self.branch4 = Sequential([\n",
    "            layers.MaxPool2D(pool_size=3, strides=1, padding=\"SAME\"),  # caution: default strides==pool_size\n",
    "            layers.Conv2D(pool_proj, kernel_size=1, use_bias=False, name=\"1/conv\"),\n",
    "            layers.BatchNormalization(momentum=0.9, epsilon=1e-5, name=\"1/bn\"),\n",
    "            layers.ReLU()], name=\"branch4\")                  # output_size= input_size\n",
    "\n",
    "    def call(self, inputs, **kwargs):\n",
    "        \"\"\"Run all four branches on `inputs` and concatenate along channels.\"\"\"\n",
    "        branch1 = self.branch1(inputs)\n",
    "        branch2 = self.branch2(inputs)\n",
    "        branch3 = self.branch3(inputs)\n",
    "        branch4 = self.branch4(inputs)\n",
    "        outputs = layers.concatenate([branch1, branch2, branch3, branch4])\n",
    "        return outputs\n",
    "\n",
    "\n",
    "class InceptionAux(layers.Layer):\n",
    "    \"\"\"Auxiliary classifier head: avg-pool -> 1x1 conv -> FC(1024) -> softmax.\"\"\"\n",
    "\n",
    "    def __init__(self, num_classes, **kwargs):\n",
    "        super(InceptionAux, self).__init__(**kwargs)\n",
    "        self.averagePool = layers.AvgPool2D(pool_size=5, strides=3)\n",
    "        self.conv = layers.Conv2D(128, kernel_size=1, use_bias=False, name=\"conv/conv\")\n",
    "        self.bn1 = layers.BatchNormalization(momentum=0.9, epsilon=1e-5, name=\"conv/bn\")\n",
    "        self.rule1 = layers.ReLU()  # (sic: \"rule1\") ReLU activation\n",
    "\n",
    "        self.fc1 = layers.Dense(1024, activation=\"relu\", name=\"fc1\")\n",
    "        self.fc2 = layers.Dense(num_classes, name=\"fc2\")\n",
    "        self.softmax = layers.Softmax()\n",
    "\n",
    "    def call(self, inputs, **kwargs):\n",
    "        \"\"\"Map an intermediate feature map to class probabilities.\"\"\"\n",
    "        # aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14\n",
    "        x = self.averagePool(inputs)\n",
    "        # aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4\n",
    "        x = self.conv(x)\n",
    "        x = self.bn1(x)\n",
    "        x = self.rule1(x)\n",
    "        # N x 128 x 4 x 4\n",
    "        # NOTE(review): Flatten/Dropout are constructed inside call(), so fresh\n",
    "        # layer objects are created on every invocation -- works, but is\n",
    "        # unconventional; consider moving them to __init__.\n",
    "        x = layers.Flatten()(x)\n",
    "        x = layers.Dropout(rate=0.5)(x)\n",
    "        # N x 2048\n",
    "        x = self.fc1(x)\n",
    "        x = layers.Dropout(rate=0.5)(x)\n",
    "        # N x 1024\n",
    "        x = self.fc2(x)\n",
    "        # N x num_classes\n",
    "        x = self.softmax(x)\n",
    "\n",
    "        return x\n",
    "\n",
    "# Build the model with default settings and show the architecture summary.\n",
    "model = InceptionV1()\n",
    "model.summary()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# BUG FIX: the model ends in a Softmax layer, so its outputs are already\n",
    "# probabilities -- the loss must use from_logits=False. With from_logits=True\n",
    "# softmax is effectively applied twice, which explains the training log where\n",
    "# accuracy stalled around 0.5.\n",
    "model.compile(optimizer='adam',  # optimizer\n",
    "              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),  # loss\n",
    "              metrics=['accuracy'])  # reported metric"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train for 233 steps\n",
      "Epoch 1/5\n",
      "233/233 [==============================] - 170s 729ms/step - loss: 1.3890 - accuracy: 0.5120\n",
      "Epoch 2/5\n",
      "233/233 [==============================] - 167s 717ms/step - loss: 1.4131 - accuracy: 0.4897\n",
      "Epoch 3/5\n",
      "233/233 [==============================] - 167s 716ms/step - loss: 1.4260 - accuracy: 0.4768\n",
      "Epoch 4/5\n",
      "233/233 [==============================] - 167s 719ms/step - loss: 1.3770 - accuracy: 0.5293\n",
      "Epoch 5/5\n",
      "233/233 [==============================] - 167s 716ms/step - loss: 1.3829 - accuracy: 0.5189\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<tensorflow.python.keras.callbacks.History at 0x1251b725d08>"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Train for 5 epochs on the zipped (image_batch, label_batch) dataset.\n",
    "model.fit(image_label_ds, epochs=5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From D:\\ProgramData\\Anaconda3\\lib\\site-packages\\tensorflow_core\\python\\ops\\resource_variable_ops.py:1786: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "If using Keras pass *_constraint arguments to layers.\n",
      "INFO:tensorflow:Assets written to: I:\\Step\\step_test\\GoogLeNet\\assets\n"
     ]
    }
   ],
   "source": [
    "# Save the trained model in TensorFlow SavedModel format.\n",
    "# NOTE(review): hardcoded absolute path -- consider making this configurable.\n",
    "save_path = 'I:\\\\Step\\\\step_test\\\\GoogLeNet'\n",
    "model.save(save_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/GoogLeNet\\assets\n"
     ]
    }
   ],
   "source": [
    "# Save a second copy of the model under a relative path.\n",
    "model.save('saved_model/GoogLeNet')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'daisy': 0, 'dandelion': 1, 'roses': 2, 'sunflowers': 3, 'tulips': 4}"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "3670"
      ]
     },
     "execution_count": 43,
     "metadata": {},
     "output_type": "execute_result"
    },
    {
     "data": {
      "text/plain": [
       "3670"
      ]
     },
     "execution_count": 43,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 74,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'tulips': 4}"
      ]
     },
     "execution_count": 75,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
