{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Input image dimensions for Fashion-MNIST: 28x28 pixels, single grayscale channel\n",
    "img_width=28\n",
    "img_height=28\n",
    "channels=1  # grayscale"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Training hyperparameters\n",
    "batch_size=500\n",
    "num_epochs=80  # NOTE(review): defined but the training cell hardcodes epochs=2 -- confirm which is intended\n",
    "iteraions=2  # number of independent training runs (sic: misspelling kept, the training-loop cell references this name)\n",
    "nb_augmentation=2  # augmented copies generated per training image"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Label-index -> class-name mapping (Chinese labels):\n",
    "# 0 T-shirt, 1 trouser, 2 pullover, 3 dress, 4 coat,\n",
    "# 5 sandal, 6 shirt, 7 sneaker, 8 bag, 9 ankle boot\n",
    "fashion_classes={0:'T恤',\n",
    "                 1:'裤子',\n",
    "                 2:'套衫',\n",
    "                 3:'裙子',\n",
    "                 4:'外套',\n",
    "                 5:'凉鞋',\n",
    "                 6:'汗衫',\n",
    "                 7:'运动鞋',\n",
    "                 8:'包',\n",
    "                 9:'踝靴'}\n",
    "mnist_classes=[i for i in range(10)]  # numeric label ids 0..9\n",
    "num_classes=10"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train Samples: 60000\n",
      "Test Samples: 10000\n"
     ]
    }
   ],
   "source": [
    "import tensorflow_datasets as tfds\n",
    "\n",
    "# Load the full Fashion-MNIST splits as numpy arrays (batch_size=-1 loads each split in one batch).\n",
    "# Fixed misspelled local names (fasion -> fashion); X_/y_ names are unchanged for downstream cells.\n",
    "train_fashion_mnist=tfds.as_numpy(tfds.load(\"fashion_mnist\",split=\"train\",data_dir=\"./\",download=True,batch_size=-1))\n",
    "X_train,y_train=train_fashion_mnist[\"image\"],train_fashion_mnist[\"label\"]\n",
    "test_fashion_mnist=tfds.as_numpy(tfds.load(\"fashion_mnist\",split=\"test\",data_dir=\"./\",download=True,batch_size=-1))\n",
    "X_test,y_test=test_fashion_mnist[\"image\"],test_fashion_mnist[\"label\"]\n",
    "print(\"Train Samples:\",len(X_train))\n",
    "print(\"Test Samples:\",len(X_test))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[[[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        ...,\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]]],\n",
       "\n",
       "\n",
       "       [[[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        ...,\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]]],\n",
       "\n",
       "\n",
       "       [[[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        ...,\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]]],\n",
       "\n",
       "\n",
       "       ...,\n",
       "\n",
       "\n",
       "       [[[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        ...,\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]]],\n",
       "\n",
       "\n",
       "       [[[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [ 75],\n",
       "         [ 15]],\n",
       "\n",
       "        ...,\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [ 64],\n",
       "         [ 38],\n",
       "         [  0]],\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]]],\n",
       "\n",
       "\n",
       "       [[[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        ...,\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [150],\n",
       "         [ 66],\n",
       "         [  0]],\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]],\n",
       "\n",
       "        [[  0],\n",
       "         [  0],\n",
       "         [  0],\n",
       "         ...,\n",
       "         [  0],\n",
       "         [  0],\n",
       "         [  0]]]], dtype=uint8)"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "X_train  # raw training images: uint8 array (see output: shape (N, 28, 28, 1))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([2, 1, 8, ..., 6, 9, 9], dtype=int64)"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "y_train  # integer class labels (0-9), one per training image"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAP8AAAD8CAYAAAC4nHJkAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAACllJREFUeJzt3ctvTW0fxvG1VVs9t0rrUCWKVEVISBoSEYYMmEuYIcLAWIw1MZPwFxgaGZgSGkklqDQ0SkScDy2754PSd/S8eQbPun47Flv1+n6m13u3e/dxvWvwW/d95+bm5hIAfhb96Q8A4M+g/IApyg+YovyAKcoPmKL8gCnKD5ii/IApyg+YWlzMX5bL5XidEPjN5ubmcoX873jyA6YoP2CK8gOmKD9givIDpig/YIryA6YoP2CK8gOmKD9givIDpig/YIryA6YoP2CK8gOmirqfHwtPfX29zPP5fJE+ycJx8OBBmXd3d/+S38OTHzBF+QFTlB8wRfkBU5QfMEX5AVOM+ha4RYv0/7//+PFD5jU1NTJ/8uSJzPv7+1OztrY2uXZ8fFzmV65ckfmzZ89Ss/b2drl2ZmZG5i9fvpR5X1+fzN++fZuarV69Wq49cOCAzAvFkx8wRfkBU5QfMEX5AVOUHzBF+QFTlB8wlZubK96t2VzRXXxZ5/wlJSUyv379usyrq6tTs/Lycrm2qqpK5kuWLJF5bW1tahb9XbIaHh6Wufq7rlq1Sq5tamqSeT6f54puAOkoP2CK8gOmKD9givIDpig/YIryA6bYz7/A5XIFjXxTlZaWZvr5s7OzP/27v3//LvOpqalMeRbR96qsrJT55ORkahYddx69Q1AonvyAKcoPmKL8gCnKD5ii/IApyg+YovyAKeb8C1zW8xoaGxtl3tHRIXN1rn/0DkG0X3/xYv3PV/386P2E6O8W3Wfw6tUrmavfH1173tDQIPNC8eQHTFF+wBTlB0xRfsAU5QdMUX7AFKO+BS7rlt6KigqZl5WV/fT6aNtrNOqLjh1Xx3NHW3KjUV+03Thar8aQ3759k2tPnDgh80Lx5AdMUX7AFOUHTFF+wBTlB0xRfsAU5QdMMedf4KJ5dGTXrl0yj2bS6j2D6Jrs6Apvdfx1kuhZe9atzlneMUgS/d16enrk2q6uLplfuHBB5v/gyQ+YovyAKcoPmKL8gCnKD5ii/IApyg+YYs6/AKhZetZ59pEjR2Q+NDQk85KSkp/KCskj6rtHc/roWPCs+/3V729paZFrfxWe/IApyg+YovyAKcoPmKL8gCnKD5ii/IAp5vzm1q1bJ/Pt27fLvLe3V+bquulolp5lVh7l0X77rFd4Z7levLm5Wa79VXjyA6YoP2CK8gOmKD9givIDpig/YIryA6aY888D0b71rHfBKydPnpT59PS0zKPPpubp0feOvleWPJrjR/cRVFVVybysrEzm4+PjqdnKlSvl2mXLlsm8UDz5AVOUHzBF+QFTlB8wRfkBU5QfMMWobx6IxmVZRoGVlZVy7aFDh2T+9OlTmZeWlspcffZo2+vs7Gym3622zUYjzIqKikz548ePZb527drULBpD7t27V+aF4skPmKL8gCnKD5ii/IApyg+YovyAKcoPmGLO/xeI3gNQzp07J/No1p7P52UebS+NtrYqMzMzMo/m4ero7uh719TUyPzmzZsyHxgYkPm2bdtSs7GxMbl2x44dMi8UT37AFOUHTFF+wBTlB0xRfsAU5QdMUX7AFHP+eSDalx4dI93S0pKaHT16VK7t6+uTeXl5ucyjz67eURgaGpJro2u0o79LXV1davbo0SO59u7duzJ//vy5zKO/u/q7fv78Wa6NrlUvFE9+wBTlB0xRfsAU5QdMUX7AFOUHTFF+wBRz/iLIOq+OXLp0KTUbHByUa9VV0Umi98QnSXxNtjofP5rzj46Oylydy58kSfL+/fvUbGRkRK49fPiw
zDdu3Cjz6Jrt169fp2YTExNybZYzEv6NJz9givIDpig/YIryA6YoP2CK8gOmKD9gqqhz/uic9Whm/Dt/d0R9tiznxxfi+PHjMld3vXd3d8u1s7OzMm9qapJ5tJ9fzaSj9x9aW1t/+mcnSZJUVFSkZvv375dro3P9P336JPPovICvX7+mZg0NDXLtixcvZF4onvyAKcoPmKL8gCnKD5ii/IApyg+YKuqoLxrlRSOzaDSkRCOpLFtTs44od+7cKfNoLHX79u3ULNr2umnTJpnX19fLvLa2VuZqZBZdwd3c3CzzqakpmU9OTqZm0ZHl0ZbfSFVVlczVv/VoG/a9e/dkHh0b/g+e/IApyg+YovyAKcoPmKL8gCnKD5ii/ICpv+robnXd8+9cm9Xu3btlfvr0aZnfuXNH5mpW39bWJtdG8+jo/Qi1bTbKo22z6njrJInfr1BXWUdXj3/48EHmHz9+lPnDhw9lrrYER3/Tnp4emReKJz9givIDpig/YIryA6YoP2CK8gOmKD9gal7N+aO5rTqieuvWrXJtTU2NzKOzAtRV1tFRy3v27JF5NBOOZvUdHR2pWfS96urqZB7t549m9fl8PjWL3r2Iju6Orhe/du1aanb16lW59sGDBzI/e/aszKNr19W/x5aWFrl2bGxM5oXiyQ+YovyAKcoPmKL8gCnKD5ii/IApyg+Ymldz/q6uLpmr66KzXImcJEny5csXmatz1qOz79+8eSPzzZs3y1y935Akev/3ihUr5NpoP3907n90XbS6njz6bDdu3JD5mTNnZB5dP57F+vXrZR79e1OfLbp6PHq/oVA8+QFTlB8wRfkBU5QfMEX5AVOUHzBV1FHfhg0bZL5v3z6Zj46OpmbRdc5qbZIkSX9/v8zVUc+dnZ1ybbSdOLrmOtr6qsZ10Tbp4eFhmUdHWC9fvlzmahR46tQpufb+/fsyn8+iEana8httB2ZLL4BMKD9givIDpig/YIryA6YoP2CK8gOmijrnj46BHhwclLmatUfz5ug9gC1btsi8pKQkNYvm8NHx2WrbayHr1Vx4cnJSrlVHayeJ3kadJPF10ceOHZN5FtHfRb3jEL3/EIn+m8/MzMhcvQeQ9b9ZoXjyA6YoP2CK8gOmKD9givIDpig/YIryA6aKOueP9mdfvHhR5mq//5o1a+RaNadPkviq6crKyp9eGx3FrI4FL2T99PR0ahbt14/m3bdu3ZL5+fPnZa5E3zv6bNH7EdHP/52iY8PVOwojIyNybdZ3FP7/GX7JTwHw16H8gCnKD5ii/IApyg+YovyAKcoPmCrqnD/rTFnl0ay9vb1d5q2trTJfunRpahady9/Y2Cjz0tJSmUdzfrW3PDoDPnoP4PLlyzKPqFl71jn/n1RdXS3ziYkJmavvVqz3E3jyA6YoP2CK8gOmKD9givIDpig/YIryA6ZyxZyl5nK5+Tu4/Y2iOX1tba3M1VkC0fronvje3l6Z4791dnbKPHoPQL2b8e7dO7l2YGBA5nNzcwW9KMCTHzBF+QFTlB8wRfkBU5QfMEX5AVOM+oAFhlEfAInyA6YoP2CK8gOmKD9givIDpig/YIryA6YoP2CK8gOmKD9givIDpig/YIryA6YoP2CqqPv5AcwfPPkBU5QfMEX5AVOUHzBF+QFTlB8wRfkBU5QfMEX5AVOUHzBF+QFTlB8wRfkBU5QfMEX5AVOUHzBF+QFTlB8wRfkBU5QfMEX5AVOUHzBF+QFT/wPE0YiCgANjYgAAAABJRU5ErkJggg==\n",
      "text/plain": [
       "<Figure size 432x288 with 1 Axes>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Target: 踝靴\n"
     ]
    }
   ],
   "source": [
    "%matplotlib inline\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "# Display one randomly chosen training image with its class name.\n",
    "# NOTE(review): np.random is unseeded here, so the shown sample differs on each run.\n",
    "idx = np.random.randint(len(X_train))\n",
    "plt.imshow(np.squeeze(X_train[idx]),cmap='gray')\n",
    "plt.axis('off')\n",
    "plt.show()\n",
    "print(\"Target:\",fashion_classes[y_train[idx]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
    "\n",
    "# Random augmentation pipeline: small rotations plus horizontal mirroring.\n",
    "datagen = ImageDataGenerator(\n",
    "    rotation_range=10,\n",
    "    horizontal_flip=True,\n",
    "    fill_mode='nearest'\n",
    ")\n",
    "\n",
    "def image_augmentation(image, nb_of_augmentation):\n",
    "    \"\"\"Return a list of nb_of_augmentation randomly augmented copies of one image.\n",
    "\n",
    "    Each returned element keeps the generator's batch axis, i.e. shape (1, h, w, c).\n",
    "    \"\"\"\n",
    "    batch = image.reshape(1, img_height, img_width, channels)\n",
    "    flow = datagen.flow(batch, batch_size=1)\n",
    "    return [next(flow) for _ in range(nb_of_augmentation)]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "\n",
    "def preprocess_data(images, targets,use_augmentation=False, nb_of_augmentation=1):\n",
    "    \"\"\"Scale images to [0, 1], optionally append augmented copies, and one-hot encode targets.\n",
    "\n",
    "    images  -- iterable of (h, w, c) arrays; targets -- matching integer labels.\n",
    "    Returns (X, y): X as a float array, y one-hot encoded over num_classes columns.\n",
    "    \"\"\"\n",
    "    X = []\n",
    "    y = []\n",
    "    for x_,y_ in zip(images, targets):\n",
    "        x_=x_ / 255.  # normalize pixel values to [0, 1]\n",
    "        if use_augmentation:\n",
    "            augmented = image_augmentation(x_, nb_of_augmentation)\n",
    "            for a in augmented:\n",
    "                # drop the generator's batch axis before collecting\n",
    "                X.append(a.reshape(img_height, img_width, channels))\n",
    "                y.append(y_)\n",
    "        X.append(x_)\n",
    "        y.append(y_)\n",
    "    print('*预处理完成: %i 个样本\\n'% len(X))\n",
    "    # Pass num_classes explicitly so the one-hot width is stable even if a\n",
    "    # label subset happens to miss the highest class index.\n",
    "    return np.array(X), tf.keras.utils.to_categorical(y, num_classes)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "*预处理完成: 180000 个样本\n",
      "\n",
      "*预处理完成: 10000 个样本\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Augment + normalize the training set (each image contributes the original\n",
    "# plus nb_augmentation augmented copies); the test set is only normalized.\n",
    "X_train_shaped, y_train_shaped = preprocess_data(\n",
    "    X_train,\n",
    "    y_train,\n",
    "    use_augmentation=True,\n",
    "    nb_of_augmentation=nb_augmentation,\n",
    ")\n",
    "X_test_shaped, y_test_shaped = preprocess_data(X_test, y_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"sequential_4\"\n",
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "batch_normalization_5 (Batch (None, 28, 28, 1)         4         \n",
      "_________________________________________________________________\n",
      "conv2d_5 (Conv2D)            (None, 28, 28, 64)        1088      \n",
      "_________________________________________________________________\n",
      "max_pooling2d_5 (MaxPooling2 (None, 14, 14, 64)        0         \n",
      "_________________________________________________________________\n",
      "dropout_6 (Dropout)          (None, 14, 14, 64)        0         \n",
      "_________________________________________________________________\n",
      "conv2d_6 (Conv2D)            (None, 11, 11, 64)        65600     \n",
      "_________________________________________________________________\n",
      "max_pooling2d_6 (MaxPooling2 (None, 5, 5, 64)          0         \n",
      "_________________________________________________________________\n",
      "dropout_7 (Dropout)          (None, 5, 5, 64)          0         \n",
      "_________________________________________________________________\n",
      "flatten_1 (Flatten)          (None, 1600)              0         \n",
      "_________________________________________________________________\n",
      "dense_2 (Dense)              (None, 256)               409856    \n",
      "_________________________________________________________________\n",
      "dropout_8 (Dropout)          (None, 256)               0         \n",
      "_________________________________________________________________\n",
      "dense_3 (Dense)              (None, 64)                16448     \n",
      "_________________________________________________________________\n",
      "batch_normalization_6 (Batch (None, 64)                256       \n",
      "_________________________________________________________________\n",
      "dense_4 (Dense)              (None, 10)                650       \n",
      "=================================================================\n",
      "Total params: 493,902\n",
      "Trainable params: 493,772\n",
      "Non-trainable params: 130\n",
      "_________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "def create_model():\n",
    "    \"\"\"Build and compile the Fashion-MNIST CNN classifier.\n",
    "\n",
    "    Architecture: BN -> Conv(64,4x4,same) -> pool -> Conv(64,4x4) -> pool -> Dense(256) -> Dense(64) -> softmax(10).\n",
    "    Uses the canonical Conv2D layer name (Convolution2D is a legacy alias of the same class).\n",
    "    \"\"\"\n",
    "    cnn = tf.keras.Sequential()\n",
    "    cnn.add(tf.keras.layers.InputLayer(input_shape=(img_height, img_width, channels)))\n",
    "    cnn.add(tf.keras.layers.BatchNormalization())  # normalize raw pixel inputs\n",
    "    cnn.add(tf.keras.layers.Conv2D(64, (4, 4), padding='same', activation='relu'))\n",
    "    cnn.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))\n",
    "    cnn.add(tf.keras.layers.Dropout(0.1))\n",
    "    cnn.add(tf.keras.layers.Conv2D(64, (4, 4), activation='relu'))\n",
    "    cnn.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))\n",
    "    cnn.add(tf.keras.layers.Dropout(0.3))\n",
    "    cnn.add(tf.keras.layers.Flatten())\n",
    "    cnn.add(tf.keras.layers.Dense(256, activation='relu'))\n",
    "    cnn.add(tf.keras.layers.Dropout(0.5))\n",
    "    cnn.add(tf.keras.layers.Dense(64, activation='relu'))\n",
    "    cnn.add(tf.keras.layers.BatchNormalization())\n",
    "    cnn.add(tf.keras.layers.Dense(num_classes, activation='softmax'))\n",
    "    cnn.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])\n",
    "    return cnn\n",
    "\n",
    "create_model().summary()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "iteration:0\n",
      "Epoch 1/2\n",
      "288/288 [==============================] - ETA: 0s - loss: 0.5974 - accuracy: 0.7881\n",
      "Epoch 00001: val_loss improved from inf to 0.50687, saving model to fashion_mnist-0.hdf5\n",
      "288/288 [==============================] - 216s 751ms/step - loss: 0.5974 - accuracy: 0.7881 - val_loss: 0.5069 - val_accuracy: 0.8767\n",
      "Epoch 2/2\n",
      "288/288 [==============================] - ETA: 0s - loss: 0.3685 - accuracy: 0.8663\n",
      "Epoch 00002: val_loss improved from 0.50687 to 0.28769, saving model to fashion_mnist-0.hdf5\n",
      "288/288 [==============================] - 209s 725ms/step - loss: 0.3685 - accuracy: 0.8663 - val_loss: 0.2877 - val_accuracy: 0.8950\n",
      "iteration:1\n",
      "Epoch 1/2\n",
      "288/288 [==============================] - ETA: 0s - loss: 0.6018 - accuracy: 0.7848\n",
      "Epoch 00001: val_loss improved from inf to 0.50264, saving model to fashion_mnist-1.hdf5\n",
      "288/288 [==============================] - 210s 728ms/step - loss: 0.6018 - accuracy: 0.7848 - val_loss: 0.5026 - val_accuracy: 0.8713\n",
      "Epoch 2/2\n",
      "288/288 [==============================] - ETA: 0s - loss: 0.3704 - accuracy: 0.8652\n",
      "Epoch 00002: val_loss improved from 0.50264 to 0.30559, saving model to fashion_mnist-1.hdf5\n",
      "288/288 [==============================] - 177s 616ms/step - loss: 0.3704 - accuracy: 0.8652 - val_loss: 0.3056 - val_accuracy: 0.8864\n"
     ]
    }
   ],
   "source": [
    "from sklearn.model_selection import train_test_split\n",
    "histories = []\n",
    "# Train `iteraions` independent models, checkpointing each run's best (lowest val_loss) weights.\n",
    "for i in range(0,iteraions):\n",
    "    print('iteration:%i'%i)\n",
    "    filepath = \"fashion_mnist-%i.hdf5\" % i\n",
    "    # NOTE(review): the fixed random_state gives every iteration the identical train/val split,\n",
    "    # so runs differ only in weight initialization -- confirm this is intended.\n",
    "    X_train_,X_val_,y_train_,y_val_ = train_test_split(X_train_shaped,y_train_shaped,test_size=0.2,random_state=42)\n",
    "    cnn = create_model()\n",
    "    history = cnn.fit(X_train_,y_train_,\n",
    "                      batch_size=batch_size,\n",
    "                      epochs=2,  # NOTE(review): hardcoded; num_epochs=80 from the config cell is unused\n",
    "                      verbose=1,\n",
    "                      validation_data=(X_val_,y_val_),\n",
    "                      callbacks=[\n",
    "                          tf.keras.callbacks.ModelCheckpoint(filepath,monitor='val_loss',verbose=1,save_best_only=True)\n",
    "                      ])\n",
    "    histories.append(history.history)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
