{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "from tensorflow import keras\n",
    "from tensorflow.keras import layers\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "outputs": [],
   "source": [
    "inputs = keras.Input(shape=(784,),name='digits')\n",
    "x1 = layers.Dense(64,activation='relu')(inputs)\n",
    "x2 = layers.Dense(64,activation='relu')(x1)\n",
    "outputs = layers.Dense(10,name='predictions')(x2)\n",
    "model = keras.Model(inputs=inputs,outputs=outputs)\n",
    "\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "outputs": [],
   "source": [
    "optimizer = keras.optimizers.SGD(learning_rate=1e-3)\n",
    "loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n",
    "\n",
    "batch_size = 64\n",
    "(x_train,y_train),(x_test,y_test) = keras.datasets.mnist.load_data()\n",
    "x_train = np.reshape(x_train,(-1,784))\n",
    "x_test = np.reshape(x_train,(-1,784))\n",
    "train_dataset = tf.data.Dataset.from_tensor_slices((x_train,y_train))\n",
    "train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Start of epoch 0\n",
      "Training loss (for one batch) at step 0: 102.8965\n",
      "Seen so far: 64 samples\n",
      "Training loss (for one batch) at step 200: 1.1217\n",
      "Seen so far: 12864 samples\n",
      "Training loss (for one batch) at step 400: 0.7822\n",
      "Seen so far: 25664 samples\n",
      "Training loss (for one batch) at step 600: 0.6137\n",
      "Seen so far: 38464 samples\n",
      "Training loss (for one batch) at step 800: 1.0397\n",
      "Seen so far: 51264 samples\n",
      "Start of epoch 1\n",
      "Training loss (for one batch) at step 0: 1.1766\n",
      "Seen so far: 64 samples\n",
      "Training loss (for one batch) at step 200: 0.4597\n",
      "Seen so far: 12864 samples\n",
      "Training loss (for one batch) at step 400: 0.8410\n",
      "Seen so far: 25664 samples\n",
      "Training loss (for one batch) at step 600: 0.6014\n",
      "Seen so far: 38464 samples\n",
      "Training loss (for one batch) at step 800: 0.3915\n",
      "Seen so far: 51264 samples\n"
     ]
    }
   ],
   "source": [
    "epochs = 2\n",
    "for epoch in range(epochs):\n",
    "    print(\"Start of epoch %d\" % (epoch,))\n",
    "\n",
    "    for step,(x_batch_train,y_batch_train) in enumerate(train_dataset):\n",
    "        with tf.GradientTape() as tape:\n",
    "            logits = model(x_batch_train,training=True)\n",
    "            loss_value = loss_fn(y_batch_train,logits)\n",
    "        grads = tape.gradient(loss_value,model.trainable_weights)\n",
    "        optimizer.apply_gradients(zip(grads,model.trainable_weights))\n",
    "\n",
    "        if step % 200 == 0:\n",
    "            print(\"Training loss (for one batch) at step %d: %.4f\"\n",
    "                % (step, float(loss_value)))\n",
    "            print(\"Seen so far: %s samples\" % ((step + 1) * 64))\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "outputs": [],
   "source": [
    "inputs = keras.Input(shape=(784,),name='digits')\n",
    "x = layers.Dense(64,activation='relu',name='dense_1')(inputs)\n",
    "x = layers.Dense(64,activation='relu',name='dense_2')(x)\n",
    "outputs = layers.Dense(10,name='predictions')(x)\n",
    "model = keras.Model(inputs=inputs,outputs=outputs)\n",
    "\n",
    "optimizer = keras.optimizers.SGD(learning_rate=1e-3)\n",
    "loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n",
    "\n",
    "train_acc_metric = keras.metrics.SparseCategoricalAccuracy()\n",
    "val_acc_metric = keras.metrics.SparseCategoricalAccuracy()\n",
    "\n",
    "batch_size = 64\n",
    "train_dataset = tf.data.Dataset.from_tensor_slices((x_train,y_train))\n",
    "train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)\n",
    "\n",
    "x_val = x_train[-10000:]\n",
    "y_val = y_train[-10000:]\n",
    "x_train = x_train[:-10000]\n",
    "y_train = y_train[:-10000]\n",
    "val_dataset = tf.data.Dataset.from_tensor_slices((x_val,y_val))\n",
    "val_dataset = val_dataset.batch(64)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Start of epoch 0\n",
      "Training loss (for one batch) at step 0: 164.8067\n",
      "Seen so far: 64 samples\n",
      "Training loss (for one batch) at step 200: 1.3874\n",
      "Seen so far: 12864 samples\n",
      "Training loss (for one batch) at step 400: 2.2871\n",
      "Seen so far: 25664 samples\n",
      "Training loss (for one batch) at step 600: 0.8346\n",
      "Seen so far: 38464 samples\n",
      "Training loss (for one batch) at step 800: 1.3369\n",
      "Seen so far: 51264 samples\n",
      "Training acc over epoch: 0.6938\n",
      "Validation acc:0.8253\n",
      "Time taken: 5.42s\n",
      "Start of epoch 1\n",
      "Training loss (for one batch) at step 0: 0.5012\n",
      "Seen so far: 64 samples\n",
      "Training loss (for one batch) at step 200: 0.5130\n",
      "Seen so far: 12864 samples\n",
      "Training loss (for one batch) at step 400: 0.5873\n",
      "Seen so far: 25664 samples\n",
      "Training loss (for one batch) at step 600: 0.4410\n",
      "Seen so far: 38464 samples\n",
      "Training loss (for one batch) at step 800: 0.6642\n",
      "Seen so far: 51264 samples\n",
      "Training acc over epoch: 0.8287\n",
      "Validation acc:0.8657\n",
      "Time taken: 5.30s\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "epochs = 2\n",
    "for epoch in range(epochs):\n",
    "    print(\"Start of epoch %d\" % (epoch,))\n",
    "    start_time = time.time()\n",
    "\n",
    "    for step,(x_batch_train,y_batch_train) in enumerate(train_dataset):\n",
    "        with tf.GradientTape() as tape:\n",
    "            logits = model(x_batch_train,training=True)\n",
    "            loss_value = loss_fn(y_batch_train,logits)\n",
    "        grads = tape.gradient(loss_value,model.trainable_weights)\n",
    "        optimizer.apply_gradients(zip(grads,model.trainable_weights))\n",
    "\n",
    "        train_acc_metric.update_state(y_batch_train,logits)\n",
    "\n",
    "        if step % 200 == 0:\n",
    "            print(\n",
    "                \"Training loss (for one batch) at step %d: %.4f\"\n",
    "                % (step, float(loss_value))\n",
    "            )\n",
    "            print(\"Seen so far: %d samples\" % ((step + 1) * 64))\n",
    "\n",
    "    train_acc = train_acc_metric.result()\n",
    "    print(\"Training acc over epoch: %.4f\" % (float(train_acc),))\n",
    "\n",
    "    train_acc_metric.reset_states()\n",
    "    for x_batch_val,y_batch_val in val_dataset:\n",
    "        val_logits = model(x_batch_val,training=False)\n",
    "        val_acc_metric.update_state(y_batch_val,val_logits)\n",
    "\n",
    "    val_acc = val_acc_metric.result()\n",
    "    val_acc_metric.reset_states()\n",
    "    print(\"Validation acc:%.4f\" % (float(val_acc)))\n",
    "    print(\"Time taken: %.2fs\" % (time.time() - start_time))"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "outputs": [],
   "source": [
    "@tf.function\n",
    "def train_step(x,y):\n",
    "    with tf.GradientTape() as tape:\n",
    "        logits = model(x,training=True)\n",
    "        loss_value = loss_fn(y,logits)\n",
    "    grads = tape.gradient(loss_value,model.trainable_weights)\n",
    "    optimizer.apply_gradients(zip(grads,model.trainable_weights))\n",
    "    train_acc_metric.update_state(y,logits)\n",
    "    return loss_value\n",
    "\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "outputs": [],
   "source": [
    "@tf.function\n",
    "def test_step(x,y):\n",
    "    val_logits = model(x,training=False)\n",
    "    val_acc_metric.update_state(y,val_logits)\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Start of epoch 0\n",
      "Training loss (for one batch ) at step 0:0.4670\n",
      "Seen so far: 64 samples\n",
      "Training loss (for one batch ) at step 200:0.7738\n",
      "Seen so far: 12864 samples\n",
      "Training loss (for one batch ) at step 400:0.6823\n",
      "Seen so far: 25664 samples\n",
      "Training loss (for one batch ) at step 600:0.3738\n",
      "Seen so far: 38464 samples\n",
      "Training loss (for one batch ) at step 800:0.7392\n",
      "Seen so far: 51264 samples\n",
      "Training acc over epoch :.0.864200\n",
      "Validation acc: 0.8908\n",
      "Time taken: 1.53s\n",
      "Start of epoch 1\n",
      "Training loss (for one batch ) at step 0:0.3529\n",
      "Seen so far: 64 samples\n",
      "Training loss (for one batch ) at step 200:0.6523\n",
      "Seen so far: 12864 samples\n",
      "Training loss (for one batch ) at step 400:0.4990\n",
      "Seen so far: 25664 samples\n",
      "Training loss (for one batch ) at step 600:0.4103\n",
      "Seen so far: 38464 samples\n",
      "Training loss (for one batch ) at step 800:0.3966\n",
      "Seen so far: 51264 samples\n",
      "Training acc over epoch :.0.883117\n",
      "Validation acc: 0.9047\n",
      "Time taken: 1.03s\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "epochs = 2\n",
    "for epoch in range(epochs):\n",
    "    print(\"Start of epoch %d\" % (epoch,))\n",
    "    start_time = time.time()\n",
    "\n",
    "    for step,(x_batch_train,y_batch_train) in enumerate(train_dataset):\n",
    "        loss_value = train_step(x_batch_train,y_batch_train)\n",
    "\n",
    "        if step % 200 == 0:\n",
    "            print(\"Training loss (for one batch ) at step %d:%.4f\"\n",
    "                  % (step,float(loss_value)))\n",
    "            print(\"Seen so far: %d samples\" % ((step + 1) * 64))\n",
    "\n",
    "    train_acc = train_acc_metric.result()\n",
    "    print(\"Training acc over epoch :.%4f\" % (float(train_acc),))\n",
    "    train_acc_metric.reset_states()\n",
    "\n",
    "    for x_batch_val,y_batch_val in val_dataset:\n",
    "        test_step(x_batch_val,y_batch_val)\n",
    "\n",
    "    val_acc = val_acc_metric.result()\n",
    "    val_acc_metric.reset_states()\n",
    "    print(\"Validation acc: %.4f\" % (float(val_acc),))\n",
    "    print(\"Time taken: %.2fs\" % (time.time() - start_time))\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "outputs": [],
   "source": [
    "class ActivityRegularizationLayer(layers.Layer):\n",
    "    def call(self,inputs):\n",
    "        self.add_loss(1e-2 * tf.reduce_sum(inputs))\n",
    "        return inputs\n",
    "\n",
    "\n",
    "inputs = keras.Input(shape=(784,),name='digits')\n",
    "x = layers.Dense(64,activation='relu')(inputs)\n",
    "x = ActivityRegularizationLayer()(x)\n",
    "x = layers.Dense(64,activation='relu')(x)\n",
    "outputs = layers.Dense(10,name='repdictions')(x)\n",
    "\n",
    "model = keras.Model(inputs=inputs,outputs=outputs)\n",
    "\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "outputs": [],
   "source": [
    "@tf.function\n",
    "def train_step(x,y):\n",
    "    with tf.GradientTape() as tape:\n",
    "        logits = model(x,training=True)\n",
    "        loss_value = loss_fn(y,logits)\n",
    "        loss_value += sum(model.losses)\n",
    "    grads = tape.gradient((loss_value,model.trainable_weights))\n",
    "    optimizer.apply_gradients(zip(grads,model.trainable_weights))\n",
    "    train_acc_metric.update_state(y,logits)\n",
    "    return loss_value"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "outputs": [
    {
     "data": {
      "text/plain": "[<tf.Variable 'dense_2_1/kernel:0' shape=(784, 64) dtype=float32, numpy=\n array([[-0.05711845,  0.01983435, -0.01610497, ..., -0.02485434,\n         -0.01952898, -0.0410205 ],\n        [ 0.0823616 , -0.04090701, -0.07374496, ...,  0.01792967,\n          0.01693538, -0.04031307],\n        [-0.03444666,  0.07158034, -0.00594568, ..., -0.0423037 ,\n          0.05563646, -0.06188815],\n        ...,\n        [ 0.0430133 , -0.02953172, -0.08228853, ..., -0.01824985,\n          0.07827469,  0.04779667],\n        [ 0.03314535, -0.02600519, -0.04063687, ..., -0.01990834,\n         -0.06930259, -0.06707999],\n        [-0.07610025, -0.06810175,  0.03883003, ..., -0.04139709,\n         -0.00682366,  0.08198994]], dtype=float32)>,\n <tf.Variable 'dense_2_1/bias:0' shape=(64,) dtype=float32, numpy=\n array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32)>,\n <tf.Variable 'dense_3/kernel:0' shape=(64, 64) dtype=float32, numpy=\n array([[-0.16127011, -0.02889392,  0.20687385, ..., -0.01437341,\n         -0.19819595, -0.04728246],\n        [-0.11833847,  0.16634391, -0.10504763, ...,  0.03379904,\n          0.13833453,  0.2090721 ],\n        [ 0.01820354, -0.05223   , -0.21338467, ...,  0.02445808,\n          0.02713525,  0.07488872],\n        ...,\n        [-0.11600487, -0.12638131, -0.02175514, ..., -0.153091  ,\n          0.13086118, -0.07534479],\n        [-0.0415549 , -0.0749948 , -0.15757929, ...,  0.16052626,\n          0.0165475 ,  0.06829397],\n        [ 0.1307009 ,  0.19864132,  0.20748173, ...,  0.0433598 ,\n         -0.14140469,  0.17738049]], dtype=float32)>,\n <tf.Variable 'dense_3/bias:0' shape=(64,) dtype=float32, numpy=\n array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 
       0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32)>,\n <tf.Variable 'repdictions/kernel:0' shape=(64, 10) dtype=float32, numpy=\n array([[-0.20673558, -0.00408909, -0.09336756, -0.1237691 , -0.11041798,\n         -0.21897103,  0.13589764, -0.00282535, -0.11166407,  0.0042254 ],\n        [-0.08532101,  0.20529002,  0.03346375,  0.18169335,  0.06976151,\n          0.27540603, -0.03943411, -0.01422235, -0.14846878, -0.11927579],\n        [ 0.23445871, -0.16668226, -0.19806045,  0.1575571 ,  0.22799996,\n          0.2341769 ,  0.22409788,  0.13391894,  0.05337724, -0.03451961],\n        [ 0.04796049,  0.1888521 , -0.06285924, -0.2845102 , -0.06309488,\n         -0.08260688, -0.17830399, -0.02292463, -0.27397048,  0.17566329],\n        [-0.03135166,  0.08785334, -0.05370113, -0.2604662 ,  0.17515814,\n         -0.22375694, -0.09397185,  0.11849445,  0.08307144, -0.09027672],\n        [ 0.28431937,  0.20806098, -0.00567058,  0.2353088 , -0.10575154,\n          0.14037994,  0.12989452,  0.20483041, -0.1735935 , -0.27618346],\n        [ 0.05898923, -0.24283071, -0.15092969, -0.04026575,  0.19851437,\n         -0.18681374, -0.10619214,  0.17077985,  0.05430725,  0.03607816],\n        [ 0.04095727,  0.03166366, -0.04520766,  0.25454244,  0.16427612,\n          0.01918286, -0.2372355 ,  0.157837  ,  0.04139414, -0.21113548],\n        [ 0.25315186, -0.1369977 ,  0.06857902, -0.20949942, -0.00401196,\n         -0.01020706,  0.15022242,  0.28154728, -0.15324613,  0.164094  ],\n        [ 0.13798237, -0.19920832, -0.10383429,  0.07952213, -0.21874571,\n         -0.13214594, -0.21582961, -0.12511867,  0.05826375, -0.00854933],\n        [ 0.17710996, -0.00804684,  0.2638239 , -0.25699747, -0.14595512,\n         -0.15279908, -0.11375159,  0.06447926,  0.25575426,  0.12989834],\n        [-0.2651519 , 
-0.01521483, -0.19781545,  0.07175562,  0.10113171,\n          0.00321984, -0.09803738, -0.10811144,  0.21142569, -0.03811231],\n        [-0.13267513, -0.07642049,  0.17623243, -0.09112084,  0.06415477,\n         -0.21718405, -0.04367813, -0.09338047,  0.04161391,  0.12229714],\n        [-0.15562849,  0.11764476,  0.23610577, -0.23889987, -0.13250819,\n          0.11599281, -0.14902444,  0.09128997, -0.17849237, -0.22256596],\n        [-0.10138194, -0.16543439, -0.03760444, -0.07963428,  0.12057403,\n         -0.04552838, -0.02814198, -0.16950929,  0.22931793,  0.01026136],\n        [-0.04768461,  0.20116231,  0.01110929, -0.18995354,  0.07399291,\n         -0.18389425, -0.16874024, -0.00267068, -0.17687595, -0.20736364],\n        [ 0.09733003, -0.17766413, -0.08115968,  0.24817899, -0.28215805,\n          0.00062037,  0.19968694,  0.02355316, -0.13628119, -0.2806226 ],\n        [-0.233972  ,  0.2435601 , -0.03129855, -0.20102605, -0.11374494,\n         -0.08124787,  0.137122  ,  0.03281343,  0.14040872, -0.19702542],\n        [-0.14869803, -0.0378066 , -0.2381313 ,  0.14551058, -0.19039604,\n          0.00099015,  0.25280693, -0.19109625, -0.05393115, -0.20230176],\n        [ 0.2766876 ,  0.06192052,  0.02525771,  0.06844324, -0.15528157,\n          0.12837484,  0.16724867,  0.11424318, -0.11275384,  0.00420526],\n        [-0.02389145, -0.22158483, -0.04084919, -0.23208013, -0.27272716,\n         -0.14170438,  0.1738576 ,  0.27543202, -0.18772393, -0.01014766],\n        [-0.1871555 ,  0.20103785,  0.17929685,  0.09904072,  0.00785109,\n         -0.14634344, -0.21041755,  0.11563069,  0.2098343 , -0.07734492],\n        [ 0.0847671 , -0.27832842,  0.02208465, -0.08756672,  0.21236116,\n          0.22191629,  0.14627385, -0.0760864 , -0.24145359, -0.17693405],\n        [ 0.12667504,  0.04229251, -0.07972498, -0.27265003,  0.17836577,\n         -0.09585644, -0.23798099, -0.0130856 ,  0.11516374, -0.06474702],\n        [ 0.11330366,  0.03037229,  0.15622896,  
0.21221513, -0.01780987,\n         -0.2685156 ,  0.13776186,  0.06922397, -0.24002989,  0.17306077],\n        [ 0.02181512,  0.2072939 ,  0.24428341, -0.18243131,  0.09639868,\n         -0.10570782,  0.19537896,  0.21762052,  0.27044234, -0.15661573],\n        [-0.24408415,  0.27136996, -0.18347448,  0.01023999, -0.21509443,\n          0.00711668,  0.12348199,  0.28028652,  0.03028396, -0.03832698],\n        [ 0.12782487,  0.07374802,  0.22286883, -0.23248497,  0.07327172,\n         -0.06678492,  0.27868715, -0.24250823,  0.08821478, -0.17427558],\n        [ 0.1106033 , -0.28108153, -0.20890947, -0.26516926, -0.19737993,\n         -0.08331516,  0.08939362, -0.14035773, -0.02821735,  0.19234121],\n        [-0.1797317 , -0.07988235, -0.12824632, -0.02280849,  0.23233196,\n         -0.0615426 ,  0.21843633, -0.04509178,  0.03600958, -0.12577035],\n        [ 0.04019788, -0.21493448,  0.00480491,  0.1578469 , -0.25602412,\n          0.25673506,  0.22822359, -0.19178587, -0.06950217,  0.18692419],\n        [-0.11751027, -0.15385774, -0.2332925 , -0.22776464, -0.22467303,\n         -0.26670974, -0.27667043,  0.2745153 , -0.16495442,  0.06852615],\n        [-0.10244352, -0.16623378, -0.14961372,  0.08745396, -0.17393246,\n          0.0205822 , -0.13619152,  0.12203285,  0.10705745,  0.15775219],\n        [ 0.20377421,  0.2102494 ,  0.19713104, -0.11691536, -0.2144983 ,\n         -0.08144672, -0.10483789,  0.09242025, -0.22531417, -0.00810474],\n        [ 0.12925577, -0.00053787,  0.0015958 , -0.03475249, -0.21732432,\n         -0.15258767, -0.1493611 ,  0.14377153, -0.13475111,  0.2414122 ],\n        [-0.1266546 ,  0.04523721, -0.25910956, -0.2326703 , -0.04182123,\n          0.18635938, -0.25680447, -0.27876073,  0.20056793,  0.1282967 ],\n        [ 0.15422443,  0.01828095, -0.18568999, -0.24538049, -0.23618437,\n          0.08338863,  0.19067073, -0.20596477, -0.2627907 ,  0.10038343],\n        [ 0.19516546, -0.11670938,  0.01981851,  0.04977164,  0.19414848,\n         
-0.05932982,  0.24680659, -0.17613493, -0.2745278 ,  0.19214663],\n        [ 0.19642562, -0.03311127, -0.0669264 ,  0.16505495, -0.07922715,\n         -0.04027207,  0.17204699, -0.03279626,  0.22490707, -0.10175622],\n        [ 0.02106789,  0.12316787, -0.13449381,  0.21346489,  0.04827353,\n          0.0981327 , -0.02842745, -0.26316905, -0.22784631,  0.26631954],\n        [-0.15289752,  0.14311951,  0.28021666, -0.17640927,  0.20005918,\n         -0.2237154 , -0.28032503,  0.2555587 ,  0.20619214, -0.20615336],\n        [ 0.26604494, -0.2539352 ,  0.06716612, -0.1681746 ,  0.15599006,\n          0.01384667, -0.14903721,  0.12756953,  0.13269415,  0.2773942 ],\n        [ 0.19863975,  0.27804348,  0.10411122,  0.11534843,  0.06046411,\n         -0.19850716,  0.16384709, -0.15341891, -0.17777127, -0.18953738],\n        [-0.14607494, -0.21708731,  0.27079442,  0.15752429,  0.20079666,\n          0.03556076,  0.00403953,  0.17862213,  0.27328065, -0.05379896],\n        [ 0.25903937,  0.23442551,  0.15311614, -0.15076159,  0.11683893,\n          0.16862014,  0.28398326,  0.18901554, -0.08699048, -0.28095186],\n        [ 0.25111696, -0.15589802,  0.23169866, -0.19145146,  0.08174366,\n         -0.02124628,  0.24516025, -0.2833046 , -0.07917698, -0.01757893],\n        [ 0.2701868 , -0.08695659, -0.19494584,  0.14795464, -0.2325817 ,\n          0.14948949, -0.2198967 , -0.21886906, -0.04206365,  0.24636444],\n        [ 0.1699161 ,  0.04257202, -0.17127082,  0.27639732,  0.13763034,\n          0.11704889, -0.22356074,  0.24462399, -0.28139427, -0.05492002],\n        [ 0.19366544,  0.24085853,  0.27330074,  0.14012825, -0.12585147,\n         -0.03430861, -0.19978395, -0.04303271, -0.04794109, -0.13125713],\n        [-0.0424678 ,  0.2603294 ,  0.2313964 , -0.09031717, -0.03382599,\n          0.01689866, -0.09509453, -0.23541899,  0.07554334, -0.25813407],\n        [-0.01873189,  0.22439471,  0.22393051,  0.2107538 ,  0.1335932 ,\n         -0.11189029, -0.20238975,  
0.03700566,  0.03855616,  0.2810962 ],\n        [ 0.21812966,  0.07614744,  0.1722123 ,  0.28340468, -0.00987616,\n          0.11071396,  0.13184771,  0.05591351,  0.2172443 ,  0.20295206],\n        [-0.02043128,  0.2565699 , -0.2536667 , -0.0714229 ,  0.21373612,\n          0.28247693,  0.07397622,  0.20427516,  0.2215878 ,  0.11862251],\n        [ 0.12708765, -0.18459018,  0.1464645 , -0.09257211,  0.19831517,\n          0.24398991, -0.00993761,  0.28325912,  0.28391644, -0.03707646],\n        [-0.09292181, -0.17358094, -0.22570767,  0.21065685, -0.14909512,\n         -0.04322055,  0.06247967, -0.2525701 , -0.09371278,  0.23721078],\n        [ 0.2845494 ,  0.05545262,  0.0341844 , -0.12905793, -0.12653585,\n          0.17493927, -0.09245242, -0.06333928,  0.17593542,  0.05299741],\n        [-0.16718471, -0.2328743 , -0.15259534, -0.09023856, -0.06469564,\n         -0.19890669, -0.275425  , -0.06809579, -0.26111418, -0.28351787],\n        [ 0.10642859, -0.21962772, -0.04989874,  0.22824559,  0.23201612,\n         -0.0179857 , -0.11113542,  0.12828779, -0.00969917,  0.07276988],\n        [-0.18771307,  0.23809806,  0.13890287,  0.26560822, -0.24244924,\n         -0.13543583,  0.13519415, -0.19514802,  0.18008119,  0.0772239 ],\n        [ 0.24428168,  0.25403914, -0.2697264 ,  0.10668537,  0.24622002,\n         -0.09424429,  0.2506849 , -0.07624343,  0.17089504, -0.26229355],\n        [-0.1331737 ,  0.17847255,  0.23636976, -0.22482002,  0.19747782,\n         -0.0340831 , -0.03361037, -0.13720198, -0.16417152, -0.16349955],\n        [-0.0600567 ,  0.2606016 ,  0.263873  , -0.00500533, -0.15431294,\n         -0.06719722, -0.03618345, -0.1335348 , -0.10626683, -0.07157083],\n        [-0.13728656, -0.1718554 , -0.048592  , -0.1540851 , -0.19439064,\n         -0.18351936, -0.06294362,  0.07868907, -0.17257144, -0.27708766],\n        [-0.22163744, -0.24114978,  0.19890654, -0.11768162, -0.14464119,\n          0.0703789 ,  0.2130388 , -0.1262178 ,  0.17120644, -0.0087133 
]],\n       dtype=float32)>,\n <tf.Variable 'repdictions/bias:0' shape=(10,) dtype=float32, numpy=array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32)>]"
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model.trainable_weights\n",
    "\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"discriminator\"\n",
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "conv2d_3 (Conv2D)            (None, 14, 14, 64)        640       \n",
      "_________________________________________________________________\n",
      "leaky_re_lu_5 (LeakyReLU)    (None, 14, 14, 64)        0         \n",
      "_________________________________________________________________\n",
      "conv2d_4 (Conv2D)            (None, 7, 7, 128)         73856     \n",
      "_________________________________________________________________\n",
      "leaky_re_lu_6 (LeakyReLU)    (None, 7, 7, 128)         0         \n",
      "_________________________________________________________________\n",
      "global_max_pooling2d_1 (Glob (None, 128)               0         \n",
      "_________________________________________________________________\n",
      "dense_6 (Dense)              (None, 1)                 129       \n",
      "=================================================================\n",
      "Total params: 74,625\n",
      "Trainable params: 74,625\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "discriminator = keras.Sequential(\n",
    "    [\n",
    "        keras.Input(shape=(28,28,1)),\n",
    "        layers.Conv2D(64,(3,3),strides=(2,2),padding='same'),\n",
    "        layers.LeakyReLU(alpha=0.2),\n",
    "        layers.Conv2D(128,(3,3),strides=(2,2),padding='same'),\n",
    "        layers.LeakyReLU(alpha=0.2),\n",
    "        layers.GlobalMaxPooling2D(),\n",
    "        layers.Dense(1),\n",
    "\n",
    "     ],\n",
    "    name='discriminator'\n",
    ")\n",
    "discriminator.summary()"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "outputs": [],
   "source": [
    "latent_dim = 128\n",
    "generator = keras.Sequential([\n",
    "    keras.Input(shape=(latent_dim,)),\n",
    "    layers.Dense(7*7*128),\n",
    "    layers.LeakyReLU(alpha=0.2),\n",
    "    layers.Reshape((7,7,128)),\n",
    "    layers.Conv2DTranspose(128,(4,4),strides=(2,2),padding='same'),\n",
    "    layers.LeakyReLU(alpha=0.2),\n",
    "    layers.Conv2DTranspose(128,(4,4),strides=(2,2),padding='same'),\n",
    "    layers.LeakyReLU(alpha=0.2),\n",
    "    layers.Conv2D(1,(7,7),padding='same',activation='sigmoid')],\n",
    "    name='generator'\n",
    ")"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "outputs": [],
   "source": [
    "d_optimizer = keras.optimizers.Adam(learning_rate=0.0003)\n",
    "g_optimizer = keras.optimizers.Adam(learning_rate=0.0004)\n",
    "\n",
    "loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)\n",
    "\n",
    "@tf.function\n",
    "def train_step(real_images):\n",
    "    random_latent_vectors = tf.random.normal(shape=(batch_size,latent_dim))\n",
    "    generated_images = generator(random_latent_vectors)\n",
    "\n",
    "    combined_images = tf.concat([generated_images,real_images],axis=0)\n",
    "\n",
    "    labels = tf.concat(\n",
    "        [tf.ones((batch_size,1)),tf.zeros((real_images.shape[0],1))],\n",
    "        axis=0\n",
    "    )\n",
    "    labels += 0.05 * tf.random.uniform(labels.shape)\n",
    "\n",
    "    with tf.GradientTape() as tape:\n",
    "        predictions = discriminator(combined_images)\n",
    "        d_loss = loss_fn(labels,predictions)\n",
    "    grads = tape.gradient(d_loss,discriminator.trainable_weights)\n",
    "    d_optimizer.apply_gradients(zip(grads,discriminator.trainable_weights))\n",
    "\n",
    "    random_latent_vectors = tf.random.normal(shape=(batch_size,latent_dim))\n",
    "    misleading_labels = tf.zeros((batch_size,1))\n",
    "\n",
    "    with tf.GradientTape() as tape:\n",
    "        predictions = discriminator(generator(random_latent_vectors))\n",
    "        g_loss = loss_fn(misleading_labels,predictions)\n",
    "\n",
    "    grads = tape.gradient(g_loss,generator.trainable_weights)\n",
    "    g_optimizer.apply_gradients(zip(grads,generator.trainable_weights))\n",
    "    return d_loss,g_loss,generated_images\n",
    "\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "start epoch 0\n",
      "discriminator loss at step 0: 0.69\n",
      "adversarial loss at step 0: 0.68\n"
     ]
    },
    {
     "ename": "ImportError",
     "evalue": "Could not import PIL.Image. The use of `array_to_img` requires PIL.",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mImportError\u001B[0m                               Traceback (most recent call last)",
      "\u001B[1;32m<ipython-input-24-58a624f88c13>\u001B[0m in \u001B[0;36m<module>\u001B[1;34m\u001B[0m\n\u001B[0;32m     19\u001B[0m             \u001B[1;31m# Save one generated image\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m     20\u001B[0m             img = tf.keras.preprocessing.image.array_to_img(\n\u001B[1;32m---> 21\u001B[1;33m                 \u001B[0mgenerated_images\u001B[0m\u001B[1;33m[\u001B[0m\u001B[1;36m0\u001B[0m\u001B[1;33m]\u001B[0m \u001B[1;33m*\u001B[0m \u001B[1;36m255.0\u001B[0m\u001B[1;33m,\u001B[0m \u001B[0mscale\u001B[0m\u001B[1;33m=\u001B[0m\u001B[1;32mFalse\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0m\u001B[0;32m     22\u001B[0m             )\n\u001B[0;32m     23\u001B[0m             \u001B[0mimg\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0msave\u001B[0m\u001B[1;33m(\u001B[0m\u001B[0mos\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mpath\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mjoin\u001B[0m\u001B[1;33m(\u001B[0m\u001B[0msave_dir\u001B[0m\u001B[1;33m,\u001B[0m \u001B[1;34m\"generated_img\"\u001B[0m \u001B[1;33m+\u001B[0m \u001B[0mstr\u001B[0m\u001B[1;33m(\u001B[0m\u001B[0mstep\u001B[0m\u001B[1;33m)\u001B[0m \u001B[1;33m+\u001B[0m \u001B[1;34m\".png\"\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n",
      "\u001B[1;32mD:\\Anaconda3\\envs\\keras\\lib\\site-packages\\tensorflow\\python\\keras\\preprocessing\\image.py\u001B[0m in \u001B[0;36marray_to_img\u001B[1;34m(x, data_format, scale, dtype)\u001B[0m\n\u001B[0;32m     85\u001B[0m       \u001B[0mdtype\u001B[0m \u001B[1;33m=\u001B[0m \u001B[0mbackend\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mfloatx\u001B[0m\u001B[1;33m(\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m     86\u001B[0m     \u001B[0mkwargs\u001B[0m\u001B[1;33m[\u001B[0m\u001B[1;34m'dtype'\u001B[0m\u001B[1;33m]\u001B[0m \u001B[1;33m=\u001B[0m \u001B[0mdtype\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[1;32m---> 87\u001B[1;33m   \u001B[1;32mreturn\u001B[0m \u001B[0mimage\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0marray_to_img\u001B[0m\u001B[1;33m(\u001B[0m\u001B[0mx\u001B[0m\u001B[1;33m,\u001B[0m \u001B[0mdata_format\u001B[0m\u001B[1;33m=\u001B[0m\u001B[0mdata_format\u001B[0m\u001B[1;33m,\u001B[0m \u001B[0mscale\u001B[0m\u001B[1;33m=\u001B[0m\u001B[0mscale\u001B[0m\u001B[1;33m,\u001B[0m \u001B[1;33m**\u001B[0m\u001B[0mkwargs\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0m\u001B[0;32m     88\u001B[0m \u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m     89\u001B[0m \u001B[1;33m\u001B[0m\u001B[0m\n",
      "\u001B[1;32mD:\\Anaconda3\\envs\\keras\\lib\\site-packages\\keras_preprocessing\\image\\utils.py\u001B[0m in \u001B[0;36marray_to_img\u001B[1;34m(x, data_format, scale, dtype)\u001B[0m\n\u001B[0;32m    250\u001B[0m     \"\"\"\n\u001B[0;32m    251\u001B[0m     \u001B[1;32mif\u001B[0m \u001B[0mpil_image\u001B[0m \u001B[1;32mis\u001B[0m \u001B[1;32mNone\u001B[0m\u001B[1;33m:\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[1;32m--> 252\u001B[1;33m         raise ImportError('Could not import PIL.Image. '\n\u001B[0m\u001B[0;32m    253\u001B[0m                           'The use of `array_to_img` requires PIL.')\n\u001B[0;32m    254\u001B[0m     \u001B[0mx\u001B[0m \u001B[1;33m=\u001B[0m \u001B[0mnp\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0masarray\u001B[0m\u001B[1;33m(\u001B[0m\u001B[0mx\u001B[0m\u001B[1;33m,\u001B[0m \u001B[0mdtype\u001B[0m\u001B[1;33m=\u001B[0m\u001B[0mdtype\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n",
      "\u001B[1;31mImportError\u001B[0m: Could not import PIL.Image. The use of `array_to_img` requires PIL."
     ]
    }
   ],
   "source": [
    "import os\n",
    "# Build the training set: all 70k MNIST digits, scaled to [0,1], NHWC.\n",
    "batch_size = 64\n",
    "(x_train,_),(x_test,_) = keras.datasets.mnist.load_data()\n",
    "all_digits = np.concatenate([x_train,x_test])\n",
    "all_digits = all_digits.astype('float32') / 255.0\n",
    "all_digits = np.reshape(all_digits,(-1,28,28,1))\n",
    "dataset = tf.data.Dataset.from_tensor_slices(all_digits)\n",
    "dataset = dataset.shuffle(buffer_size=1024).batch(batch_size)\n",
    "\n",
    "epochs = 1\n",
    "save_dir = './'\n",
    "for epoch in range(epochs):\n",
    "    print(\"start epoch\",epoch)\n",
    "    for step,real_images in enumerate(dataset):\n",
    "        d_loss,g_loss,generated_images = train_step(real_images)\n",
    "        if step % 200 == 0:\n",
    "            print(\"discriminator loss at step %d: %.2f\" % (step, d_loss))\n",
    "            print(\"adversarial loss at step %d: %.2f\" % (step, g_loss))\n",
    "            # Save one generated image. Encode the PNG with TF ops instead of\n",
    "            # keras' array_to_img: the latter requires PIL, which is missing in\n",
    "            # this environment (see the ImportError in this cell's output).\n",
    "            # Clip to [0,1] first so out-of-range generator values can't wrap\n",
    "            # when cast to uint8.\n",
    "            img_uint8 = tf.cast(\n",
    "                tf.clip_by_value(generated_images[0], 0.0, 1.0) * 255.0, tf.uint8\n",
    "            )\n",
    "            png_path = os.path.join(save_dir, \"generated_img\" + str(step) + \".png\")\n",
    "            tf.io.write_file(png_path, tf.io.encode_png(img_uint8))\n",
    "\n",
    "        # To limit execution time we stop after 10 steps.\n",
    "        # Remove the lines below to actually train the model!\n",
    "        if step > 10:\n",
    "            break\n",
    "\n",
    "\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}