{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "%matplotlib inline\n",
    "import matplotlib\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import tensorflow as tf\n",
    "import scipy.io\n",
    "\n",
    "import os, re\n",
    "\n",
    "import claude.utils as cu\n",
    "import claude.claudeflow.autoencoder as ae\n",
    "import claude.claudeflow.helper as cfh\n",
    "import claude.claudeflow.training as cft"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fix RNG seeds so runs are reproducible (graph-level TF seed + numpy).\n",
    "seed = 1337\n",
    "tf.set_random_seed(seed)\n",
    "np.random.seed(seed)\n",
    "\n",
    "# Parameters\n",
    "# Channel Parameters\n",
    "chParam = cu.AttrDict()\n",
    "chParam.M = 16 # modulation order: number of distinct messages / constellation points\n",
    "\n",
    "# Auto-Encoder Parameters\n",
    "aeParam = cu.AttrDict()\n",
    "aeParam.constellationDim\t= 2\n",
    "aeParam.constellationOrder\t= chParam.M\n",
    "aeParam.nLayers\t\t= 2\n",
    "aeParam.nHidden \t= 32\n",
    "aeParam.activation  = tf.nn.selu\n",
    "aeParam.dtype       = tf.float32\n",
    "\n",
    "# Training Parameters\n",
    "trainingParam = cu.AttrDict()\n",
    "trainingParam.sampleSize\t= 512*chParam.M # Increase for better results (especially if M>16)\n",
    "trainingParam.batchSize \t= 32*chParam.M  # Increase for better results (especially if M>16)\n",
    "trainingParam.learningRate\t= 0.001\n",
    "trainingParam.displayStep\t= 20\n",
    "trainingParam.path\t\t\t= 'results_AWGN_noChannel'\n",
    "trainingParam.filename\t\t= 'M{}'.format(chParam.M)\n",
    "trainingParam.saveWeights\t= True\n",
    "trainingParam.earlyStopping = 10 # epochs without improvement before aborting an inner training loop\n",
    "trainingParam.tx_iterations = 25\n",
    "trainingParam.rx_iterations = 25\n",
    "trainingParam.policy_sigma2 = 0.02 # variance of the Gaussian exploration policy used for Tx training\n",
    "\n",
    "# TF constants (pre-built so graph ops below don't recreate them)\n",
    "two = tf.constant(2,aeParam.dtype)\n",
    "minusOne = tf.constant(-1,aeParam.dtype)\n",
    "DIM = tf.constant(aeParam.constellationDim,aeParam.dtype)\n",
    "PI = tf.constant(np.pi,aeParam.dtype)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Channel Graph\n",
    "sigma2_noise = tf.constant(0.1,aeParam.dtype)\n",
    "\n",
    "channel_in = tf.placeholder( aeParam.dtype, shape=(None, aeParam.constellationDim) )\n",
    "noise = tf.sqrt( sigma2_noise )\\\n",
    "            *tf.rsqrt(two)\\\n",
    "            *tf.random_normal(shape=tf.shape(channel_in),dtype=aeParam.dtype)\n",
    "\n",
    "channel_out = channel_in + noise"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING: Logging before flag parsing goes to stderr.\n",
      "W0917 20:15:40.704045 140237100992320 deprecation.py:506] From /home/rasmus/.conda/envs/claudeOnline/lib/python3.6/site-packages/tensorflow/python/ops/init_ops.py:1251: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Call initializer instance with the dtype argument instead of passing it to the constructor\n"
     ]
    }
   ],
   "source": [
    "# Tx Graph     \n",
    "# X holds one-hot encoded messages; the encoder maps each message to a\n",
    "# point in the constellationDim-dimensional signal space.\n",
    "X = tf.placeholder( aeParam.dtype, shape=(None, chParam.M) )\n",
    "enc, enc_seed = ae.encoder(X, aeParam)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# enc = ae.encoder(X,aeParam.hiddenUnits,aeParam.nLayers,aeParam.activation,nOutput=aeParam.dimension)\n",
    "# enc_norm = cfh.IQ_norm( enc )\n",
    "\n",
    "# Tx policy\n",
    "# Exploration noise: perturb the encoder output with Gaussian noise of\n",
    "# variance policy_sigma2 so the policy-gradient update (below) can probe\n",
    "# nearby constellation layouts.\n",
    "policy_sigma2 = tf.placeholder( aeParam.dtype, shape=())\n",
    "perturbation = tf.sqrt( policy_sigma2 ) * tf.rsqrt(two) * tf.random_normal(shape=tf.shape(enc),dtype=aeParam.dtype)\n",
    "enc_pert = enc + perturbation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Rx Graph\n",
    "# Y receives (noisy) channel observations; the decoder outputs logits over the M messages.\n",
    "Y = tf.placeholder( aeParam.dtype, shape=(None, aeParam.constellationDim) )\n",
    "dec = ae.decoder(Y,aeParam)\n",
    "# Per-example cross-entropy, deliberately not reduced: Tx training reuses the\n",
    "# per-example values as its reward signal.\n",
    "per_ex_loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=X,logits=dec)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Rx Loss: mean cross-entropy plus a symbol-accuracy metric. Only the\n",
    "# decoder's variables are touched by the receiver optimizer.\n",
    "correct_prediction = tf.equal(tf.argmax(X, 1), tf.argmax(dec, 1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, aeParam.dtype))\n",
    "rx_loss = tf.reduce_mean(per_ex_loss)\n",
    "rx_vars = [v for v in tf.global_variables() if 'decoder' in v.name]\n",
    "rx_optimizer = tf.train.AdamOptimizer(learning_rate=trainingParam.learningRate).minimize(rx_loss, var_list=rx_vars)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "W0917 20:15:42.283854 140237100992320 lazy_loader.py:50] \n",
      "The TensorFlow contrib module will not be included in TensorFlow 2.0.\n",
      "For more information, please see:\n",
      "  * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n",
      "  * https://github.com/tensorflow/addons\n",
      "  * https://github.com/tensorflow/io (for I/O related ops)\n",
      "If you depend on functionality not listed there, please file an issue.\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Streaming mean metrics with explicit reset ops, so averages can be\n",
    "# accumulated over batches and cleared between epochs.\n",
    "metricsDict = {'xentropy':rx_loss, 'accuracy_metric':accuracy}\n",
    "meanMetricOpsDict, updateOps, resetOps = cft.create_mean_metrics(metricsDict)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# avg_loss, avg_loss_update, avg_loss_reset = cfh.create_reset_metric(tf.metrics.mean, 'loss_metric', rx_loss)\n",
    "# avg_accuracy, avg_accuracy_update, avg_accuracy_reset = cfh.create_reset_metric(tf.metrics.mean, 'accuracy_metric', accuracy)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "W0917 20:15:42.434755 140237100992320 deprecation.py:323] From /home/rasmus/.conda/envs/claudeOnline/lib/python3.6/site-packages/tensorflow/python/ops/math_grad.py:1250: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use tf.where in 2.0, which has the same broadcast rule as np.where\n"
     ]
    }
   ],
   "source": [
    "# Tx Loss (REINFORCE-style policy-gradient update for the encoder)\n",
    "# Fed from numpy after each rollout: the perturbed symbols actually\n",
    "# transmitted and the per-example receiver losses (the \"reward\" signal).\n",
    "tx_perturbed = tf.placeholder( aeParam.dtype, shape=(None,aeParam.constellationDim) )\n",
    "tx_per_ex_loss  = tf.placeholder( aeParam.dtype, shape=(None,) )\n",
    "batch_size = tf.placeholder( aeParam.dtype, shape=() )\n",
    "\n",
    "# Log-density of the Gaussian exploration policy evaluated at the transmitted points.\n",
    "policy = tf.log( tf.rsqrt( tf.pow(PI*policy_sigma2,DIM) ) * tf.exp( minusOne * tf.square(cfh.norm( tx_perturbed-enc )) / policy_sigma2 ) )\n",
    "tx_vars = [item for item in tf.global_variables() if 'encoder' in item.name]\n",
    "\n",
    "# grad_ys weights each example's log-prob gradient by its (batch-normalized) loss.\n",
    "policy_gradient = tf.gradients(policy, tx_vars, grad_ys=tx_per_ex_loss/batch_size)\n",
    "\n",
    "grads_and_vars = list(zip(policy_gradient, tx_vars))\n",
    "tx_optimizer = tf.train.AdamOptimizer(learning_rate=trainingParam.learningRate).apply_gradients(grads_and_vars)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Session setup: initialize all variables and prepare a checkpoint directory\n",
    "# where the best model seen during training is saved.\n",
    "init = tf.global_variables_initializer()\n",
    "sess = tf.Session()\n",
    "sess.run(init)\n",
    "\n",
    "saver = tf.train.Saver()\n",
    "checkpoint_path = os.path.join(trainingParam.path,'checkpoint',trainingParam.filename,'best')\n",
    "if not os.path.exists(checkpoint_path):\n",
    "    os.makedirs(checkpoint_path)\n",
    "else:\n",
    "    pass\n",
    "#     print(\"Restoring checkpoint...\", flush=True)\n",
    "#     saver.restore(sess=sess,save_path=checkpoint_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "def TrainReceiver():\n",
    "    \"\"\"Train the decoder (receiver) only, with early stopping.\n",
    "\n",
    "    Per epoch: encode random one-hot messages, push the constellation points\n",
    "    through the AWGN channel, and take Adam steps on the decoder variables.\n",
    "    The lowest running mean loss seen so far is checkpointed.\n",
    "    \"\"\"\n",
    "    nBatches = int(trainingParam.sampleSize/trainingParam.batchSize)\n",
    "\n",
    "    bestLoss = 10000\n",
    "    # BUGFIX: initialize so the early-stopping check below cannot raise\n",
    "    # UnboundLocalError when the first epoch never improves (e.g. NaN loss).\n",
    "    lastImprovement = 0\n",
    "\n",
    "    for epoche in range(1, trainingParam.rx_iterations+1):\n",
    "        sess.run(resetOps)\n",
    "        for batch in range(0,nBatches):\n",
    "            data, _, _ = cu.hotOnes(trainingParam.batchSize,(1,0),chParam.M)\n",
    "            ## GENERATE SYMBOLS\n",
    "            feedDict = {X: data}\n",
    "            [outEnc, outEncSeed] = sess.run([enc, enc_seed], feed_dict=feedDict)\n",
    "\n",
    "            ## TRANSMIT THROUGH CHANNEL\n",
    "            feedDict = {channel_in: outEnc}\n",
    "            outChannelOut = sess.run(channel_out, feed_dict=feedDict)\n",
    "\n",
    "            ## Train (optimizer step + streaming-metric update in one run)\n",
    "            feedDict = {X: data,Y:outChannelOut}\n",
    "            sess.run([rx_optimizer, updateOps], feed_dict=feedDict)\n",
    "\n",
    "        [outAvgLoss, outAvgAccuracy] = sess.run([meanMetricOpsDict['xentropy'], meanMetricOpsDict['accuracy_metric']], feed_dict=feedDict)\n",
    "\n",
    "        if outAvgLoss < bestLoss:\n",
    "            bestLoss = outAvgLoss\n",
    "            lastImprovement = epoche\n",
    "            saver.save(sess=sess,save_path=checkpoint_path)\n",
    "\n",
    "        if epoche - lastImprovement > trainingParam.earlyStopping:\n",
    "            print(\"Breaking due to no improvement\")\n",
    "            break\n",
    "\n",
    "        if epoche%trainingParam.displayStep == 0:\n",
    "            print('rx_epoche: {} - avgLoss: {} - avgAcc: {}'.format(epoche,outAvgLoss,outAvgAccuracy))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "def TrainTransmitter(bs):\n",
    "    \"\"\"Train the encoder (transmitter) via policy gradients, with early stopping.\n",
    "\n",
    "    Args:\n",
    "        bs: value fed to the batch_size placeholder; normalizes the\n",
    "            per-example losses in the policy-gradient update.\n",
    "    \"\"\"\n",
    "    nBatches = int(trainingParam.sampleSize/trainingParam.batchSize)\n",
    "\n",
    "    bestLoss = 10000\n",
    "    # BUGFIX: initialize so the early-stopping check below cannot raise\n",
    "    # UnboundLocalError when the first epoch never improves (e.g. NaN loss).\n",
    "    lastImprovement = 0\n",
    "\n",
    "    for epoche in range(1, trainingParam.tx_iterations+1):\n",
    "        for batch in range(0,nBatches):\n",
    "            data, _, _ = cu.hotOnes(trainingParam.batchSize,(1,0),chParam.M,batch)\n",
    "            ## GENERATE PERTURBED SYMBOLS\n",
    "            feedDict = {X: data, policy_sigma2: trainingParam.policy_sigma2}\n",
    "            [outEncPert, outEncSeed] = sess.run([enc_pert, enc_seed], feed_dict=feedDict)\n",
    "\n",
    "            ## TRANSMIT THROUGH CHANNEL\n",
    "            feedDict = {channel_in: outEncPert}\n",
    "            outChannelOut = sess.run(channel_out, feed_dict=feedDict)\n",
    "\n",
    "            ## PER EXAMPLE LOSS (reward signal for the policy gradient)\n",
    "            feedDict={X:data, Y:outChannelOut}\n",
    "            outPerExampleLoss = sess.run(per_ex_loss,feed_dict=feedDict)\n",
    "\n",
    "            ## TRAIN TRANSMITTER\n",
    "            feedDict={batch_size:bs, X:data,\\\n",
    "                      tx_per_ex_loss: outPerExampleLoss, tx_perturbed: outEncPert, policy_sigma2: trainingParam.policy_sigma2}\n",
    "            sess.run(tx_optimizer,feed_dict=feedDict)\n",
    "\n",
    "        ## TEST (the 133700+batch argument presumably seeds cu.hotOnes so test\n",
    "        ## batches are comparable across epochs -- confirm in claude.utils)\n",
    "        sess.run(resetOps)\n",
    "        for batch in range(0,nBatches):\n",
    "            data, _, _ = cu.hotOnes(trainingParam.batchSize,(1,0),chParam.M,133700+batch)\n",
    "            ## GENERATE SYMBOLS\n",
    "            feedDict = {X: data}\n",
    "            [outEnc, outEncSeed] = sess.run([enc, enc_seed], feed_dict=feedDict)\n",
    "\n",
    "            ## TRANSMIT THROUGH CHANNEL\n",
    "            feedDict = {channel_in: outEnc}\n",
    "            outChannelOut = sess.run(channel_out, feed_dict=feedDict)\n",
    "\n",
    "            ## Test\n",
    "            feedDict = {X: data, Y:outChannelOut}\n",
    "            sess.run(updateOps, feed_dict=feedDict)\n",
    "\n",
    "        [outAvgLoss, outAvgAccuracy] = sess.run([meanMetricOpsDict['xentropy'], meanMetricOpsDict['accuracy_metric']], feed_dict=feedDict)\n",
    "\n",
    "        if outAvgLoss < bestLoss:\n",
    "            bestLoss = outAvgLoss\n",
    "            lastImprovement = epoche\n",
    "            saver.save(sess=sess,save_path=checkpoint_path)\n",
    "\n",
    "        if epoche - lastImprovement > trainingParam.earlyStopping:\n",
    "            print(\"Breaking due to no improvement\")\n",
    "            break\n",
    "\n",
    "        if epoche%trainingParam.displayStep == 0:\n",
    "            print('tx_epoche: {} - avgLoss: {} - avgAcc: {}'.format(epoche,outAvgLoss,outAvgAccuracy))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "##### 1\n",
      "rx_epoche: 20 - avgLoss: 1.0226750373840332 - avgAcc: 0.601318359375\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "W0917 20:15:46.110948 140237100992320 deprecation.py:323] From /home/rasmus/.conda/envs/claudeOnline/lib/python3.6/site-packages/tensorflow/python/training/saver.py:1276: checkpoint_exists (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use standard file APIs to check for files with this prefix.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tx_epoche: 20 - avgLoss: 0.8661025762557983 - avgAcc: 0.67236328125\n",
      "##### 2\n",
      "rx_epoche: 20 - avgLoss: 0.716338574886322 - avgAcc: 0.7099609375\n",
      "Breaking due to no improvement\n",
      "##### 3\n",
      "rx_epoche: 20 - avgLoss: 0.6421357989311218 - avgAcc: 0.73974609375\n",
      "Breaking due to no improvement\n",
      "##### 4\n",
      "rx_epoche: 20 - avgLoss: 0.628266453742981 - avgAcc: 0.7490234375\n",
      "Breaking due to no improvement\n",
      "tx_epoche: 20 - avgLoss: 0.6423174738883972 - avgAcc: 0.748046875\n",
      "Breaking due to no improvement\n",
      "##### 5\n",
      "rx_epoche: 20 - avgLoss: 0.6206520795822144 - avgAcc: 0.7557373046875\n",
      "Breaking due to no improvement\n",
      "tx_epoche: 20 - avgLoss: 0.6430590152740479 - avgAcc: 0.744384765625\n",
      "##### 6\n",
      "rx_epoche: 20 - avgLoss: 0.5983066558837891 - avgAcc: 0.76025390625\n",
      "Breaking due to no improvement\n",
      "tx_epoche: 20 - avgLoss: 0.6260842680931091 - avgAcc: 0.7623291015625\n",
      "##### 7\n",
      "rx_epoche: 20 - avgLoss: 0.583381712436676 - avgAcc: 0.7652587890625\n",
      "Breaking due to no improvement\n",
      "##### 8\n",
      "rx_epoche: 20 - avgLoss: 0.585932195186615 - avgAcc: 0.7720947265625\n",
      "Breaking due to no improvement\n",
      "Breaking due to no improvement\n",
      "##### 9\n",
      "rx_epoche: 20 - avgLoss: 0.5870329737663269 - avgAcc: 0.7685546875\n",
      "Breaking due to no improvement\n",
      "tx_epoche: 20 - avgLoss: 0.6173610687255859 - avgAcc: 0.750732421875\n",
      "Breaking due to no improvement\n",
      "##### 10\n",
      "rx_epoche: 20 - avgLoss: 0.5913054347038269 - avgAcc: 0.7686767578125\n",
      "Breaking due to no improvement\n",
      "tx_epoche: 20 - avgLoss: 0.6064825057983398 - avgAcc: 0.76318359375\n",
      "##### 11\n",
      "rx_epoche: 20 - avgLoss: 0.5971719026565552 - avgAcc: 0.76611328125\n",
      "Breaking due to no improvement\n",
      "tx_epoche: 20 - avgLoss: 0.5945199728012085 - avgAcc: 0.7650146484375\n",
      "Breaking due to no improvement\n",
      "##### 12\n",
      "rx_epoche: 20 - avgLoss: 0.5724561810493469 - avgAcc: 0.7745361328125\n",
      "Breaking due to no improvement\n",
      "Breaking due to no improvement\n",
      "##### 13\n",
      "rx_epoche: 20 - avgLoss: 0.5564683079719543 - avgAcc: 0.7845458984375\n",
      "tx_epoche: 20 - avgLoss: 0.5799381136894226 - avgAcc: 0.7747802734375\n",
      "##### 14\n",
      "rx_epoche: 20 - avgLoss: 0.5822401642799377 - avgAcc: 0.779052734375\n",
      "Breaking due to no improvement\n",
      "##### 15\n",
      "rx_epoche: 20 - avgLoss: 0.5764646530151367 - avgAcc: 0.7769775390625\n",
      "Breaking due to no improvement\n",
      "Breaking due to no improvement\n",
      "##### 16\n",
      "Breaking due to no improvement\n",
      "Breaking due to no improvement\n",
      "##### 17\n",
      "rx_epoche: 20 - avgLoss: 0.5516932010650635 - avgAcc: 0.7930908203125\n",
      "Breaking due to no improvement\n",
      "Breaking due to no improvement\n",
      "##### 18\n",
      "rx_epoche: 20 - avgLoss: 0.5602746605873108 - avgAcc: 0.7860107421875\n",
      "Breaking due to no improvement\n",
      "##### 19\n",
      "rx_epoche: 20 - avgLoss: 0.5632996559143066 - avgAcc: 0.7850341796875\n",
      "Breaking due to no improvement\n",
      "Breaking due to no improvement\n",
      "##### 20\n",
      "Breaking due to no improvement\n",
      "Breaking due to no improvement\n",
      "##### 21\n",
      "Breaking due to no improvement\n",
      "Breaking due to no improvement\n",
      "##### 22\n",
      "Breaking due to no improvement\n",
      "Breaking due to no improvement\n",
      "##### 23\n",
      "Breaking due to no improvement\n",
      "Breaking due to no improvement\n",
      "##### 24\n",
      "rx_epoche: 20 - avgLoss: 0.549275279045105 - avgAcc: 0.7906494140625\n",
      "tx_epoche: 20 - avgLoss: 0.5663025975227356 - avgAcc: 0.7841796875\n",
      "##### 25\n",
      "rx_epoche: 20 - avgLoss: 0.5532470345497131 - avgAcc: 0.7886962890625\n",
      "Breaking due to no improvement\n",
      "Breaking due to no improvement\n",
      "##### 26\n",
      "rx_epoche: 20 - avgLoss: 0.5425587892532349 - avgAcc: 0.790283203125\n",
      "Breaking due to no improvement\n",
      "Breaking due to no improvement\n",
      "##### 27\n",
      "rx_epoche: 20 - avgLoss: 0.5266492366790771 - avgAcc: 0.799560546875\n",
      "Breaking due to no improvement\n",
      "##### 28\n",
      "Breaking due to no improvement\n",
      "Breaking due to no improvement\n",
      "##### 29\n",
      "Breaking due to no improvement\n",
      "tx_epoche: 20 - avgLoss: 0.555844247341156 - avgAcc: 0.786865234375\n",
      "Breaking due to no improvement\n",
      "##### 30\n",
      "Breaking due to no improvement\n",
      "Breaking due to no improvement\n",
      "##### 31\n",
      "rx_epoche: 20 - avgLoss: 0.564372181892395 - avgAcc: 0.78515625\n",
      "Breaking due to no improvement\n",
      "Breaking due to no improvement\n",
      "##### 32\n",
      "Breaking due to no improvement\n",
      "tx_epoche: 20 - avgLoss: 0.5428370833396912 - avgAcc: 0.7938232421875\n",
      "##### 33\n",
      "rx_epoche: 20 - avgLoss: 0.5431884527206421 - avgAcc: 0.7916259765625\n",
      "tx_epoche: 20 - avgLoss: 0.5499274134635925 - avgAcc: 0.790771484375\n",
      "##### 34\n",
      "Breaking due to no improvement\n",
      "Breaking due to no improvement\n",
      "##### 35\n",
      "rx_epoche: 20 - avgLoss: 0.542556881904602 - avgAcc: 0.79345703125\n",
      "Breaking due to no improvement\n",
      "tx_epoche: 20 - avgLoss: 0.5581865310668945 - avgAcc: 0.787353515625\n",
      "Breaking due to no improvement\n",
      "##### 36\n",
      "rx_epoche: 20 - avgLoss: 0.5437150597572327 - avgAcc: 0.7991943359375\n",
      "tx_epoche: 20 - avgLoss: 0.534855842590332 - avgAcc: 0.798583984375\n",
      "##### 37\n",
      "rx_epoche: 20 - avgLoss: 0.5362117886543274 - avgAcc: 0.79833984375\n",
      "Breaking due to no improvement\n",
      "Breaking due to no improvement\n",
      "##### 38\n",
      "Breaking due to no improvement\n",
      "Breaking due to no improvement\n",
      "##### 39\n",
      "Breaking due to no improvement\n",
      "Breaking due to no improvement\n",
      "##### 40\n",
      "rx_epoche: 20 - avgLoss: 0.5368400812149048 - avgAcc: 0.80029296875\n",
      "Breaking due to no improvement\n",
      "##### 41\n",
      "Breaking due to no improvement\n",
      "tx_epoche: 20 - avgLoss: 0.5489000678062439 - avgAcc: 0.79052734375\n",
      "##### 42\n",
      "rx_epoche: 20 - avgLoss: 0.5397111773490906 - avgAcc: 0.7965087890625\n",
      "Breaking due to no improvement\n",
      "tx_epoche: 20 - avgLoss: 0.5536429286003113 - avgAcc: 0.790283203125\n",
      "##### 43\n",
      "Breaking due to no improvement\n",
      "Breaking due to no improvement\n",
      "##### 44\n",
      "Breaking due to no improvement\n",
      "tx_epoche: 20 - avgLoss: 0.5450145602226257 - avgAcc: 0.7906494140625\n",
      "##### 45\n",
      "rx_epoche: 20 - avgLoss: 0.5512761473655701 - avgAcc: 0.7899169921875\n",
      "Breaking due to no improvement\n",
      "Breaking due to no improvement\n",
      "##### 46\n",
      "rx_epoche: 20 - avgLoss: 0.5375517010688782 - avgAcc: 0.79296875\n",
      "Breaking due to no improvement\n",
      "Breaking due to no improvement\n",
      "##### 47\n",
      "Breaking due to no improvement\n",
      "tx_epoche: 20 - avgLoss: 0.5497486591339111 - avgAcc: 0.7933349609375\n",
      "Breaking due to no improvement\n",
      "##### 48\n",
      "Breaking due to no improvement\n",
      "Breaking due to no improvement\n",
      "##### 49\n",
      "rx_epoche: 20 - avgLoss: 0.5507993698120117 - avgAcc: 0.78857421875\n",
      "tx_epoche: 20 - avgLoss: 0.5432595610618591 - avgAcc: 0.7996826171875\n"
     ]
    }
   ],
   "source": [
    "# Alternating optimization: train the receiver, restore the best Rx\n",
    "# checkpoint, train the transmitter via policy gradients, restore again,\n",
    "# and repeat for 49 outer rounds.\n",
    "for jj in range(1,50):\n",
    "    print(\"##### {}\".format(jj))\n",
    "    TrainReceiver()\n",
    "    saver.restore(sess=sess,save_path=checkpoint_path)\n",
    "    TrainTransmitter(trainingParam.batchSize*1)\n",
    "    saver.restore(sess=sess,save_path=checkpoint_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQoAAAD8CAYAAACPd+p5AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAEAFJREFUeJzt3W+MXNV9xvHvYxuDVFWJ8S5g/qzNKlYbIrUNHllLkCqnJRWxKlwSUI0iBSqsFW1Q39YSEql4U+jLCLfEoShQISBFAjbFxOWvaF8s9W5kY4xLWVbZsloLO8ZyhJpiNvvri7m0483MnlnunTt3Zp6PtNo7M4c5vxktj+8598wZRQRmZitZ0+0CzKz6HBRmluSgMLMkB4WZJTkozCzJQWFmSQ4KM0tyUJhZkoPCzJLWdbuAlQwNDcWWLVu6XYZZ35qenv55RAyn2lU6KLZs2cLU1FS3yzDrW5Lm2mnnoYeZJRUSFJIekXRS0lstHt8h6aykw9nPvUX0a2blKGro8UPgQeCxFdr8a0T8cUH9mVmJCjmjiIjXgQ+LeC4zq54y5yiuk3RE0guSvlRiv2aWU1lXPX4KbI6IjyTtBJ4FtjZrKGkcGAcYGRkpqTwzW0kpZxQR8YuI+Cg7PgBcIGmoRdv9EVGLiNrwcPLyrpmVoJSgkHSZJGXH27N+T5fRt5nlV8jQQ9ITwA5gSNI88F3gAoCIeAi4BfhzSYvAL4Hd4c06zXpGIUEREbclHn+Q+uVTM+tBXplpZkkOCjNLclCYWZKDwsySHBRmluSgsEJMz51h36szTM+d6XYp1gGV3rjGesP03Bm+9fAk5xaXWL9uDY/vGWPb5g3dLssK5DMKy21y9jTnFpdYCvhkcYnJWS+67TcOCsttbHQj69etYa3ggnVrGBvd2O2SrGAeelhu2zZv4PE9Y0zOnmZsdKOHHX3IQWGF2LZ5gwOij3noYWZJDgozS3JQmFmSg8LMkhwUZpbkoDCzJAeFmSU5KMwsyUFhZkkOCjNLclCYWZKDwsySHBRmluSgMLOkQoJC0iOSTkp6q8XjkvQ9STOS3pR0bRH9mlk5ijqj+CFw4wqPfx3Ymv2MA39fUL9mVoJCgiIiXgc+XKHJLuCxqJsEPi9pUxF9m1nnlTVHcQXwfsPt+ey+geSt7a3XlLUVnprcF00bSuPUhyeMjIx0sqau8Nb21ovKOqOYB65quH0lsNCsYUTsj4haRNSGh4dLKa5M3treelFZQTEBfDu7+jEGnI2IEyX1XSne2t56USFDD0lPADuAIUnzwHeBCwAi4iHgALATmAH+G/izIvrtRd7a3npRIUEREbclHg/gO0X01Q+8tb31Gq/MNLMkB4WZJTkozNo0yOtf/JWCZm0Y9PUvPqMwa8Ogr39xUJi1YdDXv3joYdaGQV//4qAwa9Mgr3/x0MPMkhwUZpbkoDCzJAeFmSU5KMwsyUFhZkkOCjNLclCYWZKDwsySHBRmluSgMLMkB4WZJTkorBSDvDtUP/CnR63jBn13qH7gMwrruEHfHaofOCis4wZ9d6h+4KGHddyg7w7VDwo5o5B0o6R3JM1I2tvk8TsknZJ0OPvZU0S/1ju2bd7Ad776BYdEj8p9RiFpLbAP+Br1by0/JGkiIt5e1vSpiLg7b39mVr4izii2AzMRMRsR54AngV0FPK+ZVUQRQXEF8H7D7fnsvuW+KelNSU9LuqqAfs2sJEUEhZrcF8tu/xjYEhG/A7wEPNryyaRxSVOSpk6dOlVAeWaWVxFBMQ80niFcCSw0NoiI0xHxcXbzB8C2Vk8WEfsjohYRteHh4QLKM7O8igiKQ8BWSVdLWg/sBiYaG0ja1HDzJuB4Af2aWUlyX/WIiEVJdwMHgbXAIxFxTNJ9wFRETAB/KekmYBH4ELgjb79mVh5FLJ9OqI5arRZTU1PdLsOsb0majohaqp2XcJtZkoPCzJIcFAPA
e0FYXv5QWJ/zXhBWBJ9R9DnvBWFFcFD0Oe8FYUXw0KPPeS8IK4KDYgBs27zBAWG5eOhhZkkOiga+jGjWnIceGV9GNGvNZxQZX0Y0a81BkfFlRLPWPPTI+DKiWWsOiga+jGjWnIceZpbkoDCzJAeF9QWvgeksz1FYz/MamM7zGYX1PK+B6TwHhfU8r4HpPA89rOd5DUznOSisL/T6GpjpuTOVDjoHhVmX9cJkrOcozLqsFyZjCwkKSTdKekfSjKS9TR6/UNJT2eNvSNpSRL9m/aAXJmNzDz0krQX2AV+j/s3mhyRNRMTbDc3uBM5ExBck7QYeAP40b99m/aAXJmOLmKPYDsxExCyApCeBXUBjUOwC/jo7fhp4UJKiyl98alaiqk/GFjH0uAJ4v+H2fHZf0zYRsQicBap3fmVmTRURFGpy3/IzhXba1BtK45KmJE2dOnUqd3Fmll8RQTEPXNVw+0pgoVUbSeuAzwEfNnuyiNgfEbWIqA0PDxdQnpnlVURQHAK2Srpa0npgNzCxrM0EcHt2fAvwiucnzHpH7snMiFiUdDdwEFgLPBIRxyTdB0xFxATwD8A/SpqhfiaxO2+/ZlaeQlZmRsQB4MCy++5tOP4f4NYi+rL+UPUly3Y+L+G20vXCkmU7n5dwW+l6Ycmync9BYaXrhSXLdj4PPQZA1eYDemHJsp3PQdHnqjofUPUly3Y+Dz36nOcDrAgOij7n+QArgocefc7zAVYEB8UA8HyA5eWhh5klOSjMLMlBYWZJDgozS3JQmFmSg8LMkhwUZpbkoDCzJAeFmSU5KMwsyUFhZkkOCjNLclCYWZKDwsySHBSfwfTcGfa9OsP03Jlul2JWCu9HsUpV3YPSrJNynVFIuljSi5LezX43/T9G0q8kHc5+ln8vaU/xHpQ2iPIOPfYCL0fEVuDl7HYzv4yI38t+bsrZZ1d5D0obRHmHHruAHdnxo8BrwF/lfM5K8x6UNojyBsWlEXECICJOSLqkRbuLJE0Bi8D9EfFszn67yntQdl/VvtSo3yWDQtJLwGVNHrpnFf2MRMSCpFHgFUlHI+K9Fv2NA+MAIyMjq+jCBoUnlMuXDIqIuKHVY5I+kLQpO5vYBJxs8RwL2e9ZSa8BXwaaBkVE7Af2A9RqtUi+Ahs4zSaUHRSdlXcycwK4PTu+HXhueQNJGyRdmB0PAdcDb+fs1waYJ5TLl3eO4n7gR5LuBP4LuBVAUg24KyL2AF8Evi9piXow3R8RDgr7zDyhXD5FVPfsvlarxdTUVLfLMOtbkqYjopZq5yXcZpbkoDCzJAeFmSU5KMwsyUFhSf5Yvflj5rYir4I08BmFJfhj9QYOCkvwKkgDDz0swasgV68fP9nqoLAkf6y+ff06p+Ohh1mB+nVOx0FhVqB+ndPx0MOsQP06p+OgMCtYP87peOhhZkkOCjNLclCYWZKDwsySHBRmluSgMLMkB4WZJTkozCzJQWFmSQ4KM0tyUJhZUq6gkHSrpGOSlrKvEWzV7kZJ70iakbQ3T59mVr68ZxRvAd8AXm/VQNJaYB/wdeAa4DZJ1+Ts18xKlOvToxFxHEDSSs22AzMRMZu1fRLYhb/R3KxnlDFHcQXwfsPt+ew+M+sRyTMKSS8BlzV56J6IeK6NPpqdbrT8CnVJ48A4wMjISBtPb2adlgyKiLghZx/zwFUNt68EFlbobz+wH6BWq7UMFDMrTxlDj0PAVklXS1oP7AYmSujXzAqS9/LozZLmgeuA5yUdzO6/XNIBgIhYBO4GDgLHgR9FxLF8ZZtZmfJe9XgGeKbJ/QvAzobbB4ADefoys+7xykwzS3JQmFmSg8LMkhwUZpbkoDCzJAdFhU3PnWHfqzNMz53pdik24PyVghU1PXeGbz08ybnFJdavW8Pje8b67mvqrHf4jKKiJmdPc25xiaWATxaXmJw93e2SbIA5KCpqbHQj69etYa3ggnVrGBvd2O2SbIB56FFR2zZv4PE9Y0zOnmZsdKOHHdZV
DooK27Z5gwPCKsFDDzNLclCYWZKDwsySHBRmluSgMLMkB4VVlpewV4cvj1oleQl7tfiMwirJS9irxUFhleQl7NXioYdVkpewV4uDwirLS9irw0MPM0tyUJhZkoPCzJLyfqXgrZKOSVqSVFuh3c8kHZV0WNJUnj7NrHx5JzPfAr4BfL+Ntl+NiJ/n7M/MuiDvd48eB5BUTDVmVkllzVEE8C+SpiWNl9SnmRUkeUYh6SXgsiYP3RMRz7XZz/URsSDpEuBFSf8REa+36G8cGAcYGRlp8+nNrJOSQRERN+TtJCIWst8nJT0DbAeaBkVE7Af2A9Rqtcjbt5nl1/Ghh6TfkPSbnx4Df0R9EtTMekTey6M3S5oHrgOel3Qwu/9ySQeyZpcC/ybpCPDvwPMR8ZM8/ZpZufJe9XgGeKbJ/QvAzux4FvjdPP2YWXd5ZaaZJTkozCxJEdW9sCDpFDDX5KEhoAqrPF3H+VzH+Xqhjs0RMZx6gkoHRSuSpiKi5WdLXIfrcB3F1uGhh5klOSjMLKlXg2J/twvIuI7zuY7z9U0dPTlHYWbl6tUzCjMrUU8ERVV20lpFHTdKekfSjKS9HajjYkkvSno3+910q2pJv8rei8OSJgrsf8XXJ+lCSU9lj78haUtRfa+yjjsknWp4D/Z0oIZHJJ2U1PTzS6r7Xlbjm5KuLbqGNuvYIelsw3tx76o6iIjK/wBfBH4LeA2ordDuZ8BQN+sA1gLvAaPAeuAIcE3BdfwtsDc73gs80KLdRx14D5KvD/gL4KHseDfwVJfquAN4sMN/m78PXAu81eLxncALgIAx4I0u1bED+OfP+vw9cUYREccj4p0eqWM7MBMRsxFxDngS2FVwKbuAR7PjR4E/Kfj5V9LO62us72ngD1X8NmhlvM9JUd9X5cMVmuwCHou6SeDzkjZ1oY5ceiIoVqEKO2ldAbzfcHs+u69Il0bECYDs9yUt2l0kaUrSpKSiwqSd1/d/bSJiETgLFP2dgO2+z9/MTvmflnRVwTW0o4y/h3ZdJ+mIpBckfWk1/2Flvims7J20OlhHs385V31paaU6VvE0I9n7MQq8IuloRLy32lqWl9bkvuWvr5D3oIA6fgw8EREfS7qL+lnOHxRcR0oZ70U7fkp9ufZHknYCzwJb2/2PKxMUUfJOWh2sYx5o/JfrSmBhtU+yUh2SPpC0KSJOZKexJ1s8x6fvx6yk14AvUx/X59HO6/u0zbykdcDnKP60OFlHRDR+BfoPgAcKrqEdhfw95BURv2g4PiDp7yQNRZs74/fN0KNCO2kdArZKulrSeuqTeYVdcchMALdnx7cDv3amI2mDpAuz4yHgeuDtAvpu5/U11ncL8EpkM2oFStaxbC7gJuB4wTW0YwL4dnb1Yww4++mwsUySLvt0nkjSdur/759e+b9q0MkZ4QJndG+mnswfAx8AB7P7LwcOZMej1Ge+jwDHqA8VSq8j/n+m+z+p/+vdiTo2Ai8D72a/L87urwEPZ8dfAY5m78dR4M4C+/+11wfcB9yUHV8E/BMwQ31Xs9EO/V2k6vib7G/hCPAq8NsdqOEJ4ATwSfa3cSdwF3BX9riAfVmNR1nhql2H67i74b2YBL6ymuf3ykwzS+qboYeZdY6DwsySHBRmluSgMLMkB4WZJTkozCzJQWFmSQ4KM0v6X99bvxEtk2LAAAAAAElFTkSuQmCC\n",
      "text/plain": [
       "<Figure size 432x288 with 1 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Plot the learned 2-D constellation. NOTE(review): enc_seed appears to hold\n",
    "# the encoder output for the full message alphabet (it is runnable without a\n",
    "# feed) -- confirm in claude.claudeflow.autoencoder.\n",
    "pred_const = sess.run(enc_seed)\n",
    "plt.plot(pred_const[:,0],pred_const[:,1],'.')\n",
    "plt.axis('square');\n",
    "lim_ = 1.6\n",
    "plt.xlim(-lim_,lim_);\n",
    "plt.ylim(-lim_,lim_);"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
