{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Imports\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "import time\n",
    "from functools import partial\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "# Pin TensorFlow to the second GPU (device index 1) before any graph/session\n",
    "# is created; must run before TF initializes CUDA.\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\" "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# def filt_as_var(filt_name, width, in_ch, out_ch):\n",
    "#     \"\"\"Takes in integers of 1-D filter width and num channels. \n",
    "#         Returns a variable of same width and num_channels in (width, #inp_chan, #out_chan) format\"\"\"\n",
    "#     return tf.get_variable(name = filt_name, shape = [width, 1, n_ch], initializer = tf.contrib.layers.xavier_initializer(), \n",
    "#                     dtype = tf.float32)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [],
   "source": [
    "def split_to_blocks(inps, block_len):\n",
    "    \"\"\"Split a (?, inp_len) tensor into blocks of width `block_len` along axis 1.\n",
    "\n",
    "    Returns a list of (?, block_len) tensors; if inp_len is not an exact\n",
    "    multiple of block_len, the final tensor holds the remaining\n",
    "    inp_len % block_len columns.\n",
    "    \"\"\"\n",
    "    inp_len = inps.get_shape().as_list()[1]\n",
    "    num_full_mats = inp_len // block_len  # number of complete blocks\n",
    "    dim_last_mat = inp_len % block_len    # width of the optional remainder block\n",
    "    split_inps = []\n",
    "    for i in range(num_full_mats):\n",
    "        split_inps.append(tf.slice(inps, [0, block_len * i], [-1, block_len]))\n",
    "    if dim_last_mat > 0:\n",
    "        split_inps.append(tf.slice(inps, [0, block_len * num_full_mats], [-1, dim_last_mat]))\n",
    "    return split_inps\n",
    "\n",
    "def append_blocks(self, dec_outs):\n",
    "    \"\"\"Concatenate a list of decoder output tensors along axis 1.\n",
    "\n",
    "    NOTE(review): this is a module-level function that takes an unused\n",
    "    `self` parameter -- it appears to have been moved out of a class.\n",
    "    The signature is kept unchanged for backward compatibility.\n",
    "    \"\"\"\n",
    "    return tf.concat(dec_outs, axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [],
   "source": [
    "class ChannelEncoder(object):\n",
    "    \"\"\"Per-block convolutional encoder with a binarized {-1, +1} output.\n",
    "\n",
    "    Splits `enc_inputs` into `config.num_blocks` blocks, runs each through\n",
    "    the same conv/FC network (weights shared via tf.AUTO_REUSE), binarizes\n",
    "    each result, and concatenates everything into `self.enc_out`.\n",
    "    \"\"\"\n",
    "    def __init__(self, enc_inputs, isTrain, config):\n",
    "        self.isTrain = isTrain\n",
    "        self.config = config\n",
    "#         self.enc_input_lst = enc_input_lst\n",
    "        \n",
    "        self.enc_out = []\n",
    "        # Width of one encoder block; assumes chan_enc_in_len is divisible by\n",
    "        # num_blocks -- TODO confirm (otherwise split_to_blocks yields a\n",
    "        # narrower final block).\n",
    "        block_len = int(self.config.chan_enc_in_len / self.config.num_blocks)\n",
    "        self.split_inps = split_to_blocks(enc_inputs, block_len) #List of split tensors \n",
    "#         print(\"Inputs split: \", self.split_inps)\n",
    "        for ind, input_elem in enumerate(self.split_inps):\n",
    "            enc_out_nonbin = self.build_chan_enc_net(ind, input_elem)\n",
    "            self.enc_out.append(self.binarize(enc_out_nonbin))          \n",
    "        self.enc_out = tf.concat(self.enc_out, 1)\n",
    "        \n",
    "    def build_chan_enc_net(self, ind, enc_input):\n",
    "        \"\"\"Build the conv + FC encoder graph for one input block.\n",
    "\n",
    "        `ind` is the block index (currently unused; weights are shared across\n",
    "        blocks by the AUTO_REUSE variable scope). Returns a 3-D tensor\n",
    "        (batch, features, 1) with values in [-1, 1] from the final tanh.\n",
    "        \"\"\"\n",
    "        \n",
    "        enc_filters = self.config.enc_filters #list of dicts for enc filter dims\n",
    "        num_layers = len(enc_filters)\n",
    "        \n",
    "        layer_outs = []\n",
    "        # Add a trailing channel dim so conv1d sees (batch, width, 1).\n",
    "        layer_input = tf.expand_dims(enc_input, len(enc_input.shape))\n",
    "\n",
    "        with tf.variable_scope(\"encoder\", reuse = tf.AUTO_REUSE):\n",
    "            \n",
    "            for l, layer_dims in enumerate(enc_filters): #iterate over encoder layers \n",
    "                n_ch = layer_dims[\"n_ch\"]\n",
    "                filt_sz = layer_dims[\"filt_sz\"]\n",
    "                strides = layer_dims[\"strides\"]\n",
    "                #With pooling\n",
    "    #             pool_size = layer_dims[\"pool_size\"]\n",
    "    #             pool_strides = layer_dims[\"pool_strides\"]\n",
    "\n",
    "                num_parallels = len(filt_sz)\n",
    "\n",
    "                # Parallel conv branches with different filter widths/strides;\n",
    "                # their flattened outputs are concatenated per layer.\n",
    "                parallels = []\n",
    "                for p in range(num_parallels): #iterate over parallels\n",
    "                    filt_name = \"enc\"+\"L\"+str(l+1)+\"P\"+str(p+1)\n",
    "                    # NOTE(review): every layer assumes one input channel; if\n",
    "                    # more than one conv layer is configured, the previous\n",
    "                    # layer's flattened output is re-expanded to 1 channel\n",
    "                    # rather than carrying n_ch channels -- confirm intended.\n",
    "                    n_chan_prev = 1 \n",
    "                    filt_var1 = tf.get_variable(name = filt_name, shape = [filt_sz[p], n_chan_prev, n_ch[p]], initializer = tf.contrib.layers.xavier_initializer(), \n",
    "                        dtype = tf.float32)\n",
    "                    Z1 = tf.nn.conv1d(layer_input, filt_var1, strides[p], 'SAME')\n",
    "                    A1 = tf.nn.relu(Z1)\n",
    "    #                 P = tf.layers.max_pooling1d(A, pool_size[p], pool_strides[p], padding = \"SAME\")\n",
    "    #                 filt_var2 = tf.get_variable(name = filt_name + \"2\", shape = [filt_sz[p], int(n_ch[p] / 3), int(2 * n_ch[p]/3)], initializer = tf.contrib.layers.xavier_initializer(), \n",
    "    #                     dtype = tf.float32)\n",
    "    #                 Z2 = tf.nn.conv1d(A1, filt_var2, strides[p], 'SAME')\n",
    "    #                 A2 = tf.nn.relu(Z2)\n",
    "    #                 filt_var3 = tf.get_variable(name = filt_name + \"3\", shape = [filt_sz[p], int(2 * n_ch[p] / 3), n_ch[p]], initializer = tf.contrib.layers.xavier_initializer(), \n",
    "    #                     dtype = tf.float32)\n",
    "    #                 Z3 = tf.nn.conv1d(A2, filt_var3, strides[p], 'SAME')\n",
    "    #                 A3 = tf.nn.relu(Z3)\n",
    "                    P = A1\n",
    "                    F = tf.contrib.layers.flatten(P)\n",
    "                    parallels.append(F)\n",
    "                concatenated = tf.concat(parallels, axis=1)\n",
    "                print(concatenated)\n",
    "                layer_outs.append(concatenated)\n",
    "                layer_input = tf.expand_dims(layer_outs[-1], len(layer_outs[-1].shape))\n",
    "\n",
    "    #         print(layer_outs[-1])\n",
    "\n",
    "            fc_acts = []\n",
    "            fc_acts.append(layer_outs[-1])\n",
    "\n",
    "            # Hidden FC layers (all but the last configured width). Widths are\n",
    "            # divided by 10 -- presumably to size per-block layers from the\n",
    "            # whole-input enc_FCs values; TODO confirm.\n",
    "            for fc in range(len(self.config.enc_FCs) - 1):\n",
    "                fc = tf.contrib.layers.fully_connected(fc_acts[-1], int(self.config.enc_FCs[fc] / 10), activation_fn = tf.nn.relu, )\n",
    "                fc_acts.append(fc)\n",
    "\n",
    "            # tanh keeps outputs in [-1, 1] so the binarizer's (1 + x) / 2\n",
    "            # yields a valid Bernoulli probability.\n",
    "            fc_out = tf.layers.dense(fc_acts[-1], int(self.config.enc_FCs[-1] / 10), activation = tf.tanh)\n",
    "        \n",
    "        return tf.expand_dims(fc_out, len(fc_out.shape)) #Expand dims to fit conv across channel\n",
    "    \n",
    "    def training_binarizer(self, input_layer):\n",
    "        \"\"\"Stochastic train-time binarizer.\n",
    "\n",
    "        Maps values in [-1, 1] to {-1, +1} by sampling a Bernoulli with\n",
    "        p = (1 + x) / 2, so larger activations are more likely to become +1.\n",
    "        \"\"\"\n",
    "        prob = tf.truediv(tf.add(1.0, input_layer), 2.0)\n",
    "        bernoulli = tf.contrib.distributions.Bernoulli(probs=prob, dtype=tf.float32)\n",
    "        return 2 * bernoulli.sample() - 1\n",
    "\n",
    "    def test_binarizer(self, input_layer):\n",
    "        \"\"\"Deterministic test-time binarizer: -1 for x < 0, else +1.\n",
    "        \"\"\"\n",
    "        ones = tf.ones_like(input_layer,dtype=tf.float32)\n",
    "        neg_ones = tf.scalar_mul(-1.0, ones)\n",
    "        return tf.where(tf.less(input_layer,0.0), neg_ones, ones)\n",
    "\n",
    "    def binarize(self, reduced_states):\n",
    "        \"\"\"Binarize with a pass-through (identity) gradient.\n",
    "\n",
    "        The forward value is the binarized tensor (stochastic when\n",
    "        self.isTrain, hard threshold otherwise); backprop sees only the\n",
    "        identity branch because the difference is wrapped in\n",
    "        tf.stop_gradient.\n",
    "        \"\"\"\n",
    "        binarized = tf.cond(self.isTrain,\n",
    "                            partial(self.training_binarizer, reduced_states),\n",
    "                            partial(self.test_binarizer, reduced_states))\n",
    "\n",
    "        pass_through = tf.identity(reduced_states) # this is used for pass through gradient back prop\n",
    "        return pass_through + tf.stop_gradient(binarized - pass_through)\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {},
   "outputs": [],
   "source": [
    "class ISI_Channel(object):\n",
    "    \"\"\"Differentiable ISI channel: 1-D convolution with a randomly drawn\n",
    "    filter, optionally followed by additive Gaussian noise.\n",
    "\n",
    "    Expects `enc_out` shaped (batch, length, 1). The filter taps are drawn\n",
    "    once at graph-construction time and are a fixed (non-trainable) constant.\n",
    "    \"\"\"\n",
    "    def __init__(self, enc_out,  config, noise = True, dynamic_channel = False):\n",
    "        # `dynamic_channel` is accepted for interface compatibility but is\n",
    "        # not implemented yet.\n",
    "        self.config = config\n",
    "        self.chan_param = config.channel[\"chan_param\"]\n",
    "        self.type = config.channel[\"type\"]\n",
    "        self.chan_in = enc_out\n",
    "        self.filter = self.get_filter(self.chan_param[\"ISI_distrib\"]) #returns 3D tensor\n",
    "        self.chan_out = self.build_channel()\n",
    "        if noise:\n",
    "            self.chan_out = self.gaussian_noise_layer(self.chan_out, self.chan_param[\"noise_std\"])\n",
    "\n",
    "    def build_channel(self):\n",
    "        \"\"\"Apply the ISI filter to the channel input.\n",
    "\n",
    "        Raises ValueError for unsupported channel types (previously this\n",
    "        path fell through to an UnboundLocalError on `chan_out`).\n",
    "        \"\"\"\n",
    "        if self.type == \"ISI\":\n",
    "            return tf.nn.conv1d(self.chan_in, self.filter, self.chan_param[\"stride\"], 'SAME', name = \"chan_ISI\")\n",
    "        raise ValueError(\"Unsupported channel type: %r\" % (self.type,))\n",
    "\n",
    "    def get_filter(self, distrib = \"rayleigh\"):\n",
    "        \"\"\"Draw the ISI filter taps; returns a (width, 1, 1) float32 tensor.\n",
    "\n",
    "        Raises ValueError for unsupported distributions (previously an\n",
    "        UnboundLocalError on `filt_vals`).\n",
    "        \"\"\"\n",
    "        if distrib == \"rayleigh\":\n",
    "            sf = self.chan_param[\"distrib_param\"]\n",
    "            chan_filt_dims = (self.chan_param[\"chan_filt_width\"], 1, 1) # (width, num_in_chan, num_out_chan)\n",
    "            filt_vals = np.random.rayleigh(scale = sf, size = chan_filt_dims)\n",
    "        else:\n",
    "            raise ValueError(\"Unsupported ISI distribution: %r\" % (distrib,))\n",
    "        return tf.convert_to_tensor(filt_vals, dtype = tf.float32)\n",
    "\n",
    "    def gaussian_noise_layer(self, input_layer, std, name=None):\n",
    "        \"\"\"Add zero-mean Gaussian noise with stddev `std`.\n",
    "\n",
    "        `name` is accepted for interface compatibility; the noise op is\n",
    "        always named \\\"chan_noise\\\".\n",
    "        \"\"\"\n",
    "        noise = tf.random_normal(shape=tf.shape(input_layer), mean=0.0,\n",
    "                                 stddev=std, dtype=tf.float32, name=\"chan_noise\")\n",
    "        return input_layer + noise"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 96,
   "metadata": {},
   "outputs": [],
   "source": [
    "class ChannelDecoder(object):\n",
    "    \"\"\"Per-block convolutional decoder mirroring ChannelEncoder.\n",
    "\n",
    "    Splits the channel output into num_blocks blocks, decodes each with a\n",
    "    shared conv/FC network, concatenates the results (self.dec_out), and\n",
    "    maps them through two dense layers to `self.decoded`.\n",
    "    \"\"\"\n",
    "    def __init__(self, chan_out, isTrain, config):\n",
    "        self.isTrain = isTrain\n",
    "        self.config = config\n",
    "        self.dec_input = chan_out\n",
    "#         self.dec_out = self.build_chan_dec_net()\n",
    "#         self.dec_labels = tf.where(tf.less(self.dec_out,0.0), \n",
    "#                                  -1 * tf.ones_like(self.dec_out,dtype=tf.float32), \n",
    "#                                  tf.ones_like(self.dec_out,dtype=tf.float32))\n",
    "        self.dec_outs = []\n",
    "        # Drop the trailing channel dim: (batch, length, 1) -> (batch, length).\n",
    "        chan_out = tf.squeeze(chan_out, [2])\n",
    "        print(\"channel_out:\", chan_out)\n",
    "        dec_block_len = int(chan_out.get_shape().as_list()[1] / self.config.num_blocks)\n",
    "        split_chan_outs = split_to_blocks(chan_out, dec_block_len)\n",
    "        for ind, chan_out_elem in enumerate(split_chan_outs):\n",
    "            self.dec_outs.append(self.build_chan_dec_net(chan_out_elem, ind))\n",
    "            \n",
    "        self.dec_out = tf.concat(self.dec_outs, 1)\n",
    "        print(self.dec_out)\n",
    "#         self.decoded = tf.layers.dense()\n",
    "        # NOTE(review): widths 1024 and 400 are hard-coded; 400 presumably\n",
    "        # equals config.chan_dec_out_len -- confirm and derive from config.\n",
    "        fc_1 = tf.contrib.layers.fully_connected(self.dec_out, 1024, activation_fn = tf.nn.relu)\n",
    "        fc_2 = tf.contrib.layers.fully_connected(fc_1, 400, activation_fn = None)\n",
    "#         fc_3 = tf.contrib.layers.fully_connected(fc_2, 400, activation_fn = None)\n",
    "\n",
    "        self.decoded = fc_2\n",
    "        \n",
    "    def build_chan_dec_net(self, chan_out, ind):\n",
    "        \"\"\"Build the conv + FC decoder graph for one block of channel output.\n",
    "\n",
    "        `ind` is the block index (unused; weights are shared across blocks\n",
    "        through the AUTO_REUSE variable scope). Returns a 2-D tensor\n",
    "        (batch, dec_FCs[-1]) in [-1, 1] from the final tanh.\n",
    "        \"\"\"\n",
    "        \n",
    "        dec_filters = self.config.dec_filters #list of dicts for enc filter dims\n",
    "        num_layers = len(dec_filters)\n",
    "        \n",
    "        layer_outs = []\n",
    "        # Add a trailing channel dim so conv1d sees (batch, width, 1).\n",
    "        layer_input = tf.expand_dims(chan_out, len(chan_out.shape))\n",
    "        with tf.variable_scope(\"decoder\", reuse = tf.AUTO_REUSE):\n",
    "            for l, layer_dims in enumerate(dec_filters): #iterate over decoder layers \n",
    "                n_ch = layer_dims[\"n_ch\"]\n",
    "                filt_sz = layer_dims[\"filt_sz\"]\n",
    "                strides = layer_dims[\"strides\"]\n",
    "\n",
    "                #With pooling\n",
    "    #             pool_size = layer_dims[\"pool_size\"]\n",
    "    #             pool_strides = layer_dims[\"pool_strides\"]\n",
    "\n",
    "                num_parallels = len(filt_sz)\n",
    "\n",
    "                # Parallel conv branches with different widths/strides, as in\n",
    "                # the encoder; flattened outputs are concatenated per layer.\n",
    "                parallels = []\n",
    "                for p in range(num_parallels): #iterate over parallels\n",
    "                    filt_name = \"dec\"+\"L\"+str(l+1)+\"P\"+str(p+1)\n",
    "                    n_chan_prev = 1 \n",
    "                    filt_var1 = tf.get_variable(name = filt_name, shape = [filt_sz[p], n_chan_prev, n_ch[p]], initializer = tf.contrib.layers.xavier_initializer(), \n",
    "                        dtype = tf.float32)\n",
    "                    Z1 = tf.nn.conv1d(layer_input, filt_var1, strides[p], 'SAME')\n",
    "                    A1 = tf.nn.relu(Z1)\n",
    "    # #                 P = tf.layers.max_pooling1d(A, pool_size[p], pool_strides[p], padding = \"SAME\")\n",
    "    #                 filt_var2 = tf.get_variable(name = filt_name + \"2\", shape = [filt_sz[p], int(n_ch[p] / 3), int(2 * n_ch[p]/3)], initializer = tf.contrib.layers.xavier_initializer(), \n",
    "    #                     dtype = tf.float32)\n",
    "    #                 Z2 = tf.nn.conv1d(A1, filt_var2, strides[p], 'SAME')\n",
    "    #                 A2 = tf.nn.relu(Z2)\n",
    "    #                 filt_var3 = tf.get_variable(name = filt_name + \"3\", shape = [filt_sz[p], int(2 * n_ch[p] / 3), n_ch[p]], initializer = tf.contrib.layers.xavier_initializer(), \n",
    "    #                     dtype = tf.float32)\n",
    "    #                 Z3 = tf.nn.conv1d(A2, filt_var3, strides[p], 'SAME')\n",
    "    #                 A3 = tf.nn.relu(Z3)\n",
    "    #                 P = A3\n",
    "                    F = tf.contrib.layers.flatten(A1)\n",
    "                    parallels.append(F)\n",
    "                concatenated = tf.concat(parallels, axis=1)\n",
    "                print(concatenated)\n",
    "                layer_outs.append(concatenated)\n",
    "                layer_input = tf.expand_dims(layer_outs[-1], len(layer_outs[-1].shape))\n",
    "\n",
    "            fc_acts = []\n",
    "            fc_acts.append(layer_outs[-1])\n",
    "\n",
    "            # NOTE(review): every hidden FC layer is sized dec_FCs[-1]; the\n",
    "            # loop index suggests dec_FCs[fc] was intended (cf. the encoder's\n",
    "            # enc_FCs[fc]) -- confirm before changing.\n",
    "            for fc in range(len(self.config.dec_FCs) - 1):\n",
    "                fc = tf.contrib.layers.fully_connected(fc_acts[-1], self.config.dec_FCs[-1] , activation_fn = tf.nn.relu)\n",
    "                fc_acts.append(fc)\n",
    "\n",
    "            fc_out = tf.contrib.layers.fully_connected(fc_acts[-1], self.config.dec_FCs[-1], activation_fn = tf.tanh)\n",
    "\n",
    "        return fc_out"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 97,
   "metadata": {},
   "outputs": [],
   "source": [
    "class ChannelSystem():\n",
    "    \"\"\"End-to-end encoder -> ISI channel -> decoder training system.\n",
    "\n",
    "    Builds the tf.data pipeline, the three graph components, the hinge\n",
    "    loss / Adam training op, and exposes train()/test() loops. Targets\n",
    "    equal inputs (autoencoder over a noisy channel).\n",
    "    \"\"\"\n",
    "    def __init__(self, config):\n",
    "        self.config = config\n",
    "\n",
    "        self.lr = tf.placeholder(tf.float32, shape = [], name = 'lr')\n",
    "        self.X = tf.placeholder(tf.float32, shape = [None, self.config.chan_enc_in_len], name = 'dataset_iter_X')\n",
    "        self.Y = tf.placeholder(tf.float32, shape = [None, self.config.chan_dec_out_len], name = 'dataset_iter_Y')\n",
    "        self.isTrain = tf.placeholder(tf.bool,shape=(), name='isTrain')\n",
    "\n",
    "        self.dataset = tf.data.Dataset.from_tensor_slices((self.X, self.Y)).shuffle(buffer_size=100).batch(config.batch_size).repeat()\n",
    "        self.iterator = self.dataset.make_initializable_iterator()\n",
    "        self.chan_enc_inputs, self.dec_targets = self.iterator.get_next()\n",
    "\n",
    "        # Build encoder -> channel -> decoder graph.\n",
    "        print(\"Chan enc inputs: \", self.chan_enc_inputs)\n",
    "        self.channel_encoder = ChannelEncoder(self.chan_enc_inputs, self.isTrain, config)\n",
    "        print(\"Channel input: \", self.channel_encoder.enc_out) \n",
    "        self.channel = ISI_Channel(self.channel_encoder.enc_out, config, noise = True)\n",
    "        print(\"Channel output: \", self.channel.chan_out) \n",
    "        self.channel_decoder = ChannelDecoder(self.channel.chan_out, self.isTrain, config)\n",
    "        print(\"Dec out: \", self.channel_decoder.dec_out) \n",
    "        self.dec_labels = self.get_labels(self.channel_decoder.decoded) \n",
    "        self.accuracy = self.define_accuracy()\n",
    "        self.loss, self.train_op = self.define_loss()\n",
    "\n",
    "    def get_labels(self, reals):\n",
    "        \"\"\"Map real-valued decoder outputs to hard -1/+1 labels (0 -> +1).\"\"\"\n",
    "        return tf.where(tf.less(reals, 0.0),\n",
    "                        -1 * tf.ones_like(reals, dtype=tf.float32),\n",
    "                        tf.ones_like(reals, dtype=tf.float32))\n",
    "\n",
    "    def gen_bin_data(self, num_samples, sample_len, seed_num = None):\n",
    "        \"\"\"Generate a (num_samples, sample_len) matrix of uniform -1/+1 bits.\n",
    "\n",
    "        If `seed_num` is given, the global numpy RNG is seeded first.\n",
    "        \"\"\"\n",
    "        if seed_num is not None:\n",
    "            np.random.seed(seed_num)\n",
    "        randMat = np.random.rand(num_samples, sample_len)\n",
    "        return np.where(randMat > 0.5, 1.0, -1.0)\n",
    "\n",
    "    def define_accuracy(self):\n",
    "        \"\"\"Fraction of transmitted bits recovered exactly.\"\"\"\n",
    "        eq_indicator = tf.cast(tf.equal(self.dec_labels, self.dec_targets), dtype=tf.float32)\n",
    "        return tf.reduce_mean(eq_indicator)\n",
    "\n",
    "    def define_loss(self):\n",
    "        \"\"\"Hinge loss on raw decoder outputs, minimized with Adam.\n",
    "\n",
    "        Targets are mapped from -1/+1 to 0/1 because tf.losses.hinge_loss\n",
    "        expects binary labels.\n",
    "        \"\"\"\n",
    "        adapted_targets = tf.cast(self.dec_targets > 0, tf.float32)\n",
    "        loss = tf.losses.hinge_loss(adapted_targets, self.channel_decoder.decoded)\n",
    "        train_op = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(loss)\n",
    "        return loss, train_op\n",
    "\n",
    "    def train(self, sess):\n",
    "        \"\"\"Run the training loop; returns the list of sampled accuracies.\n",
    "\n",
    "        Fresh random data is generated each epoch; the learning rate decays\n",
    "        by 1/sqrt(epoch). KeyboardInterrupt stops training cleanly and the\n",
    "        accuracies collected so far are still returned.\n",
    "        \"\"\"\n",
    "        params = tf.trainable_variables()\n",
    "        num_params = sum(\n",
    "            map(lambda t: np.prod(tf.shape(t.value()).eval()), params))\n",
    "        print('Total model parameters: ', num_params)\n",
    "        self.training_counter = 0\n",
    "        accuracies = []\n",
    "        try:\n",
    "            tic = time.time()\n",
    "            curr_lr = self.config.init_lr\n",
    "            for epoch in range(self.config.n_epochs):\n",
    "                # Autoencoder-style training: targets are the inputs.\n",
    "                train_data = self.gen_bin_data(self.config.train_size, self.config.chan_enc_in_len)\n",
    "                sess.run(self.iterator.initializer, feed_dict = {self.X: train_data, self.Y: train_data})\n",
    "                if epoch > 0: curr_lr = curr_lr / np.sqrt(epoch)\n",
    "                train_fd = {self.isTrain: True, self.lr : curr_lr}\n",
    "\n",
    "                for i in range(self.config.batch_per_epoch):\n",
    "                    loss, _, optimizer_lr = sess.run([self.loss, self.train_op, self.lr], train_fd)\n",
    "                    self.training_counter += 1\n",
    "                    if self.training_counter % self.config.print_every == 0:\n",
    "                        toc = time.time()\n",
    "                        # NOTE: consumes an extra batch just to measure accuracy.\n",
    "                        acc = sess.run(self.accuracy, train_fd)\n",
    "                        print(\"Epoch: \", epoch + 1, \n",
    "                              \"Accuracy: \", acc,\n",
    "                              \"Training iteration: \", self.training_counter,\n",
    "                              \"Training time: \", int(toc-tic), \"s\",\n",
    "                              \"Training loss: \", loss,\n",
    "                              \"Learning rate\", optimizer_lr)\n",
    "                        accuracies.append(acc)\n",
    "\n",
    "        except KeyboardInterrupt:\n",
    "            print('training interrupted')\n",
    "        return accuracies\n",
    "\n",
    "    def test(self, sess):\n",
    "        \"\"\"Evaluate accuracy and loss on freshly generated test data.\n",
    "\n",
    "        Bug fix: the iterator is now re-initialized with the test data.\n",
    "        Previously the initializer call was commented out and numpy arrays\n",
    "        were assigned over the chan_enc_inputs/dec_targets tensor\n",
    "        attributes, which has no effect on the already-built graph, so\n",
    "        evaluation silently ran on whatever data the iterator still held.\n",
    "        \"\"\"\n",
    "        test_data = self.gen_bin_data(self.config.test_size, self.config.chan_enc_in_len)\n",
    "        sess.run(self.iterator.initializer, feed_dict = {self.X: test_data, self.Y: test_data})\n",
    "        print(\"Testing...\")\n",
    "        test_fd = {self.isTrain : False}\n",
    "        # Single run so loss and accuracy come from the same batch.\n",
    "        test_acc, test_loss = sess.run([self.accuracy, self.loss], feed_dict = test_fd)\n",
    "        print(\"Test loss: \", test_loss, \"\\nTest accuracy: \", test_acc)\n",
    "        return test_acc"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Config(object):\n",
    "    \"\"\"Hyper-parameter container for the channel auto-encoder system.\"\"\"\n",
    "\n",
    "    def __init__(self, \n",
    "                 enc_filters,\n",
    "                 dec_filters,\n",
    "                 numb_tx_bits, \n",
    "                 chan_enc_in_len,\n",
    "                 channel,\n",
    "                 enc_FCs,\n",
    "                 dec_FCs,\n",
    "                 n_epochs = 10, \n",
    "                 init_lr = 0.001,\n",
    "                 batch_size = 100,\n",
    "                 dropout = True,\n",
    "                 train_size = int(1e5),\n",
    "                 test_size = 1000,\n",
    "                 print_every = 100,\n",
    "                 batch_per_epoch = int(1e4)):\n",
    "        # --- Architecture ---\n",
    "        self.enc_filters = enc_filters\n",
    "        self.dec_filters = dec_filters\n",
    "        self.enc_FCs = enc_FCs\n",
    "        self.dec_FCs = dec_FCs\n",
    "        # --- Channel / dimensions ---\n",
    "        self.channel = channel\n",
    "        self.chan_enc_in_len = chan_enc_in_len\n",
    "        self.chan_dec_out_len = chan_enc_in_len  # decoder reconstructs the input\n",
    "        self.numb_tx_bits = numb_tx_bits\n",
    "        self.chan_width = 3\n",
    "        self.num_blocks = 10\n",
    "        # --- Training schedule ---\n",
    "        self.n_epochs = n_epochs\n",
    "        self.init_lr = init_lr\n",
    "        self.dropout = dropout\n",
    "        self.batch_size = batch_size\n",
    "        self.batch_per_epoch = batch_per_epoch\n",
    "        self.train_size = train_size  # samples generated per training epoch\n",
    "        self.test_size = test_size    # samples generated for evaluation\n",
    "        self.print_every = print_every"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 99,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'enc_filters': [{'filt_sz': [16, 32, 8, 4, 16], 'n_ch': [32, 32, 32, 32, 32], 'strides': [2, 16, 4, 4, 8]}], 'dec_filters': [{'filt_sz': [16, 32, 8, 4, 16], 'n_ch': [32, 32, 32, 32, 32], 'strides': [4, 16, 4, 1, 8]}], 'channel': {'type': 'ISI', 'chan_param': {'ISI_distrib': 'rayleigh', 'chan_filt_width': 3, 'stride': 1, 'distrib_param': 1, 'noise_std': 1}}, 'chan_enc_in_len': 400, 'chan_dec_out_len': 400, 'numb_tx_bits': 600, 'n_epochs': 20, 'init_lr': 0.001, 'dropout': True, 'train_size': 100000, 'test_size': 1000000, 'batch_size': 100, 'print_every': 1000.0, 'batch_per_epoch': 1000, 'enc_FCs': [512, 600], 'dec_FCs': [1024, 256, 40], 'chan_width': 3, 'num_blocks': 10}\n",
      "Chan enc inputs:  Tensor(\"IteratorGetNext:0\", shape=(?, 400), dtype=float32)\n",
      "Tensor(\"encoder/concat:0\", shape=(?, 1536), dtype=float32)\n",
      "Tensor(\"encoder_1/concat:0\", shape=(?, 1536), dtype=float32)\n",
      "Tensor(\"encoder_2/concat:0\", shape=(?, 1536), dtype=float32)\n",
      "Tensor(\"encoder_3/concat:0\", shape=(?, 1536), dtype=float32)\n",
      "Tensor(\"encoder_4/concat:0\", shape=(?, 1536), dtype=float32)\n",
      "Tensor(\"encoder_5/concat:0\", shape=(?, 1536), dtype=float32)\n",
      "Tensor(\"encoder_6/concat:0\", shape=(?, 1536), dtype=float32)\n",
      "Tensor(\"encoder_7/concat:0\", shape=(?, 1536), dtype=float32)\n",
      "Tensor(\"encoder_8/concat:0\", shape=(?, 1536), dtype=float32)\n",
      "Tensor(\"encoder_9/concat:0\", shape=(?, 1536), dtype=float32)\n",
      "Channel input:  Tensor(\"concat:0\", shape=(?, 600, 1), dtype=float32)\n",
      "Channel output:  Tensor(\"add_10:0\", shape=(?, 600, 1), dtype=float32)\n",
      "channel_out: Tensor(\"Squeeze:0\", shape=(?, 600), dtype=float32)\n",
      "Tensor(\"decoder/concat:0\", shape=(?, 3264), dtype=float32)\n",
      "Tensor(\"decoder_1/concat:0\", shape=(?, 3264), dtype=float32)\n",
      "Tensor(\"decoder_2/concat:0\", shape=(?, 3264), dtype=float32)\n",
      "Tensor(\"decoder_3/concat:0\", shape=(?, 3264), dtype=float32)\n",
      "Tensor(\"decoder_4/concat:0\", shape=(?, 3264), dtype=float32)\n",
      "Tensor(\"decoder_5/concat:0\", shape=(?, 3264), dtype=float32)\n",
      "Tensor(\"decoder_6/concat:0\", shape=(?, 3264), dtype=float32)\n",
      "Tensor(\"decoder_7/concat:0\", shape=(?, 3264), dtype=float32)\n",
      "Tensor(\"decoder_8/concat:0\", shape=(?, 3264), dtype=float32)\n",
      "Tensor(\"decoder_9/concat:0\", shape=(?, 3264), dtype=float32)\n",
      "Tensor(\"concat_1:0\", shape=(?, 400), dtype=float32)\n",
      "Dec out:  Tensor(\"concat_1:0\", shape=(?, 400), dtype=float32)\n",
      "Total model parameters:  1040875\n",
      "Epoch:  1 Accuracy:  0.655625 Training iteration:  1000 Training time:  29 s Training loss:  0.72910607 Learning rate 0.001\n",
      "Epoch:  2 Accuracy:  0.66365 Training iteration:  2000 Training time:  57 s Training loss:  0.6879595 Learning rate 0.001\n",
      "Epoch:  3 Accuracy:  0.665875 Training iteration:  3000 Training time:  84 s Training loss:  0.6860227 Learning rate 0.0007071068\n",
      "Epoch:  4 Accuracy:  0.665025 Training iteration:  4000 Training time:  127 s Training loss:  0.67601466 Learning rate 0.00040824828\n",
      "Epoch:  5 Accuracy:  0.666775 Training iteration:  5000 Training time:  170 s Training loss:  0.66901976 Learning rate 0.00020412414\n",
      "Epoch:  6 Accuracy:  0.666575 Training iteration:  6000 Training time:  213 s Training loss:  0.6740667 Learning rate 9.12871e-05\n",
      "Epoch:  7 Accuracy:  0.666725 Training iteration:  7000 Training time:  255 s Training loss:  0.6753465 Learning rate 3.72678e-05\n",
      "Epoch:  8 Accuracy:  0.666375 Training iteration:  8000 Training time:  285 s Training loss:  0.6659002 Learning rate 1.4085905e-05\n",
      "Epoch:  9 Accuracy:  0.66375 Training iteration:  9000 Training time:  312 s Training loss:  0.6692112 Learning rate 4.9801192e-06\n",
      "training interrupted\n",
      "Testing...\n",
      "Test loss:  0.67448497 \n",
      "Test accuracy:  0.6663\n"
     ]
    },
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZIAAAEWCAYAAABMoxE0AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvFvnyVgAAIABJREFUeJzt3Xl8VNX5+PHPk4WEEEKAQNj3oAKyBQEFF9yKdW1rXYq4VOTbVqrWpS5trbW2tbXV/lTqhrsiriClVkRFJVh2EQQUQtj3BAiEELI9vz/uCY5Dlpkkk5lJnvfrNa/M3Hvuvc8suc8959x7rqgqxhhjTG3FhDsAY4wx0c0SiTHGmDqxRGKMMaZOLJEYY4ypE0skxhhj6sQSiTHGmDqxRGIqJSLdRKRARGKrKaMi0qch4wqGiHwiIhNCtO75IjLEPb9HRKaEYjs+29soImeHchv1qbrfhoiME5EParneRSLSv27RVbnufiKyREQkFOsPNxFJF5E1IpJQ3+u2ROLH7Xz2heLDjiaqullVk1W1DEK7U442InIhcFBVvwBQ1T+raqP4bBoiYanqq6p6rs82gzkg+Ttwf2gi44/A39Xv4joRyRCRIhF5xW/6T0Rkk4gcEpEZItLGZ14bEZnu5m0SkZ8EE4iInC0iy9zyW0XkMp95g0VkqYgUur+DfeaJiPxVRPLc468ViVFVdwFzgYlBfSoBsETiQ0R6AKcCClzUwNuOa8jtmTr5GfByuINoomYCY0SkQ32uVEQ6AmOAGZXMngws9ivfH3gKGA+kA4XAv/yWKXbzxgFPBFqTEpF+wFTgN0ArYBCw1M1rBrwLvAK0Bl4E3nXTwUsSl7hlBgIXAv/ns/pX/V7XD1W1h3sA9wLzgYeBWX7zmgP/ADYB+UAW0NzNGw18DuwHtgDXuumfABN81nEtkOXzWoEbgXXABjft/7l1HMD78ZzqUz4WuAdYDxx087vi/Wj/4RfvTOBXlbzHPwCPuefxwCHgIZ/3WAS0AXq4+OKAPwFlbl4B8LhP/D9z8e93cUgVn20McJeLPQ94A2jj5lVsayKwHdgB3O6zbALwTzdvu3ue4DP/YmC5+8zWA2N9Pv8/uu/0IPABkObmJeL9M+a52BcD6QH8RpoBh4EuPtPuA17xey/XAJuBXOA3fmXfAl53MS0DBgWw3Y3A2UF8lpVuv4ZtvAyUu/dXAPwauBzYAKS4MucBO4F2NaxLgZuAHBfDQ0CM//8B8Jkre8ht83IgDZjlvpe9wLyKZd0yc4Br6vl//2rgw0qmX+E+36PfsZv+Z2Cqz+veeImjJdDCPe/r99k+GGAsU4E/VjHvXGAbPv9n7nuu+M1/Dkz0mXc9sMDndRxe0uter59ffa4s2h9ANvALIBMowWfHgreT/ATojLdDPwVvB9fd7RCuxNsxtwUGu2U+oeZEMgdvx12RlK5y64gDbnP/tIlu3h3ASuA4QPCOOtoCw/F2sBX/qGnux3LMjhE4E1jpnp+CtzNa6DPvS/e8h4svrrL34hP/LCAV6AbsqfhBV7Ldm4EFQBf3uT0FvOa3rdfcP+GJbl0VO8773bLtgXbun+WPbt5wvMR+Dt4OtjNwvE/M64G+eEnyE9w/M95R2b+BJPd9ZuJ2ljX8RvoDh/ym3cexieQZt81BwBHgBJ+yJcCleL+X2/F21PE1bHejz+cRyGdZ6fYDeH9Ht+Mz7VXgBbzf2nbgggDWo3jNKG3cb2Ntxe+Hyv8P+vi8/gvwpPt84vFaCXx3nI8CD1ex3dF4Caiqx+gqlnsImOw3LcXF3YVjE8m7wJ1+5Qvc72gIUOg373bg3wF+Bzl4B0Ar8Q6qXuHbA4VfAf/1Kz8LuM09zwdG+MwbhtcM61t+BXBRILEE+rCmLUdERuMlhTdUdSneDugnbl4M8FPgZlXdpqplqvq5qh5xZT5U1ddUtURV81R1eRCb/ouq7lXVwwCq
+opbR6mq/gNvR3GcKzsB+K2qfqOeL13ZRXg/oLNcuSuAT9RrE/X3PyBDRNoCpwHPAp1FJBk4Hfg0iNjB2zHvV9XNeDuOwVWU+xnekfFW97ndB1zq16T3B1U9pKorgefxkjN4TQP3q+puVd2DV6sa7+ZdDzynqnNUtdx9P1/7rPN5VV3rPt83fOIrwdsx9nHf51JVPRDA+03FO3CoyR9U9bCqfgl8ibdDr7BUVd9S1RK82m8iMDKAdVYI9LOsavvBuhHvIOMTvJ3hrACX+6v7bW/Gq0VeWdMCTgnQEe+ouURV56nbAzoH8b6HY6hqlqqmVvPIqmKblX2vfwSeVdWtlZRPxvuf85WPVyNJxqsdVzYvEF3wft8/AjLwDggeC2C7lc3PB5L9TiCo8vOrLUsk37oG+EBVc93rqW4aeEf4iXjJxV/XKqYHaovvCxG53Z1ZkS8i+/HaSNMC2NaLeLUZ3N9K2/DdDnUJXtI4DS9xfA6MonaJZKfP80K8H3JlugPTRWS/e19r8JrL0n3K+H4Wm4BO7nkn97qyeTV9/lXF9zIwG5gmIttF5G8iEl/NeirsI7AdQnWfy9H3qarlwFa+fT+BCOSzDPR7qZGq7gfeBAbgNe8GqqrvsyYP4bUOfCAiOSJyl9/8lni1i/r0ne/VdWCfDTxSRfkCvBqLrxS8nXR18wJxmG8PgArwmtG+H8B2K5ufAhT4JeJ6//wskQAi0hy4DDhdRHaKyE68KuQgERmE18ZbhNcO6m9LFdPBa/dN8nldWQfh0S9YRE7Fa5e+DGitqql4RxQVRxPVbesV4GIX7wlU3mlY4VO8I8wheH0DnwLfw2sm+qyKZbSK6YHaApznd3SYqKrbfMp09XneDa8ZBfe3exXzqvtMquSOdP+gqv3wmvguwGsnr0k23skxnYPdpo+j79PVdrvw7fsJRCCfZW0d8z27nepP8ZoeHw1iXVV9n9UHoHpQVW9T1V54J73cKiJn+RQ5Aa+WdQwROdWdtl7V49QqNrsCrwm0whl4zYSb3f7gduBHIrLMzV+FTy1PRHrhtR6sdY84EcnwWd8gt0wgVvDd78H3+SpgoF8NY6DPur8Tl/92Xa21D1V8frVlicRzCd4RXT+8po/BeD/WecDV7qjxOeBhEekkIrEicrI7RfhV4GwRuUxE4kSkrc/peMuBH4pIkju98foa4mgJlOL1D8SJyL189+hiCvBHdzqiiMhA10SFq34vxjvSfruiqawKn+LtNFerajGu/wOvw39PFcvsAnrVEH91ngT+JCLdAUSknYhc7Ffmd+6z6g9ch9chDd4O7LdumTS8kyIqTsV8FrhORM4SkRgR6Swix9cUjIiMEZETxbtO5gBec0q5m3efiHxS2XLu8/oQr/ZWW5ki8kP3T30LXh/GgiCWD+SzrJSInCEi1R0UfOd7FpGKkxLuwftOOovILwKM8w4RaS0iXfH6dV6vopz/Ni8QkT5uZ5mP979Z8d0k4vVDzKlsRa4ZLLmax7wqYpgDDHXrB3ga7wClYn/wJPAfvAMu8P7vL3SJqwVeP947LgkeAt4B7heRFiIyCu+EkJfde+gh3inPPaqI5Xm833QvEUnCO7GiojnxE/d53CQiCSIyyU3/2P19CS/xdhaRTnj9rC/4rHs4sFFVfWv4dWaJxHMNXlVys6rurHgAjwPj3D/87XidX4vxziT5K17n9ma8audtbvpyvj0ieATv7I1deE1Pr9YQx2zgfbwjmk14tSDf5oGH8dr5P8Db+T2L135a4UW8juqaTk393C1XUftY7bZVVW0EvLPJLhXvGptgjkp9l5+J11xxEG/HOcKvzKd4R/wf4Z3PX3HR2gN4zXEr8L6DZW4arn/oOrzPOt+tozs164B39tQBvKahT/n2c+uKd6ZXVSpO+6ytd/HOTtrn1vND118SqEA+y6p0xfv+q/IXvKS9X0Rud6+3qOoTrj/mKuABv6PtqryLd2bhcryd8LNVlLsPeNFt8zK8foEP
8Zpp/gf8S1XnurIX4vX/BVODq5HrT/wYb4ePqhb67QsKgKKKAy1VXYXXV/UqsBvvINA3wf4C739sN96B0M/dMuB9B5vwzr6qLJbn8BLCQlfuCN4ZcBUHMpfgHQjux6spXuKmg/fb/Dfe/8lXeJ/7Uz6rH4eXFOuVfLfpzEQzETkN7+ixu0bRF+uOzCrOXCoNbzQgIsuBs1Q1r5oy84FJ6i5KDGLd9+F18F9VU9lQEO8K/DdVdXY4tl9XIrIQuF5VvwrBuvvhHYwND+X/j4j8Ftijqk/VWLh+t9se74BpiKoW1eu6o2h/Y6rhOoqn4Z2+G6orf0Mi0hJJKIU7kRgTCta01QiIyAl41dyOeKdZmigj345tVtmjW7jj81ddp3a4YzMNz2okxhhj6sRqJMYYY+qkSQwUmJaWpj169KjVsocOHaJFixb1G1A9sLiCY3EFx+IKTmONa+nSpbmq2q7GglqP461E6iMzM1Nra+7cubVeNpQsruBYXMGxuILTWOMClqiNtWWMMSbULJEYY4ypE0skxhhj6sQSiTHGmDqxRGKMMaZOLJEYY4ypE0skxhhj6qRJXJBoTCRat+sgs3KK2ZG0mXbJCbRr6T3SkhNoFmfHeCZ6WCIxJgz+u3IHt77xJYdLynhr7cpj5qcmxZOWnPCdBNOupfc6reW309u0aEZsjFSyhehTWlbOgaJS9hcWs/9wCfmHS8gvLDn6en9hCZu3HGF93AYy2ifTp30yHVsl8t2bBZpwsERiTAMqL1f++eFaHv04myHdUhnX8winnHwyew4e8R4FR44+z3XPv9y6n90HjnC4pOyY9cUItPVJOGmVJJ6K5ymJcQ2y0y0qKfOSgNv5H00MhSXsP1zsTTtcwoGK+W7awaLq7yDQMjGO8rJSPt6y+ui05IQ4erdPPppYMtonk9G+JV1aNyemkSTYaGCJxJgGUnCklFtfX84Hq3fx48wuPPCDAfwvax6dUpvTKbV5jcsfOlJ6NNnk+iWdiulrdx0kt+AIJWXHjurdLDbGSzYtK6vpNHN/E2nXMgFVpeCIqx0U+iSFw9++zvd7XTG/qKS8yvcQGyOkNo+nVVI8qc3jadcygYz2yaQ0jyfVTUtNanZ0fmpSM1Kbx9MyMY642Bg++eQTThx2Mtm7C1i3u8D9Pchna/fw1tKtR7eTGB9Dr7RkMtK/TTJ92reke9sk4mOt2bC+WSIxpgFsyjvEDS8tYf2eQ9x7QT+uG9Uj6NpBi4Q4WiTE0SOt+kH4VJX8wyXH1nJ8Es/WfYUs37KPvEPFVHYnCQF0dtU3UUyMjyG1eTNSk+JJaR5PtzZJDOziksDRpODNb9U8/ui05IS614raJifQNjmBEb3afmd6/uESsncXkL37IOt2FZC9p4AlG/fx7vJv78obHyv0TGtxNLFktPeSTc+0FiTExdYprqbMEokxITY/O5cbpy5DFV68bjijM9JCuj0R8Y7kk5qRkd6y2rKlZeXsPVT83dpNwRFWr81h4PG9SW3uVztwiSExPvJ2uq2ax5PZvTWZ3Vt/Z/qhI6Ws31NwtBazblcBq7cf4P2vdlLukmiMQPe2LY42j/VxTWS927cgqZntJmtin5AxIaKqvPD5Rh74zxp6t2vBM1cPo3vbyBpqPC42hvYpibRPSfzO9E/Yyhmn9Q5TVPWrRUIcA7ukMrBL6nemF5WUsSH3kNdEtuvg0aayuV/vprT822pal9bNv9P/0ifdSzQpifEN/VYiliUSY0LgSGkZv5vxFW8s2co5/dJ55PLBJCfYv1skSYyP5YSOKZzQMeU700vKytmUd8hrHquoxewu4PP1eRSXftv/k56S4CWW9skkFpRyumqTPYPMftnG1LPdB4v4+SvLWLppHzed2Ydbzu5rZxBFkfjYGPq0b0mf9t9tFiwrV7bsLfRJLgfJ3l3AG0u2UFhcRqtOOfz8jMZRiwuWJRJj6tGKrfv5v5eXsr+whMk/Gcr5AzuGOyRTT2JjhB5pLeiR
1oKz+6UfnV5erlzx6Gz+NvtrjuuQzJnHp1ezlsbJzoMzpp68u3wbP37yf8SI8NbPT7Yk0kTExAjXn5hAv44p3PTacrJ3Hwx3SA3OEokxdVRWrvzlv2u4edpyBndNZeakUfTv1CrcYZkGlBArPHP1MBLjY5jw4hLyC0vCHVKDCmkiEZGxIvKNiGSLyF1VlLlMRFaLyCoRmeozvZuIfCAia9z8Hm66iMifRGStm3dTKN+DMdXJP1zC9S8u5qlPc7hqZDdemTCCtskJ4Q7LhEGn1OY8eVUm2/YfZtJryygtq/rCzMYmZIlERGKBycB5QD/gShHp51cmA7gbGKWq/YFbfGa/BDykqicAw4Hdbvq1QFfgeDdvWqjegzHVWb+ngB/8az5Z63L50w8G8MAlJ9pV003csB5t+OPFA5i3LpcH//t1uMNpMKHsbB8OZKtqDoCITAMuBlb7lLkBmKyq+wBUdbcr2w+IU9U5bnqBzzI/B36iquW+yxjTkOZ+s5ubXvuCZrExTL1hJMN7tgl3SCZCXDG8G1/vPMiUrA0c3zGFSzO7hDukkBOtbHyE+lixyKXAWFWd4F6PB0ao6iSfMjOAtcAoIBa4T1XfF5FLgAlAMdAT+BC4S1XLRCQPeBj4AbAHuElV11Wy/YnARID09PTMadNqV3EpKCggOTm5VsuGksUVnPqKS1X578YS3vymhK4tY7hpaAJpzWtfC2nsn1d9i5a4SsuVfywpYt2+cu4akUif1PCMBFDXz2vMmDFLVXVYjQVVNSQP4FJgis/r8cDjfmVmAdOBeLyEsQVIdcvmA73wak1vA9e7ZQqA29zzHwLzaoolMzNTa2vu3Lm1XjaUIjGuJRvz9K7nP9BdBw6HO5Rj1Mfndbi4VG9+bZl2v3OW/uLVpXroSElExBUKFldwKotrb8ERPfWvH+uwB+bojv3h+Z+o6+cFLNEA9vehbNDdhteXUaGLm+ZrKzBTVUtUdQNe7STDTV+uqjmqWgrMAIb6LPOOez4dGBii+E0QFuTkMW7KQl77uphT/vIxP3t5KZ+u3UN5eWhqvA1tR/5hLnvqf7z75Xbu+N5xPH7lEBuDyVSrdYtmPHP1MAqPlDLx5SUUVXIbgMYilIlkMZAhIj1FpBlwBTDTr8wM4AwAEUkD+gI5btlUEWnnyp3Jt30rM4Ax7vnpeMnHhNGyzfu4/oXFdGmdxG9GJHLdqB4s3JDHNc8t4tS/zeWxj9axM78o3GHW2tJNe7nwsfnk7DnEM+OHceOYPk12KAwTnOM6tOSRywezYms+d729oqIlptEJWSJxNYlJwGxgDfCGqq4SkftF5CJXbDaQJyKrgbnAHaqap6plwO3ARyKyEm9U62fcMg8CP3LT/4LXl2LC5Ktt+Vzz3CLSWibw6oQRZLSO5Tfn92PBPWfx2JVD6JGWxD/mrOWUBz9iwouL+WjNrqg6LfKNxVu48umFtEiIZfovTvnOFc3GBOLc/h247Zy+zFi+nac/ywl3OCER0rq5qr4HvOc37V6f5wrc6h7+y86hkmYrVd0PnF/vwZqgfbPzIOOfXUhKYjxTbxhJekoia9y8hLhYLhzUiQsHdWJj7iFeX7KFN5ds5cM1S+iQkshlJ3Xl8pO60jmAGzqFQ2lZOQ/8Zw0vfL6RUzPSeOzKIaQmNQt3WCZKTTqzD1/vPMiD739N3/SWjDm+fbhDqld20ruplfV7Chg3ZQHN4mKYesOIahNCj7QW3Dn2eP5395k8edVQjuvQksc+Xsfov37Mtc8v4v2vdlISQbWUfYeKueb5Rbzw+UauH92T5689yZKIqRMR4aEfD+SEDinc9NoXZO8uqHmhKGK9hSZom/MKGffMQgBenTAy4HtsxMfGMHZAR8YO6MiWvYW8uWQLry/Zws9eWUq7lgn8OLMLV5zUjW5tk0IZfrXW7jrIhBeXsDO/iL//eFCTuAbANIykZnE8c80wLnosi4kvLWH6jaNo1bxx3NPEaiQmKNv2H+bKZxZQ
VFrGKxNG0Kd97c5R79omiVvPPY75d57JlKuHMahLK578dD2nPTSXq6YsZNaK7d+590ND+GDVTn4weT5FJWW8/n8jLYmYetc5tTlPXJXJln2F/PK1LyhrJGc1Wo3EBGzXgSLGPbOAA0UlvHbDSI7vkFLzQjWIi43h7H7pnN0vnR35h3lzyVZeX7yFSVO/oE2LZlya2YUrTupKr3ahuwhNVXns42wenrOWQV1TeXp8Jul+dww0pr4M79mG+y8ewN3vrOTB/67hN+f3q3mhCGeJxAQkt+AI46YsZM/BI7w8YQQDOtf/6LYdWzXnprMyuHFMH+at28O0RVt4LmsDT3+Ww4iebbhyeDfGDuhQr/cLLywu5fY3v+S9lTv54ZDO/PmHJ0bk/chN43Ll8G6s2XGAZ+Zt4PgOKfwoymu/lkhMjfYXFnPVlIVs3VfIC9cNZ2i31iHdXmyMcMZx7TnjuPbsPljEW0u9Wsotry8n9d/x/GBIZ64c3o2+6S1rXlk1tuwt5IaXlrB210F+e/4JXD+6p10fYhrM7y7ox7pdBdw9fSW92yczuGtqzQtFKOsjMdU6UFTC1c8tImfPIZ4eP4yRvdo26Pbbt0zkF2f0Ye5tZ/DqhBGM7pPGKws2ce4jn/GjJz7nzSVbOFwc/BXDC3LyuHjyfLbvP8zz1w1nwqm9LImYBhUfG8PkcUNJT0lg4ktL2HUgei/atURiqnToSCnXPb+Y1dsP8K9xQzmtb7uaFwqRmBhhVJ80Hv/JUBbcfRa/+f4J7Css5o63VjD8zx/yuxlfsWp7fkDrennBJq6aspDWSfHMuHEUp4fxfZmmrY0bRqXgSCkTX14atcOoWCIxlSoqKWPCi0v4YvM+Hr1ySERd0d02OYEbTuvFR7eezusTR3L2Cem8vmQL5z+axcWPZ/Haos0UHCk9Zrni0nLumb6S3834itP6tmP6jaNC2olvTCCO75DCw5cN5sst+7nnnZVROYyKJRJzjCOlZUx8eSkLNuTx8GWD+f6JkXnvcRFhRK+2PHL5YBbdcxa/v7AfRSXl3P3OSkb86UPufmcFK7buR1U5cES5aspCpi7czC/O6M0zVw8jJbFxnMNvot/YAR249Zy+vPPFNqbM2xDucIJmne3mO0rKypk09Qs+W7uHv/7oRC4Z0jncIQUkNakZ143qybWn9GDZ5v1MW7SZGV9s57VFW+jXMYVd+w5zqKyIR68cwkWDOoU7XGOO8csz+/D1zgP85b9ryEhP5ozjomcYFauRmKPKypVfvb6cOat3cf/F/bn8pG7hDiloIkJm99Y89ONBLPzNWfzxkgGIQEIcvPWzUyyJmIglIvz9x4M4rkMKv3ztC9bviZ5hVCyRGADKy5U73vqSWSt2cM/3j+fqk3uEO6Q6S0mMZ/zI7vznplP58+ikkFz7Ykx9SmoWxzNXZxIfG8MNLy3hQFFJuEMKiCUSg6ry23e/4p1l27j1nL5MPK13uEMypsnq0jqJJ8YNZXNeITdFyTAqlkiaOFXl/lmrj3ZC//LMPuEOyZgmb0Svtvzh4v588s0e/vb+1+EOp0bW2d6EqSp/m/0Nz8/fyE9H9eSO7x1nF+UZEyHGjejOmh0HeOqzHI7v2JIfDIncYVSsRtKEPfpRNk98sp5xI7rxuwtOsCRiTIT5/YX9GdmrDXe+vZIvt+wPdzhVskTSRD316Xoe+XAtPxrahT9ePMCSiDERKD42hn+Ny6R9ywQmvryE3RE6jIolkibohfkb+Mt/v+aCgR3526UDiYmxJGJMpKoYRuVgUeQOo2KJpIl5bdFm7vv3as7tl84jlw8m1pKIMRHvhI4pPHzZIJZv2c9vpn8VccOoWCJpQqZ/sZV7pq/k9L7teOwnQ4iPta/fmGgxdkBHbjk7g7eXbeXZrMgaRsX2JE3Ef1bs4LY3vuTkXm15anwmCXF28yZjos1NZ2Zw3oAO/Pm9NXy6dk+4wznKEkkTMGf1Lm6e9gVDu7VmyjXD7A6AxkSp
mBhvGJW+6S355dRlbMg9FO6QAEskjd5na/dw46vL6N8pheevO4mkZnbpkDHRrEVCHM9cPYy42BgmvLg4IoZRsUTSiC3IyWPiy0vo3T6ZF386nJY2bLoxjULXNkn8a9xQNuUVcnMEDKNiiaSRWrppHz99YTFdWyfxyvXDSU1qFu6QjDH1aGSvtvz+ov7M/WYPD83+JqyxhDSRiMhYEflGRLJF5K4qylwmIqtFZJWITPWZ3k1EPhCRNW5+D7/lHhWR6BlnuQGt3JrPtc8ton3LBF6dMIK2yQnhDskYEwLjR3Zn3IhuPPnpet5dvi1scYSswVxEYoHJwDnAVmCxiMxU1dU+ZTKAu4FRqrpPRHzv5PIS8CdVnSMiyUC5z3LDgNahij2ardlxgPHPLSSleTxTbxhJ+5TEcIdkjAmh31/Yn3W7C/j1WyvomdaCgV1SGzyGUNZIhgPZqpqjqsXANOBivzI3AJNVdR+Aqu4GEJF+QJyqznHTC1S10M2LBR4Cfh3C2KNS9u4CrpqykMS4WF67YSSdUpuHOyRjTIg1i4vhiXFDSUtOYOJLS8MyjIqE6gpJEbkUGKuqE9zr8cAIVZ3kU2YGsBYYBcQC96nq+yJyCTABKAZ6Ah8Cd6lqmYjcDMSo6iMiUqCqyVVsfyIwESA9PT1z2rRptXofBQUFJCdXuomw8o9rd2E5f15YRLnC3cMT6Zgcnu6vaPm8IoXFFRyLq2qbD5TxwMIiurWM4c7hicTHSJ3jGjNmzFJVHVZjQVUNyQO4FJji83o88LhfmVnAdCAeL2FsAVLdsvlAL7zmt7eB64FOQBZebQWgIJBYMjMztbbmzp1b62VDyTeuLXsP6Sl/+UgH/2G2fr3jQPiC0uj4vCKJxRUci6t6763Yrt3vnKW3vbFcy8vL6xwXsEQD2MeG8qKCbUBXn9dd3DRfW4GFqloCbBCRtUCGm75cVXPgaM1lJLAT6ANku9Fqk0QkW1Wb7N2YduYXMW7KQg4UlfDaDSM5rkPLcIdkjAmT807syE1nZfDoR+s4oWMKDXWv01C2fywGMkSkp4g0A64AZvqVmQGcASAiaUBfIMctmyoi7Vy5M4HVqvofVe2gqj1UtQdQ2JSTSG7BEcZNWUDuwSO8+NPhdk+EQEwqAAAbbElEQVRyYwy3nJXB9/qn86f/rOar3IYZKThkiURVS4FJwGxgDfCGqq4SkftF5CJXbDaQJyKrgbnAHaqap6plwO3ARyKyEhDgmVDFGo0KipWrpixk2/7DPHftSQztZiexGWO8YVQevmwwfdNb8sSXRWxsgGFUQjpehqq+B7znN+1en+cK3Ooe/svOAQbWsP7I63VrAPmHS/j7kiK2F8Jz15zEiF5twx2SMSaCVAyj8qsXPyOleehHtLAr26PQA7NWs+VgOU9eNZTRGWnhDscYE4G6tkli0pBE2rQI/agWlkiiTHm58vHXuxneMZYzj08PdzjGGGOJJNqs2XmAvEPFDGhrQ8EbYyKDJZIoMz87F4B+lkiMMRHCbk4RZeatyyWjfTKtEyPrns3GmKbLaiRRpKikjMUb9zKqj3WwG2MihyWSKLJs0z6KSso51c7UMsZEEEskUSQrO5e4GLHrRowxEcUSSRTJys5lSLdUkhOsa8sYEzkskUSJ/YXFrNyWb/0jxpiIY4kkSny+Pg9VrH/EGBNxLJFEiazsXJIT4sJyG01jjKmOJZIoMT87l5G92hIfa1+ZMSay2F4pCmzZW8imvEJG97GztYwxkccSSRTIcsOijM5oV0NJY4xpeJZIokDWulw6pCTSu12LcIdijDHHsEQS4crLlfnrcxnVJw13n3pjjIkolkgi3KrtB9hfWGKn/RpjIpYlkghX0T9yinW0G2MilCWSCJeVvYfjO7SkfcvEcIdijDGVskQSwbxh4/fZsCjGmIhmiSSCLd64l+LSckZb/4gxJoJZIolgWdm5xMcKI3q2CXcoxhhTJUskEWx+di5Du7UmqZkNG2+MiVw1
JhIR+aWItG6IYMy39h4qZtX2A4y2/hFjTIQLpEaSDiwWkTdEZKzYVXEN4vP1uahi/SPGmIhXYyJR1d8CGcCzwLXAOhH5s4j0DnFsTVrWulxaJsZxYudW4Q7FGGOqFVAfiaoqsNM9SoHWwFsi8rfqlnM1mG9EJFtE7qqizGUislpEVonIVJ/p3UTkAxFZ4+b3cNNfdev8SkSeE5H4gN5pFFFV5q3L5eRebYmzYeONMREukD6Sm0VkKfA3YD5woqr+HMgEflTNcrHAZOA8oB9wpYj08yuTAdwNjFLV/sAtPrNfAh5S1ROA4cBuN/1V4HjgRKA5MCGA9xlVNuUVsm3/YRsWxRgTFQI5HagN8ENV3eQ7UVXLReSCapYbDmSrag6AiEwDLgZW+5S5AZisqvvcOne7sv2AOFWd46YX+Gz3vYrnIrII6BLAe4gqFcOi2IWIxphoIF6rVTUFREYCq1T1oHudApygqgtrWO5SYKyqTnCvxwMjVHWST5kZwFpgFBAL3Keq74vIJXg1jWKgJ/AhcJeqlvksGw8sBG5W1XmVbH8iMBEgPT09c9q0adW+z6oUFBSQnJxcq2Vr67EvitiYX87fT29e5Yi/4YgrEBZXcCyu4FhcwalrXGPGjFmqqsNqLKiq1T6AL3AJx72OAZYFsNylwBSf1+OBx/3KzAKmA/F4CWMLkOqWzQd64dWa3gau91v2GeCfNcWhqmRmZmptzZ07t9bL1kZpWbme+Pv39Y43l1dbrqHjCpTFFRyLKzgWV3DqGhewRAPYxwbSkytuhRWJp5zAmsS2AV19Xndx03xtBWaqaomqbsCrnWS46ctVNUdVS4EZwNCjAYn8HmgH3BpAHFFl5bZ8DhSV2t0QjTFRI5BEkiMiN4lIvHvcDOQEsNxiIENEeopIM+AKYKZfmRnAGQAikgb0deteDKSKSMXe9Exc34qITAC+B1zpklqjMr9i2PjeNmy8MSY6BJJIfgacgleb2AqMwPU9VMfVJCYBs4E1wBuqukpE7heRi1yx2UCeiKwG5gJ3qGqeen0htwMfichKQPCasgCexLtI8n8islxE7g3wvUaFeev20K9jCmnJCeEOxRhjAlJjE5V6Z1JdUZuVq3eG1Xt+0+71ea54zVPHNFGpd8bWwEqmN9qBpwqLS1m2aT/XjuoR7lCMMSZgNe6URSQRuB7oDxy9u5Kq/jSEcTVJizfuo7is3MbXMsZElUCatl4GOuD1S3yK12l+MJRBNVVZ6/bQLDaGk3rYsPHGmOgRSCLpo6q/Aw6p6ovA+Xj9JKaeZWXnkdm9Nc2bxYY7FGOMCVggiaTE/d0vIgOAVkD70IXUNO05eIQ1Ow7YaL/GmKgTSMf10+5+JL/FO303GfhdSKNqgj5f7532a/0jxphoU20iEZEY4IB6Y2F9hneluQmBrHW5tGoezwAbNt4YE2WqbdpyF/z9uoFiabJUlfnZuZzSuy2xMXbfMGNMdAmkj+RDEbldRLqKSJuKR8gja0Jycg+xPb/I+keMMVEpkD6Sy93fG32mKdbMVW8qhkWx/hFjTDQK5Mr2ng0RSFM2b10uXds0p3vbFuEOxRhjghbIle1XVzZdVV+q/3CantKychasz+OCQR3DHYoxxtRKIE1bJ/k8TwTOApbh3QrX1NGKbfkcPFLK6D42bLwxJjoF0rT1S9/XIpIK1O52g+YYWetyEYGTbdh4Y0yUCuSsLX+H8O5maOpBVnYu/Tul0KZFs3CHYowxtRJIH8m/8c7SAi/x9APeCGVQTcWhI6V8sXkf14+2E+CMMdErkD6Sv/s8LwU2qerWEMXTpCzasJeSMrXTfo0xUS2QRLIZ2KGqRQAi0lxEeqjqxpBG1gTMW5dLQlwMw3q0DncoxhhTa4H0kbwJ+N4bvcxNM3U0PzuXk3q0ITHeho03xkSvQBJJnKoWV7xwz61nuI52Hyjim10HbVgUY0zUCySR7BGRiypeiMjFQG7oQmoa5tuw8caYRiKQPpKfAa+K
yOPu9Vag0qvdTeDmrculdVI8/TqmhDsUY4ypk0AuSFwPjBSRZPe6IORRNXJHh43vk0aMDRtvjIlyNTZticifRSRVVQtUtUBEWovIAw0RXGOVvbuAXQeOcKo1axljGoFA+kjOU9X9FS/c3RK/H7qQGr8sN2z8KEskxphGIJBEEisiCRUvRKQ5kFBNeVOD+dm5dG+bRNc2SeEOxRhj6iyQzvZXgY9E5HlAgGuBF0MZVGNWUlbOgpy9XDy4U7hDMcaYelFjjURV/wo8AJwAHAfMBroHsnIRGSsi34hItojcVUWZy0RktYisEpGpPtO7icgHIrLGze/hpvcUkYVuna+LSFRd0/Lllv0UHCm1036NMY1GoKP/7sIbuPHHwJnAmpoWEJFYYDJwHt5Aj1eKSD+/MhnA3cAoVe0P3OIz+yXgIVU9ARgO7HbT/wo8oqp9gH3A9QG+h4gwzw0bf0pvSyTGmMahykQiIn1F5Pci8jXwGN6YW6KqY1T18aqW8zEcyFbVHHc1/DTgYr8yNwCTXQc+qrrbbbsf3hX1c9z0AlUtFBHBS2RvueVfBC4J9M1GgvnZuQzs3IpWSfHhDsUYY+qFqGrlM0TKgXnA9aqa7ablqGpAY56LyKXAWFWd4F6PB0ao6iSfMjOAtcAoIBa4T1XfF5FLgAlAMd69Tz4E7gJaAwtcbQQR6Qr8V1UHVLL9icBEgPT09Mxp02p3L66CggKSk5Nrtay/w6XKjR8V8v2e8Vzat24tcvUZV32yuIJjcQXH4gpOXeMaM2bMUlUdVmNBVa30gXekPw3YAjyDd4vdDVWVr2T5S4EpPq/HA4/7lZkFTAfi8RLGFiDVLZsP9MI7IeBtvCasNLxaTsXyXYGvaoolMzNTa2vu3Lm1XtbfnFU7tfuds3R+9p46r6s+46pPFldwLK7gWFzBqWtcwBINYH9fZdOWqs5Q1SuA44G5eP0X7UXkCRE5N4Bkts3t6Ct0cdN8bQVmqmqJqm7Aq51kuOnL1WsWKwVmAEOBPCBVROKqWWfEysrOJTE+hszuNmy8MabxCOSsrUOqOlVVL8TbcX8B3BnAuhcDGe4sq2bAFcBMvzIzgDMARCQN6AvkuGVTRaSdK3cmsNplyLl4NRaAa4B3A4glImRl5zK8Z1sS4mzYeGNM4xHUPdtVdZ+qPq2qZwVQthSYhHe68BrgDVVdJSL3+4wmPBvIE5HVeAniDlXNU9Uy4Ha861dW4l2/8oxb5k7gVhHJBtoCzwbzHsJlR/5hsncX2LAoxphGJ5ALEmtNVd8D3vObdq/PcwVudQ//ZecAAyuZnoN3RlhUmZ+dB9iwKMaYxieoGompvfnZubRt0YzjO7QMdyjGGFOvLJE0AFUlKzuXUTZsvDGmEbJE0gDW7ipgz8EjNiyKMaZRskTSAOat2wPAKLs/uzGmEbJE0gDmZ+fSK60FnVObhzsUY4ypd5ZIQqy4tJyFG/Yy2mojxphGyhJJiH2xeR+FxWV22q8xptGyRBJiWdm5xAic3LttuEMxxpiQsEQSYlnZuQzqmkpKog0bb4xpnCyRhFD+4RK+3LLfhkUxxjRqlkhCaEFOHuVqw6IYYxo3SyQhlLUul6RmsQzpZsPGG2MaL0skITQ/O5cRPdvQLM4+ZmNM42V7uBDZtv8wObmHrFnLGNPoWSIJkfnrcgE4NaNdDSWNMSa6WSIJkazsXNq1TKBvenK4QzHGmJCyRBIC5eXK/OxcRvdJQ8SGjTfGNG6WSELg650HyTtUbP0jxpgmwRJJCGRle8PG2/1HjDFNgSWSEMjKzqNP+2Q6tEoMdyjGGBNylkjqWVFJGYs25FltxBjTZFgiqWfLNu+jqKTcEokxpsmwRFLPstblEhsjjLRh440xTYQlkno2PzuXIV1TSU6IC3coxhjTICyR1KP8whJWbMu3036NMU2KJZJ69Pn6XFThVLs/uzGmCbFEUo+ysnNJTohjUNfUcIdijDENJqSJRETGisg3IpItIndVUeYy
EVktIqtEZKrP9DIRWe4eM32mnyUiy9z0LBHpE8r3EIys7FxG9mpDfKzlZ2NM0xGyHmERiQUmA+cAW4HFIjJTVVf7lMkA7gZGqeo+EWnvs4rDqjq4klU/AVysqmtE5BfAb4FrQ/U+ArVlbyGb8gq59pQe4Q7FGGMaVCgPnYcD2aqao6rFwDTgYr8yNwCTVXUfgKruDmC9CqS4562A7fUUb51kZVcMG2/9I8aYpkVUNTQrFrkUGKuqE9zr8cAIVZ3kU2YGsBYYBcQC96nq+25eKbAcKAUeVNUZbvqpwAzgMHAAGKmqByrZ/kRgIkB6enrmtGnTavU+CgoKSE6ueSj4fy0vYu2+ch45o3mDjPgbaFwNzeIKjsUVHIsrOHWNa8yYMUtVdViNBVU1JA/gUmCKz+vxwON+ZWYB04F4oCewBUh18zq7v72AjUBv9/odvIQEcIfvNqp6ZGZmam3NnTu3xjJlZeU6+A+z9dbXl9d6O8EKJK5wsLiCY3EFx+IKTl3jApZoAPv7UDZtbQO6+rzu4qb52grMVNUSVd2AVzvJAFDVbe5vDvAJMERE2gGDVHWhW/514JSQvYMArd5xgH2FJYzOsKvZjTFNTygTyWIgQ0R6ikgz4Apgpl+ZGcAZACKSBvQFckSktYgk+EwfBawG9gGtRKSvW/4cYE0I30NA5rnb6tqFiMaYpihkZ22paqmITAJm4/V/PKeqq0Tkfrzq0kw371wRWQ2UAXeoap6InAI8JSLleMnuQXVne4nIDcDbbt4+4Keheg+Bmp+dy3HpLWnf0oaNN8Y0PSEdEEpV3wPe85t2r89zBW51D98ynwMnVrHO6Xj9KhGhqKSMRRv3ctWI7uEOxRhjwsKunKujJRv3UVxabqf9GmOaLEskdZSVnUt8rDC8Z5twh2KMMWFhiaSOsrL3MKRba1rYsPHGmCbKEkkd7D1UzKrtB+xuiMaYJs0SSR1UDBs/2vpHjDFNmCWSOpifnUvLxDgGdm4V7lCMMSZsLJHUkqoyb10uJ/dqS5wNG2+MacJsD1hLm/cWsnXfYWvWMsY0eZZIaqliWBTraDfGNHWWSGppfnYunVol0jOtRbhDMcaYsLJEUgtl5crn6/MY1SetQe49YowxkcwSSS18tS2f/MMl1j9ijDFYIqmVitvq2rDxxhhjiaRWstblckLHFNKSE8IdijHGhJ0lkiAdLi5j6aZ9jO5jd0M0xhiwRBK0RRv3UlxWzuiMduEOxRhjIoIlkiDNz86lWWwMw3vYsPHGGAOWSII2b10umd1b07xZbLhDMcaYiGCJJAi5BUdYs+OAnfZrjDE+LJEEYX62DYtijDH+LJEEYX52LimJcQywYeONMeYoSyQBUlWy1uVySu80YmNsWBRjjKlgiSRAG3IPsT2/yPpHjDHGjyWSAGVZ/4gxxlTKEkmAstbl0qV1c7q3TQp3KMYYE1EskQSgtKyc/+XkMdqGjTfGmGOENJGIyFgR+UZEskXkrirKXCYiq0VklYhM9ZleJiLL3WOmz3QRkT+JyFoRWSMiN4XyPQCs2JbPwaJS6x8xxphKxIVqxSISC0wGzgG2AotFZKaqrvYpkwHcDYxS1X0i0t5nFYdVdXAlq74W6Aocr6rlfsuExPx1uYjAKb0tkRhjjL9Q1kiGA9mqmqOqxcA04GK/MjcAk1V1H4Cq7g5gvT8H7lfV8iCWqZN52bn075RCmxbNQr0pY4yJOqKqoVmxyKXAWFWd4F6PB0ao6iSfMjOAtcAoIBa4T1Xfd/NKgeVAKfCgqs5w0/OAh4EfAHuAm1R1XSXbnwhMBEhPT8+cNm1ard5HXn4Bv14gfK9HPJcdFzmJpKCggOTk5HCHcQyLKzgWV3AsruDUNa4xY8YsVdVhNRZU1ZA8gEuBKT6vxwOP+5WZBUwH4oGewBYg1c3r7P72AjYCvd3rAuA29/yHwLyaYsnMzNTa+ucbc7T7nbN03to9tV5HKMydOzfcIVTK4gqOxRUciys4dY0L
WKIB7O9D2bS1Da8vo0IXN83XVmCmqpao6ga82kkGgKpuc39zgE+AIT7LvOOeTwcGhiL4Cqtzy2gWF8OwHq1DuRljjIlaoUwki4EMEekpIs2AK4CZfmVmAGcAiEga0BfIEZHWIpLgM30UsNpnmTHu+el4ySdkVuWVcVKP1iTG27DxxhhTmZCdtaWqpSIyCZiN1//xnKquEpH78apLM928c0VkNVAG3KGqeSJyCvCUiJTjJbsH9duzvR4EXhWRX+E1c00I1XvYfbCIrQXKuD52N0RjjKlKyBIJgKq+B7znN+1en+cK3OoevmU+B06sYp37gfPrPdhK2LDxxhhTM7uyvRpZ6/JoEQ/9O6WEOxRjjIlYlkiq0bt9C07vEk+MDRtvjDFVCmnTVrT7xRl9+ISt4Q7DGGMimtVIjDHG1IklEmOMMXViicQYY0ydWCIxxhhTJ5ZIjDHG1IklEmOMMXViicQYY0ydWCIxxhhTJyG7sVUkEZE9wKZaLp4G5NZjOPXF4gqOxRUciys4jTWu7qpa46i1TSKR1IWILNFA7hDWwCyu4FhcwbG4gtPU47KmLWOMMXViicQYY0ydWCKp2dPhDqAKFldwLK7gWFzBadJxWR+JMcaYOrEaiTHGmDqxRGKMMaZOLJFUQ0TGisg3IpItIneFOx4AEXlORHaLyFfhjsWXiHQVkbkislpEVonIzeGOCUBEEkVkkYh86eL6Q7hj8iUisSLyhYjMCncsFURko4isFJHlIrIk3PFUEJFUEXlLRL4WkTUicnIExHSc+5wqHgdE5JZwxwUgIr9yv/mvROQ1EUkM2basj6RyIhILrAXOAbYCi4ErVXV1mOM6DSgAXlLVAeGMxZeIdAQ6quoyEWkJLAUuiYDPS4AWqlogIvFAFnCzqi4IZ1wVRORWYBiQoqoXhDse8BIJMExVI+oCOxF5EZinqlNEpBmQpKr7wx1XBbfP2AaMUNXaXgBdX7F0xvut91PVwyLyBvCeqr4Qiu1ZjaRqw4FsVc1R1WJgGnBxmGNCVT8D9oY7Dn+qukNVl7nnB4E1QOfwRgXqKXAv490jIo6eRKQLcD4wJdyxRDoRaQWcBjwLoKrFkZREnLOA9eFOIj7igOYiEgckAdtDtSFLJFXrDGzxeb2VCNgxRgMR6QEMARaGNxKPaz5aDuwG5qhqRMQF/BP4NVAe7kD8KPCBiCwVkYnhDsbpCewBnndNgVNEpEW4g/JzBfBauIMAUNVtwN+BzcAOIF9VPwjV9iyRmHolIsnA28Atqnog3PEAqGqZqg4GugDDRSTsTYIicgGwW1WXhjuWSoxW1aHAecCNrjk13OKAocATqjoEOARERL8lgGtquwh4M9yxAIhIa7wWlJ5AJ6CFiFwVqu1ZIqnaNqCrz+subpqpguuDeBt4VVXfCXc8/lxTyFxgbLhjAUYBF7n+iGnAmSLySnhD8rijWVR1NzAdr5k33LYCW31qk2/hJZZIcR6wTFV3hTsQ52xgg6ruUdUS4B3glFBtzBJJ1RYDGSLS0x1tXAHMDHNMEct1aj8LrFHVh8MdTwURaSciqe55c7yTJ74Ob1SgqnerahdV7YH32/pYVUN2xBgoEWnhTpbANR2dC4T9DEFV3QlsEZHj3KSzgLCeyOHnSiKkWcvZDIwUkST3v3kWXr9lSMSFasXRTlVLRWQSMBuIBZ5T1VVhDgsReQ04A0gTka3A71X12fBGBXhH2OOBla4/AuAeVX0vjDEBdARedGfUxABvqGrEnGobgdKB6d6+hzhgqqq+H96Qjvol8Ko7sMsBrgtzPMDRhHsO8H/hjqWCqi4UkbeAZUAp8AUhHC7FTv81xhhTJ9a0ZYwxpk4skRhjjKkTSyTGGGPqxBKJMcaYOrFEYowxpk4skRhTSyJS5jfya71daS0iPSJthGdjqmLXkRhTe4fd0CvGNGlWIzGmnrn7efzN3dNjkYj0cdN7iMjHIrJCRD4SkW5uerqITHf3TPlSRCqGsogVkWfc
PSU+cFfmIyI3ufu+rBCRaWF6m8YcZYnEmNpr7te0dbnPvHxVPRF4HG+UX4DHgBdVdSDwKvCom/4o8KmqDsIbP6piBIUMYLKq9gf2Az9y0+8Chrj1/CxUb86YQNmV7cbUkogUqGpyJdM3Ameqao4byHKnqrYVkVy8m3+VuOk7VDVNRPYAXVT1iM86euANeZ/hXt8JxKvqAyLyPt7NzWYAM3zut2JMWFiNxJjQ0CqeB+OIz/Myvu3TPB+YjFd7WexuXGRM2FgiMSY0Lvf5+z/3/HO8kX4BxgHz3POPgJ/D0ZtwtapqpSISA3RV1bnAnUAr4JhakTENyY5kjKm95j4jHQO8r6oVpwC3FpEVeLWKK920X+Ld4e8OvLv9VYxeezPwtIhcj1fz+DneXe0qEwu84pKNAI9G4C1nTRNjfSTG1DPXRzJMVXPDHYsxDcGatowxxtSJ1UiMMcbUidVIjDHG1IklEmOMMXViicQYY0ydWCIxxhhTJ5ZIjDHG1Mn/B6oaBzYRR+bHAAAAAElFTkSuQmCC\n",
      "text/plain": [
       "<Figure size 432x288 with 1 Axes>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "#=================================================Main================================================================\n",
    "# End-to-end run: build the channel autoencoder graph (encoder -> ISI channel -> decoder),\n",
    "# train it, then plot per-epoch training accuracy.\n",
    "# Depends on Config, ChannelSystem (and the classes they use) defined in earlier cells.\n",
    "# TF1-style: reset the default graph so re-running this cell doesn't duplicate variables.\n",
    "tf.reset_default_graph()\n",
    "\n",
    "#channel specs\n",
    "# NOTE(review): these keys are consumed by the ISI channel model defined elsewhere;\n",
    "# semantics/units of noise_std and distrib_param are not visible in this cell -- confirm there.\n",
    "chan_type = \"ISI\"\n",
    "chan_param = {\"ISI_distrib\": \"rayleigh\",\n",
    "              \"chan_filt_width\":3, \n",
    "              \"stride\": 1,\n",
    "              \"distrib_param\": 1,\n",
    "              \"noise_std\": 1}\n",
    "channel = {\"type\": chan_type, \"chan_param\": chan_param}\n",
    "\n",
    "#Enc and dec filter specs\n",
    "# Each dict describes ONE conv layer made of parallel 1-D filters:\n",
    "# filt_sz[i] / n_ch[i] / strides[i] are the width, output channels, and stride of parallel branch i.\n",
    "enc_L1 = {\"filt_sz\": [16, 32, 8, 4, 16], \"n_ch\":[32, 32, 32, 32, 32], \"strides\":[2, 16, 4, 4, 8]}\n",
    "\n",
    "dec_L1 = {\"filt_sz\": [16, 32, 8, 4, 16], \"n_ch\":[32, 32, 32, 32, 32], \"strides\":[4, 16, 4, 1, 8]}\n",
    "\n",
    "#List of attributes for each layer. The keys within a layer map to attributes for PARALLEL filters\n",
    "# enc_filters = [enc_L1, enc_L2] \n",
    "enc_filters = [enc_L1] \n",
    "dec_filters = [dec_L1]\n",
    "\n",
    "#bit vector dims\n",
    "chan_enc_in_len = 400\n",
    "numb_tx_bits = 600\n",
    "# NOTE(review): the saved output of a later cell shows an encoder dense layer of width 450,\n",
    "# which disagrees with numb_tx_bits = 600 here -- outputs appear stale; re-run top-to-bottom to refresh.\n",
    "\n",
    "#Dimensions of FC layers. All have RELU acts, except last layer of enc(tanh) and dec(None) \n",
    "enc_FCs = [512, numb_tx_bits]\n",
    "# dec_FCs = [4096, 1024, chan_enc_in_len]\n",
    "dec_FCs = [1024, 256, 40]\n",
    "# NOTE(review): the decoder's final FC width (40) differs from chan_enc_in_len (400), yet the\n",
    "# commented alternative above ends in chan_enc_in_len -- 40 may be a typo for 400; confirm against\n",
    "# the shape ChannelSystem expects for decoder targets.\n",
    "\n",
    "sys_config = Config(enc_filters, dec_filters, numb_tx_bits, chan_enc_in_len, channel, enc_FCs, dec_FCs,\n",
    "                   n_epochs = 20, batch_size = 100, batch_per_epoch= int(1e3), train_size = int(1e5), test_size = int(1e6),\n",
    "                   print_every = 1e3, init_lr = 0.001)\n",
    "\n",
    "print(vars(sys_config))\n",
    "channel_system = ChannelSystem(sys_config)\n",
    "\n",
    "with tf.Session() as sess:\n",
    "    sess.run(tf.global_variables_initializer())\n",
    "    train_accs = channel_system.train(sess) #params are weights of encoder and decoder conv layers\n",
    "    test_accs = channel_system.test(sess) \n",
    "      \n",
    "# Plot training accuracy; assumes channel_system.train returns a sequence of per-epoch accuracies.\n",
    "plt.plot(train_accs)\n",
    "plt.title(\"Accuracy with epochs, (inp_len, tx_bits) = ({}, {})\".format(str(chan_enc_in_len), str(numb_tx_bits)))\n",
    "plt.xlabel('Epochs')\n",
    "plt.ylabel('Accuracy')\n",
    "plt.grid(True)  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[<tf.Variable 'encoder/encL1P1:0' shape=(16, 1, 32) dtype=float32_ref>, <tf.Variable 'encoder/encL1P2:0' shape=(32, 1, 32) dtype=float32_ref>, <tf.Variable 'encoder/encL1P3:0' shape=(8, 1, 32) dtype=float32_ref>, <tf.Variable 'encoder/encL1P4:0' shape=(4, 1, 32) dtype=float32_ref>, <tf.Variable 'encoder/encL1P5:0' shape=(16, 1, 32) dtype=float32_ref>, <tf.Variable 'encoder/fully_connected/weights:0' shape=(1536, 512) dtype=float32_ref>, <tf.Variable 'encoder/fully_connected/biases:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'encoder/dense/kernel:0' shape=(512, 450) dtype=float32_ref>, <tf.Variable 'encoder/dense/bias:0' shape=(450,) dtype=float32_ref>]\n"
     ]
    }
   ],
   "source": [
    "# Inspect all trainable variables in the current default graph.\n",
    "# Only meaningful after the Main cell above has built the graph in this kernel session.\n",
    "print([var for var in tf.trainable_variables()])\n",
    "# print(vars(sys_config))\n",
    "\n",
    "# NOTE(review): the commented pooling config below is dead scratch code kept for reference;\n",
    "# it is unused by the current (pooling-disabled) encoder/decoder builds.\n",
    "# enc_L1[\"pool_size\"] = [4, 4, 4, 4]\n",
    "# enc_L1[\"pool_strides\"] = [4, 4, 4, 4]\n",
    "# enc_L2[\"pool_size\"] = [4,4,4]\n",
    "# enc_L2[\"pool_strides\"] = [4,4,4]\n",
    "\n",
    "# dec_L1[\"pool_size\"] = enc_L1[\"pool_size\"]\n",
    "# dec_L1[\"pool_strides\"]= enc_L1[\"pool_strides\"]\n",
    "# dec_L2[\"pool_size\"] = enc_L2[\"pool_size\"]\n",
    "# dec_L2[\"pool_strides\"]= enc_L2[\"pool_strides\"]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# #Original \n",
    "# class ChannelEncoder(object):\n",
    "#     def __init__(self, enc_input, isTrain, config):\n",
    "#         self.isTrain = isTrain\n",
    "#         self.config = config\n",
    "#         self.enc_input = enc_input\n",
    "#         self.enc_out_nonbin = self.build_chan_enc_net()\n",
    "#         self.enc_out = self.binarize(self.enc_out_nonbin)\n",
    "#     def build_chan_enc_net(self):\n",
    "        \n",
    "#         enc_filters = self.config.enc_filters #list of dicts for enc filter dims\n",
    "#         num_layers = len(enc_filters)\n",
    "        \n",
    "#         layer_outs = []\n",
    "#         layer_input = self.enc_input\n",
    "#         layer_input = tf.expand_dims(self.enc_input, len(self.enc_input.shape))\n",
    "\n",
    "#         for l, layer_dims in enumerate(enc_filters): #iterate over encoder layers \n",
    "#             n_ch = layer_dims[\"n_ch\"]\n",
    "#             filt_sz = layer_dims[\"filt_sz\"]\n",
    "#             strides = layer_dims[\"strides\"]\n",
    "#             #With pooling\n",
    "# #             pool_size = layer_dims[\"pool_size\"]\n",
    "# #             pool_strides = layer_dims[\"pool_strides\"]\n",
    "            \n",
    "#             num_parallels = len(filt_sz)\n",
    "            \n",
    "#             parallels = []\n",
    "#             for p in range(num_parallels): #iterate over parallels\n",
    "#                 filt_name = \"enc\"+\"L\"+str(l+1)+\"P\"+str(p+1)\n",
    "#                 n_chan_prev = 1 \n",
    "#                 filt_var = tf.get_variable(name = filt_name, shape = [filt_sz[p], n_chan_prev, n_ch[p]], initializer = tf.contrib.layers.xavier_initializer(), \n",
    "#                     dtype = tf.float32)\n",
    "#                 Z = tf.nn.conv1d(layer_input, filt_var, strides[p], 'SAME')\n",
    "#                 A = tf.nn.relu(Z)\n",
    "#                 #P1 = tf.nn.max_pool(A1, ksize = [1, poolSz[0], poolSz[0], 1], strides = [1, poolSz[0], poolSz[0], 1], padding = \"SAME\")\n",
    "# #                 P = tf.layers.max_pooling1d(A, pool_size[p], pool_strides[p], padding = \"SAME\")\n",
    "#                 P = A\n",
    "#                 F = tf.contrib.layers.flatten(P)\n",
    "#                 parallels.append(F)\n",
    "#             concatenated = tf.concat(parallels, axis=1)\n",
    "#             print(concatenated)\n",
    "#             layer_outs.append(concatenated)\n",
    "#             layer_input = tf.expand_dims(layer_outs[-1], len(layer_outs[-1].shape))\n",
    "        \n",
    "# #         print(layer_outs[-1])\n",
    "        \n",
    "#         fc_acts = []\n",
    "#         fc_acts.append(layer_outs[-1])\n",
    "        \n",
    "#         for fc in range(len(self.config.enc_FCs) - 1):\n",
    "#             fc = tf.contrib.layers.fully_connected(fc_acts[-1], self.config.enc_FCs[fc], activation_fn = tf.nn.relu)\n",
    "#             fc_acts.append(fc)\n",
    "        \n",
    "#         fc_out = tf.contrib.layers.fully_connected(fc_acts[-1], self.config.enc_FCs[-1], activation_fn = tf.tanh)\n",
    "        \n",
    "#         return tf.expand_dims(fc_out, len(fc_out.shape)) #Expand dims to fit conv across channel\n",
    "    \n",
    "#     def training_binarizer(self, input_layer):\n",
    "#         \"\"\"Binarizer function used at training\n",
    "#         \"\"\"\n",
    "#         prob = tf.truediv(tf.add(1.0, input_layer), 2.0)\n",
    "#         bernoulli = tf.contrib.distributions.Bernoulli(probs=prob, dtype=tf.float32)\n",
    "#         return 2 * bernoulli.sample() - 1\n",
    "\n",
    "#     def test_binarizer(self, input_layer):\n",
    "#         \"\"\"Binarizer function used during testing\n",
    "#         \"\"\"\n",
    "#         ones = tf.ones_like(input_layer,dtype=tf.float32)\n",
    "#         neg_ones = tf.scalar_mul(-1.0, ones)\n",
    "#         return tf.where(tf.less(input_layer,0.0), neg_ones, ones)\n",
    "\n",
    "#     def binarize(self, reduced_states):\n",
    "#         binarized = tf.cond(self.isTrain,\n",
    "#                             partial(self.training_binarizer, reduced_states),\n",
    "#                             partial(self.test_binarizer, reduced_states))\n",
    "\n",
    "#         pass_through = tf.identity(reduced_states) # this is used for pass through gradient back prop\n",
    "#         return pass_through + tf.stop_gradient(binarized - pass_through)\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "##Original\n",
    "# class ChannelDecoder(object):\n",
    "#     def __init__(self, chan_out, isTrain, config):\n",
    "#         self.isTrain = isTrain\n",
    "#         self.config = config\n",
    "#         self.dec_input = chan_out\n",
    "#         self.dec_out = self.build_chan_dec_net()\n",
    "#         self.dec_labels = tf.where(tf.less(self.dec_out,0.0), \n",
    "#                                  -1 * tf.ones_like(self.dec_out,dtype=tf.float32), \n",
    "#                                  tf.ones_like(self.dec_out,dtype=tf.float32))\n",
    "    \n",
    "#     def build_chan_dec_net(self):\n",
    "        \n",
    "#         dec_filters = self.config.dec_filters #list of dicts for enc filter dims\n",
    "#         num_layers = len(dec_filters)\n",
    "        \n",
    "#         layer_outs = []\n",
    "#         layer_input = self.dec_input\n",
    "#         for l, layer_dims in enumerate(dec_filters): #iterate over encoder layers \n",
    "#             n_ch = layer_dims[\"n_ch\"]\n",
    "#             filt_sz = layer_dims[\"filt_sz\"]\n",
    "#             strides = layer_dims[\"strides\"]\n",
    "            \n",
    "#             #With pooling\n",
    "# #             pool_size = layer_dims[\"pool_size\"]\n",
    "# #             pool_strides = layer_dims[\"pool_strides\"]\n",
    "            \n",
    "#             num_parallels = len(filt_sz)\n",
    "            \n",
    "#             parallels = []\n",
    "#             for p in range(num_parallels): #iterate over parallels\n",
    "#                 filt_name = \"dec\"+\"L\"+str(l+1)+\"P\"+str(p+1)\n",
    "#                 n_chan_prev = 1 \n",
    "#                 filt_var = tf.get_variable(name = filt_name, shape = [filt_sz[p], n_chan_prev, n_ch[p]], initializer = tf.contrib.layers.xavier_initializer(), \n",
    "#                     dtype = tf.float32)\n",
    "#                 Z = tf.nn.conv1d(layer_input, filt_var, strides[p], 'SAME')\n",
    "#                 A = tf.nn.relu(Z)\n",
    "# #                 P = tf.layers.max_pooling1d(A, pool_size[p], pool_strides[p], padding = \"SAME\")\n",
    "                \n",
    "#                 P = A\n",
    "#                 F = tf.contrib.layers.flatten(P)\n",
    "#                 parallels.append(F)\n",
    "#             concatenated = tf.concat(parallels, axis=1)\n",
    "#             print(concatenated)\n",
    "#             layer_outs.append(concatenated)\n",
    "#             layer_input = tf.expand_dims(layer_outs[-1], len(layer_outs[-1].shape))\n",
    "      \n",
    "#         fc_acts = []\n",
    "#         fc_acts.append(layer_outs[-1])\n",
    "        \n",
    "#         for fc in range(len(self.config.dec_FCs) - 1):\n",
    "#             fc = tf.contrib.layers.fully_connected(fc_acts[-1], self.config.dec_FCs[fc], activation_fn = tf.nn.relu)\n",
    "#             fc_acts.append(fc)\n",
    "        \n",
    "#         fc_out = tf.contrib.layers.fully_connected(fc_acts[-1], self.config.dec_FCs[-1], activation_fn = None)\n",
    "    \n",
    "#         return fc_out"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "##Original \n",
    "# class ChannelSystem():\n",
    "#     def __init__(self, config):\n",
    "#         self.config = config\n",
    "        \n",
    "#         #self.learn_rate = tf.placeholder(tf.float32,shape=(), name='learn_rate')\n",
    "#         self.lr = tf.placeholder(tf.float32, shape = [], name = 'lr')\n",
    "#         self.X = tf.placeholder(tf.float32, shape = [None, self.config.chan_enc_in_len], name = 'dataset_iter_X')\n",
    "#         self.Y = tf.placeholder(tf.float32, shape = [None, self.config.chan_dec_out_len], name = 'dataset_iter_Y')\n",
    "# #         print(\"X:\", self.X) \n",
    "#         self.isTrain = tf.placeholder(tf.bool,shape=(), name='isTrain')\n",
    "# #         self.chan_enc_inputs = tf.placeholder(tf.float32, shape = [None, self.config.chan_enc_in_len], name = 'enc_inputs')\n",
    "# #         self.dec_targets = tf.placeholder(tf.float32, shape = [None, self.config.chan_dec_out_len], name = 'dec_targets')\n",
    "            \n",
    "#         self.dataset = tf.data.Dataset.from_tensor_slices((self.X, self.Y)).shuffle(buffer_size=100).batch(config.batch_size).repeat()\n",
    "        \n",
    "#         self.iterator = self.dataset.make_initializable_iterator()\n",
    "        \n",
    "#         self.chan_enc_inputs, self.dec_targets = self.iterator.get_next()\n",
    "#         #Build channel coder\n",
    "#         print(\"Chan enc inputs: \", self.chan_enc_inputs)\n",
    "#         self.channel_encoder = ChannelEncoder(self.chan_enc_inputs, self.isTrain, config)\n",
    "#         print(\"Channel input: \", self.channel_encoder.enc_out)\n",
    "#         self.channel = ISI_Channel(self.channel_encoder.enc_out, config, noise = True)\n",
    "#         print(\"Channel output: \", self.channel.chan_out)\n",
    "#         self.channel_decoder = ChannelDecoder(self.channel.chan_out, self.isTrain, config)\n",
    "#         print(\"Dec out: \", self.channel_decoder.dec_out)\n",
    "#         self.dec_labels = self.channel_decoder.dec_labels\n",
    "#         self.accuracy = self.define_accuracy()\n",
    "#         self.loss, self.train_op = self.define_loss()\n",
    "        \n",
    "        \n",
    "    \n",
    "#     \"\"\"\n",
    "#     Script to generate train and test data\n",
    "#     \"\"\"\n",
    "#     def gen_bin_data(self, num_samples, sample_len, seed_num = None):\n",
    "        \n",
    "#         if (seed_num != None):\n",
    "#             np.random.seed(seed_num)\n",
    "#         randMat = np.random.rand(num_samples, sample_len)\n",
    "#         binMat = np.where(randMat > 0.5, 1.0, -1.0)\n",
    "#         return binMat\n",
    "    \n",
    "#     def define_accuracy(self):\n",
    "#         eq_indicator = tf.cast(tf.equal(self.dec_labels, self.dec_targets), dtype=tf.float32)\n",
    "#         return tf.reduce_mean(eq_indicator)\n",
    "\n",
    "#     def define_loss(self):\n",
    "#         adapted_targets = tf.cast(self.dec_targets > 0, tf.float32)\n",
    "#         loss = tf.losses.hinge_loss(adapted_targets, self.channel_decoder.dec_out)\n",
    "\n",
    "#         train_op = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(loss) \n",
    "#         return loss, train_op\n",
    "    \n",
    "#     def train(self, sess):\n",
    "        \n",
    "#         params = tf.trainable_variables()\n",
    "#         num_params = sum(\n",
    "#             map(lambda t: np.prod(tf.shape(t.value()).eval()), params))\n",
    "#         print('Total model parameters: ', num_params)\n",
    "#         self.training_counter = 0\n",
    "#         accuracies = []\n",
    "#         try:\n",
    "#             tic = time.time() \n",
    "#             curr_lr = self.config.init_lr\n",
    "#             for epoch in range(self.config.n_epochs):\n",
    "            \n",
    "#                 # =============================  Train on Training Data ===============================\n",
    "#                 train_data = self.gen_bin_data(self.config.train_size, self.config.chan_enc_in_len)\n",
    "#                 sess.run(self.iterator.initializer, feed_dict = {self.X: train_data, self.Y: train_data})\n",
    "#                 if (epoch > 0): curr_lr = curr_lr / np.sqrt(epoch)\n",
    "#                 train_fd = {self.isTrain: True, self.lr : curr_lr}\n",
    "                \n",
    "#                 for i in range(self.config.batch_per_epoch):\n",
    "                    \n",
    "# #                     _, loss, channel_filt_1 = sess.run([self.train_op, self.loss, self.channel.filter], feed_dict = train_fd)\n",
    "# #                     channel_filt_2 = sess.run(self.channel.filter)\n",
    "# #                     print(channel_filt_1, channel_filt_2)\n",
    "#                     loss, _, optimizer_lr = sess.run([self.loss, self.train_op, self.lr], train_fd)\n",
    "#                     self.training_counter += 1\n",
    "# #                     print(sess.run(self.channel.filter, train_fd))\n",
    "#                     if self.training_counter % self.config.print_every == 0:\n",
    "#                         toc = time.time()\n",
    "\n",
    "#                         acc = sess.run(self.accuracy, train_fd)\n",
    "\n",
    "#                         print(\"Epoch: \", epoch + 1, \n",
    "#                               \"Accuracy: \", acc,\n",
    "#                               \"Training iteration: \", self.training_counter,\n",
    "#                               \"Training time: \", int(toc-tic), \"s\",\n",
    "#                               \"Training loss: \", loss,\n",
    "#                               \"Learning rate\", optimizer_lr)\n",
    "#                         accuracies.append(acc)\n",
    "\n",
    "# #                     if self.training_counter % self.config.save_every == 0:\n",
    "# #                         self.saver.save(sess, self.config.model_save_path, global_step=self.training_counter // self.config.save_every)\n",
    "# #                         print(\"Model saved in file: %s\" % self.config.model_save_path)\n",
    "\n",
    "\n",
    "# #                 # =========================== Save the Model ==========================================\n",
    "# #                 self.saver.save(sess, self.config.model_save_path, global_step=i)\n",
    "# #                 print(\"Model saved in file: %s\" % self.config.model_save_path)\n",
    "\n",
    "#         except KeyboardInterrupt:\n",
    "#             print('training interrupted')\n",
    "#         #Save model, plot accuracies    \n",
    "# #         self.saver.save(sess, self.config.model_save_path, global_step=self.training_counter)\n",
    "# #         print(\"Model saved in file: %s\" % self.config.model_save_path) \n",
    "        \n",
    "#         return accuracies\n",
    "    \n",
    "#     def test(self, sess):\n",
    "        \n",
    "#         test_data = self.gen_bin_data(self.config.test_size, self.config.chan_enc_in_len)\n",
    "        \n",
    "#         #sess.run(self.iterator.initializer, feed_dict = {self.X: test_data, self.Y: test_data})\n",
    "#         print(\"Testing...\")\n",
    "#         test_fd = {self.isTrain : False}\n",
    "        \n",
    "#         self.chan_enc_inputs, self.dec_targets = (test_data, test_data)\n",
    "        \n",
    "#         test_acc = sess.run(self.accuracy, feed_dict = test_fd) \n",
    "#         test_loss = sess.run(self.loss, feed_dict = test_fd)\n",
    "# #         print(sess.run(self.channel_encoder.enc_out, test_fd))\n",
    "#         print(\"Test loss: \", test_loss, \"\\nTest accuracy: \", test_acc)\n",
    "\n",
    "#         return test_acc\n",
    "        "
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
