{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Importing the required packages\n",
    "import torch as tr\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import scipy.io as sio\n",
    "import math\n",
    "import time\n",
    "import sys\n",
    "from matplotlib import animation, rc\n",
    "from IPython.display import HTML"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Defining some of the functions used\n",
    "def awgn_channel(x):\n",
    "    return x + np.sqrt(sigma2)*tr.randn(M*stacks,channel_uses).to(Device)\n",
    "def normalization(x): # E[|x|^2] = 1\n",
    "    return x / tr.sqrt((2*(x**2)).mean())\n",
    "def save():\n",
    "        tr.save({\n",
    "         'model_state_dict' : encoder.state_dict(), \n",
    "         'optimizer_state_dict': optimizer.state_dict(),\n",
    "         'loss': loss_history,\n",
    "         'constellations': Constellations,\n",
    "         'SNR': EsNo_dB,\n",
    "         'epochs' : epochs,\n",
    "         'stacks' : stacks,\n",
    "         'learning_rate': learning_rate,\n",
    "         'Device': Device,\n",
    "         'time': time.time()-start_time},'./Data/MI/' + str(channel_uses) + 'D/' + str(M) + '/MI_' + Estimation_type_save + '_' + str(channel_uses) + 'D_' + str(M) + '_' + str(EsNo_dB) + 'dB_'+ str(learning_rate)+'lr')\n",
    "\n",
    "        sio.savemat('./Data/MI/' + str(channel_uses) + 'D/' + str(M) + '/MI_' + Estimation_type_save + '_' + str(channel_uses) + 'D_' + str(M) + '_' + str(EsNo_dB) + 'dB_'+ str(learning_rate)+'lr', {\n",
    "         'model_state_dict' : encoder.state_dict(), \n",
    "         'optimizer_state_dict': optimizer.state_dict(),\n",
    "         'loss': loss_history,\n",
    "         'constellations': Constellations,\n",
    "         'SNR': EsNo_dB,\n",
    "         'epochs' : epochs,\n",
    "         'stacks' : stacks,\n",
    "         'learning_rate': learning_rate,\n",
    "         'Device': Device,\n",
    "         'time': time.time()-start_time})\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#The input parameters of the optimization\n",
    "\n",
    "M = 1024 #The cardinality size of the constellation\n",
    "stacks = 100 #The amount of noise samples per symbol for MC optimization\n",
    "channel_uses = 2 #The dimensionality of the constellation\n",
    "learning_rate = 0.01 #Learning Rate\n",
    "EsNo_dB = 26 # The SNR\n",
    "epochs = 200 #The amount of iterations\n",
    "Estimation_type = 'GH' #GH or MC\n",
    "Device = 'cpu' #Determines the device which the optimization is done on, 'cpu' for cpu and 'cuda:0', 'cuda:1' etc. for GPU"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'np' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-1-3a3b590aa5ce>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m      1\u001b[0m \u001b[1;31m#Defining some of the parameters of the optimization\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      2\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 3\u001b[1;33m \u001b[0mm\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlog2\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mM\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;31m#The amount of bits per symbol\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m      4\u001b[0m \u001b[0mEsNo_r\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;36m10\u001b[0m\u001b[1;33m**\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mEsNo_dB\u001b[0m\u001b[1;33m/\u001b[0m\u001b[1;36m10\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      5\u001b[0m \u001b[0msigma2\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;36m1\u001b[0m\u001b[1;33m/\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mchannel_uses\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mEsNo_r\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;31m# noise variance per channel use\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mNameError\u001b[0m: name 'np' is not defined"
     ]
    }
   ],
   "source": [
    "#Defining some of the parameters of the optimization\n",
    "\n",
    "m = np.log2(M) #The amount of bits per symbol\n",
    "EsNo_r = 10**(EsNo_dB/10)\n",
    "sigma2 = 1/(channel_uses*EsNo_r) # noise variance per channel use\n",
    "GH =  sio.loadmat('GaussHermite_J_10.mat')#Loading in the Gauss-Hermite points\n",
    "X_eye = tr.eye(M).to(Device) # The input to our neural network\n",
    "X_tilde = X_eye.repeat(stacks,1).to(Device)\n",
    "if Estimation_type == 'MC':\n",
    "    Estimation_type_save = Estimation_type + '_' + str(stacks) + 'Stacks'\n",
    "else:\n",
    "    Estimation_type_save = Estimation_type"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#=====================================================#\n",
    "# build the computation graph\n",
    "#=====================================================#\n",
    "def MI(X_tilde):\n",
    "    X = normalization(encoder(X_tilde)) # minibatch_size x channel_uses\n",
    "    Y = awgn_channel(X)                 # minibatch_size x channel_uses\n",
    "    #print(X[0:M:1,:])\n",
    "    # compute posterior distribution\n",
    "    XX = tr.reshape(X[0:M:,:], shape=[M, 1, channel_uses]).to(Device)\n",
    "    YY = tr.reshape(Y, shape=[1, M*stacks, channel_uses]).to(Device)\n",
    "\n",
    "    num = tr.exp(-tr.sum((Y-X)**2,1)/(2*sigma2)) # f_{Y|X}\n",
    "    den = (tr.exp(-tr.sum((YY-XX)**2,2)/(2*sigma2))).mean() # f_{Y}\n",
    "\n",
    "    posterior = num/den\n",
    "\n",
    "    epsilon = 0.000001 # to avoid log(x) = -Inf\n",
    "    loss = -(tr.log(posterior + epsilon)/np.log(2)).mean()\n",
    "    return loss\n",
    "\n",
    "def MI_GH(X_tilde):\n",
    "    X = normalization(encoder(X_tilde)) \n",
    "    GH_xi = tr.tensor(GH['xi'], dtype  = tr.float32).to(Device)#Load in the Gauss-Hermite points\n",
    "    GH_alpha = tr.tensor(GH['alpha'], dtype = tr.float32).to(Device)#Load in the Gauss-Hermite weigths\n",
    "    \n",
    "    Dmat = tr.zeros(M,M,channel_uses).to(Device)\n",
    "    Dmat[:,:,0] = X[:,0].unsqueeze(1) -(X[:,0].unsqueeze(1)).t() #Calculate the distances between constellation points\n",
    "    Dmat[:,:,1] = X[:,1].unsqueeze(1) -(X[:,1].unsqueeze(1)).t()\n",
    "\n",
    "    Es = (X[:,0]**2 + X[:,1]**2).mean() #Calculate the signal energy\n",
    "    EsN0lin = 10**(EsNo_dB/10)  #Turn the SNR value from dB to a linear value\n",
    "    SigmaZ2 = (Es/(EsN0lin)) #Calculate the noise variance\n",
    "    sum_0 = 0 #Initialize the sum \n",
    "\n",
    "    for l1 in range(10): #Dimension 1\n",
    "        for l2 in range(10): #Dimension 2\n",
    "             num = tr.exp(-((Dmat[:,:,0]**2 + Dmat[:,:,1]**2) + 2*tr.sqrt(SigmaZ2)*(GH_xi[l1]*Dmat[:,:,0] - GH_xi[l2]*Dmat[:,:,1]))/SigmaZ2)\n",
    "             sum_0 = GH_alpha[l1]*GH_alpha[l2]*tr.log(tr.sum(num,1))/tr.log(tr.tensor(2, dtype = tr.float32))  + sum_0\n",
    "    sum_0 = tr.sum(sum_0)\n",
    "    MI = m-1/M/math.pi*sum_0\n",
    "    loss = -MI\n",
    "    return loss\n",
    "\n",
    "def MI_GH_4D(X_tilde):\n",
    "    X = normalization(encoder(X_tilde)) \n",
    "    GH_xi = tr.tensor(GH['xi'], dtype  = tr.float32).to(Device)#Load in the Gauss-Hermite points\n",
    "    GH_alpha = tr.tensor(GH['alpha'], dtype = tr.float32).to(Device)#Load in the Gauss-Hermite weigths\n",
    "    \n",
    "    Dmat = tr.zeros(M,M,channel_uses).to(Device)\n",
    "    Dmat[:,:,0] = X[:,0].unsqueeze(1) -(X[:,0].unsqueeze(1)).t() #Calculate the distances between constellation points\n",
    "    Dmat[:,:,1] = X[:,1].unsqueeze(1) -(X[:,1].unsqueeze(1)).t()\n",
    "    Dmat[:,:,2] = X[:,2].unsqueeze(1) -(X[:,2].unsqueeze(1)).t()\n",
    "    Dmat[:,:,3] = X[:,3].unsqueeze(1) -(X[:,3].unsqueeze(1)).t()\n",
    "    Dmatnorm = Dmat[:,:,0]**2 + Dmat[:,:,1]**2+ Dmat[:,:,2]**2 + Dmat[:,:,3]**2\n",
    "    Es = (X[:,0]**2 + X[:,1]**2 + X[:,2]**2 + X[:,3]**2).mean() #Calculate the signal energy\n",
    "    EsN0lin = 10**(EsNo_dB/10)  #Turn the SNR value from dB to a linear value\n",
    "    SigmaZ2 = (Es/(EsN0lin)) #Calculate the noise variance\n",
    "    sum_0 = 0 #Initialize the sum \n",
    "    for l1 in range(10): #Dimension 1\n",
    "        for l2 in range(10): #Dimension 2\n",
    "            for l3 in range(10):\n",
    "                for l4 in range(10):\n",
    "                     num = tr.exp(-(Dmatnorm + np.sqrt(2)*tr.sqrt(SigmaZ2)*(GH_xi[l1]*Dmat[:,:,0] - GH_xi[l2]*Dmat[:,:,1] + GH_xi[l3]*Dmat[:,:,2] - GH_xi[l4]*Dmat[:,:,3]))/(0.5*SigmaZ2))\n",
    "                     sum_0 = GH_alpha[l1]*GH_alpha[l2]*GH_alpha[l3]*GH_alpha[l4]*tr.log(tr.sum(num,1))/tr.log(tr.tensor(2, dtype = tr.float32))  + sum_0\n",
    "    sum_0 = tr.sum(sum_0)\n",
    "    MI = m-1/M/(math.pi**2)*sum_0\n",
    "    loss = -MI\n",
    "    return loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Training the model\n",
    "start_time = time.time()\n",
    "encoder = tr.nn.Sequential()\n",
    "encoder.add_module('last', tr.nn.Linear(M,channel_uses,bias = False))\n",
    "encoder.to(Device)\n",
    "optimizer = tr.optim.Adam(encoder.parameters(), learning_rate)\n",
    "loss_history = np.zeros(epochs) # For saving the losses\n",
    "Constellations = np.zeros((M,channel_uses,epochs)) #For saving the constellations\n",
    "for i in range(1, epochs+1):\n",
    "    if Estimation_type == 'GH':\n",
    "        if channel_uses ==2:\n",
    "            loss = MI_GH(X_eye)\n",
    "        else:\n",
    "            loss = MI_GH_4D(X_eye)\n",
    "    else:\n",
    "        loss = MI(X_tilde)\n",
    "    loss_history[i-1] = loss\n",
    "    Constellations[:,:,i-1] = normalization(encoder(X_eye)).data.numpy()\n",
    "    optimizer.zero_grad()\n",
    "    loss.backward()\n",
    "    optimizer.step()\n",
    "    if i%100 == 0 or i == 1:\n",
    "        print('iter ', i, ' loss', loss, 'time', time.time()- start_time)\n",
    "    save()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Plotting the final constellation\n",
    "Constellation = normalization(encoder(X_tilde)).data.numpy() \n",
    "if channel_uses == 4:\n",
    "    fig = plt.figure()\n",
    "    ax = fig.add_subplot(131)\n",
    "    ax.scatter(Constellation[:,0],Constellation[:,1], marker = '.')\n",
    "    ax.set_aspect(1)\n",
    "    ax2 = fig.add_subplot(132)\n",
    "    ax2.scatter(Constellation[:,2],Constellation[:,3], marker = '.')\n",
    "    ax2.set_aspect(1)\n",
    "    ax3 = fig.add_subplot(133)\n",
    "    ax3.plot(loss_history)\n",
    "else:\n",
    "    fig = plt.figure()\n",
    "    ax = fig.add_subplot(121)\n",
    "    ax.scatter(Constellation[:,0],Constellation[:,1], marker = '.')\n",
    "    ax.set_aspect(1)\n",
    "    ax2 = fig.add_subplot(122)\n",
    "    ax2.plot(loss_history)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Creating an animation of the evolution of the constellation, only works for 2D constellations\n",
    "plt.rcParams['animation.embed_limit'] = 2**128\n",
    "def init():\n",
    "    line[0].set_data([],[])\n",
    "    line[1].set_data([],[])\n",
    "    return(line,)\n",
    "\n",
    "def animate(i):\n",
    "    x = Constellations[:,0,i]\n",
    "    y = Constellations[:,1,i]\n",
    "    line[0].set_data(x,y)\n",
    "    line[1].set_data(np.linspace(0,i,i),loss_history[0:i])\n",
    "    return(line,)\n",
    "\n",
    "\n",
    "fig = plt.figure()\n",
    "ax = fig.add_subplot(1,2,1)\n",
    "ax2 = fig.add_subplot(1,2,2)\n",
    "ax.set_xlim((-1.8,1.8))\n",
    "ax.set_ylim((-1.8,1.8))\n",
    "ax2.set_xlim((-1,epochs))\n",
    "ax2.set_ylim((-max(loss_history)-0.2,-min(loss_history)+0.2))\n",
    "ax.set_aspect('equal')\n",
    "ax2.set_xlabel('iterations')\n",
    "ax2.set_ylabel('loss')\n",
    "line1, = ax.plot([],[],'.',lw=2,)\n",
    "line2, = ax2.plot([],[],lw=2,)\n",
    "line1.set_color('b')\n",
    "line2.set_color('b')\n",
    "line = [line1, line2]\n",
    "anim = animation.FuncAnimation(fig,animate,init_func = init, frames = epochs, interval = 10, blit = False)\n",
    "anim.save('./Data/MI/' + str(channel_uses) + 'D/' + str(M) + '/MI_' + Estimation_type + '_' + str(channel_uses) + 'D_' + str(M) + '_' + str(EsNo_dB) + 'dB_'+ str(learning_rate)+'lr_animation.mp4') \n",
    "HTML(anim.to_jshtml())"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
