{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "TensorFlow version: 2.0.0\n"
     ]
    }
   ],
   "source": [
    "import tensorflow as tf\n",
    "import datetime, os\n",
    "#hide tf logs\n",
     "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # or any of {'0', '1', '2', '3'}\n",
     "# 0 (default) shows all, 1 filters out INFO logs, 2 additionally filters out WARNING logs, and 3 additionally filters out ERROR logs\n",
    "import scipy.optimize\n",
    "import scipy.io\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import matplotlib.ticker\n",
    "import time\n",
    "from pyDOE import lhs         #Latin Hypercube Sampling\n",
    "import pandas as pd\n",
    "import seaborn as sns \n",
    "import codecs, json\n",
    "\n",
    "# generates same random numbers each time\n",
    "np.random.seed(1234)\n",
    "tf.random.set_seed(1234)\n",
    "\n",
    "print(\"TensorFlow version: {}\".format(tf.__version__))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# *Data Prep*\n",
    "\n",
     "Training and testing data are prepared from the solution file."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "x_1 = np.linspace(-1,1,256)  # 256 points between -1 and 1 [256x1]\n",
     "x_2 = np.linspace(1,-1,256)  # 256 points from 1 down to -1 [256x1]\n",
    "\n",
    "X, Y = np.meshgrid(x_1,x_2) "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Test Data\n",
    "\n",
    "We prepare the test data to compare against the solution produced by the PINN."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "X_u_test = np.hstack((X.flatten(order='F')[:,None], Y.flatten(order='F')[:,None]))\n",
    "\n",
    "# Domain bounds\n",
    "lb = np.array([-1, -1]) #lower bound\n",
    "ub = np.array([1, 1])  #upper bound\n",
    "\n",
    "a_1 = 1 \n",
    "a_2 = 4\n",
    "\n",
     "usol = np.sin(a_1 * np.pi * X) * np.sin(a_2 * np.pi * Y) #solution chosen for convenience  \n",
    "\n",
    "u = usol.flatten('F')[:,None] "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Training Data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def trainingdata(N_u,N_f):\n",
    "    \n",
    "    leftedge_x = np.hstack((X[:,0][:,None], Y[:,0][:,None]))\n",
    "    leftedge_u = usol[:,0][:,None]\n",
    "    \n",
    "    rightedge_x = np.hstack((X[:,-1][:,None], Y[:,-1][:,None]))\n",
    "    rightedge_u = usol[:,-1][:,None]\n",
    "    \n",
    "    topedge_x = np.hstack((X[0,:][:,None], Y[0,:][:,None]))\n",
    "    topedge_u = usol[0,:][:,None]\n",
    "    \n",
    "    bottomedge_x = np.hstack((X[-1,:][:,None], Y[-1,:][:,None]))\n",
    "    bottomedge_u = usol[-1,:][:,None]\n",
    "    \n",
    "    all_X_u_train = np.vstack([leftedge_x, rightedge_x, bottomedge_x, topedge_x])\n",
    "    all_u_train = np.vstack([leftedge_u, rightedge_u, bottomedge_u, topedge_u])  \n",
    "     \n",
    "    #choose random N_u points for training\n",
    "    idx = np.random.choice(all_X_u_train.shape[0], N_u, replace=False) \n",
    "    \n",
     "    X_u_train = all_X_u_train[idx, :] #choose rows indexed by 'idx' (x_1,x_2)\n",
    "    u_train = all_u_train[idx,:]      #choose corresponding u\n",
    "    \n",
    "    '''Collocation Points'''\n",
    "\n",
    "    # Latin Hypercube sampling for collocation points \n",
     "    # N_f sets of tuples (x_1,x_2)\n",
    "    X_f = lb + (ub-lb)*lhs(2,N_f) \n",
    "    X_f_train = np.vstack((X_f, X_u_train)) # append training points to collocation points \n",
    "    \n",
    "    return X_f_train, X_u_train, u_train"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# PINN \n",
    "\n",
    "$W \\in \\mathcal{R}^{n_{l-1}\\times{n_l}}$ \n",
    "\n",
    "Creating sequential layers using the $\\textit{class}$ tf.Module"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Sequentialmodel(tf.Module): \n",
    "    def __init__(self, layers, name=None):\n",
    "\n",
    "        self.W = []  #Weights and biases\n",
    "        self.parameters = 0 #total number of parameters\n",
    "\n",
    "        for i in range(len(layers)-1):\n",
    "\n",
    "            input_dim = layers[i]\n",
    "            output_dim = layers[i+1]\n",
    "\n",
    "            #Xavier standard deviation \n",
    "            std_dv = np.sqrt((2.0/(input_dim + output_dim)))\n",
    "\n",
    "            #weights = normal distribution * Xavier standard deviation + 0\n",
    "            w = tf.random.normal([input_dim, output_dim], dtype = 'float64') * std_dv\n",
    "\n",
    "            w = tf.Variable(w, trainable=True, name = 'w' + str(i+1))\n",
    "\n",
    "            b = tf.Variable(tf.cast(tf.zeros([output_dim]), dtype = 'float64'), trainable = True, name = 'b' + str(i+1))\n",
    "\n",
    "            self.W.append(w)\n",
    "            self.W.append(b)\n",
    "\n",
    "            self.parameters +=  input_dim * output_dim + output_dim\n",
    "        \n",
    "        # Lagrange multipliers\n",
    "        \n",
    "        # Boundary terms      \n",
    "        self.lagrange_1 = tf.Variable(tf.cast(tf.ones([N_u,1]), dtype = 'float64'), trainable = True) \n",
    "        \n",
    "        # Residual terms\n",
    "        self.lagrange_2 = tf.Variable(tf.cast(tf.ones([N_f+N_u,1]), dtype = 'float64'), trainable = True)\n",
    "                \n",
    "    \n",
    "    def evaluate(self,x):\n",
    "        \n",
    "        #preprocessing input \n",
    "        x = (x - lb)/(ub - lb) #feature scaling\n",
    "        \n",
    "        a = x\n",
    "        \n",
    "        for i in range(len(layers)-2):\n",
    "            \n",
    "            z = tf.add(tf.matmul(a, self.W[2*i]), self.W[2*i+1])\n",
    "            a = tf.nn.tanh(z)\n",
    "            \n",
    "        a = tf.add(tf.matmul(a, self.W[-2]), self.W[-1]) # For regression, no activation to last layer\n",
    "        \n",
    "        return a\n",
    "    \n",
    "    def get_weights(self):\n",
    "\n",
    "        parameters_1d = []  # [.... W_i,b_i.....  ] 1d array\n",
    "        \n",
    "        for i in range (len(layers)-1):\n",
    "            \n",
    "            w_1d = tf.reshape(self.W[2*i],[-1])   #flatten weights \n",
    "            b_1d = tf.reshape(self.W[2*i+1],[-1]) #flatten biases\n",
    "            \n",
    "            parameters_1d = tf.concat([parameters_1d, w_1d], 0) #concat weights \n",
    "            parameters_1d = tf.concat([parameters_1d, b_1d], 0) #concat biases\n",
    "        \n",
    "        return parameters_1d\n",
    "        \n",
    "    def set_weights(self,parameters):\n",
    "                \n",
    "        for i in range (len(layers)-1):\n",
    "\n",
    "            shape_w = tf.shape(self.W[2*i]).numpy() # shape of the weight tensor\n",
    "            size_w = tf.size(self.W[2*i]).numpy() #size of the weight tensor \n",
    "            \n",
    "            shape_b = tf.shape(self.W[2*i+1]).numpy() # shape of the bias tensor\n",
    "            size_b = tf.size(self.W[2*i+1]).numpy() #size of the bias tensor \n",
    "                        \n",
    "            pick_w = parameters[0:size_w] #pick the weights \n",
    "            self.W[2*i].assign(tf.reshape(pick_w,shape_w)) # assign  \n",
    "            parameters = np.delete(parameters,np.arange(size_w),0) #delete \n",
    "            \n",
    "            pick_b = parameters[0:size_b] #pick the biases \n",
    "            self.W[2*i+1].assign(tf.reshape(pick_b,shape_b)) # assign \n",
    "            parameters = np.delete(parameters,np.arange(size_b),0) #delete \n",
    "            \n",
    "    def loss_BC(self,x,y):\n",
    "        \n",
    "        loss_u = y-self.evaluate(x)\n",
    "        loss_u = self.lagrange_1*loss_u # element-wise,shape = (N_u,1)\n",
    "        loss_u = tf.reduce_mean(tf.square(loss_u)) # squaring and averaging\n",
    "\n",
    "        return loss_u\n",
    "            \n",
    "    def loss_PDE(self, x_to_train_f):\n",
    "    \n",
    "        g = tf.Variable(x_to_train_f, dtype = 'float64')\n",
    "\n",
    "        k = 1    \n",
    "\n",
    "        x_1_f = g[:,0:1]\n",
    "        x_2_f = g[:,1:2]\n",
    "\n",
    "        with tf.GradientTape(persistent=True) as tape:\n",
    "\n",
    "            tape.watch(x_1_f)\n",
    "            tape.watch(x_2_f)\n",
    "\n",
    "            g = tf.stack([x_1_f[:,0], x_2_f[:,0]], axis=1)\n",
    "\n",
    "            u = self.evaluate(g)\n",
    "            u_x_1 = tape.gradient(u,x_1_f)\n",
    "            u_x_2 = tape.gradient(u,x_2_f)\n",
    "\n",
    "        u_xx_1 = tape.gradient(u_x_1,x_1_f)\n",
    "        u_xx_2 = tape.gradient(u_x_2,x_2_f)\n",
    "\n",
    "        del tape\n",
    "\n",
    "        q = -( (a_1*np.pi)**2 + (a_2*np.pi)**2 - k**2 ) * np.sin(a_1*np.pi*x_1_f) * np.sin(a_2*np.pi*x_2_f)\n",
    "\n",
    "        f = u_xx_1 + u_xx_2 + k**2 * u - q #residual\n",
    "\n",
     "        f = self.lagrange_2 * f # element-wise, shape = (N_f+N_u,1) since boundary points are appended to collocation points\n",
    "        \n",
    "        loss_f = tf.reduce_mean(tf.square(f))\n",
    "\n",
    "        return loss_f\n",
    "    \n",
    "    def loss(self,x,y,g):\n",
    "\n",
    "        loss_u = self.loss_BC(x,y)\n",
    "        loss_f = self.loss_PDE(g)\n",
    "\n",
    "        loss = loss_u + loss_f\n",
    "\n",
    "        return loss, loss_u, loss_f \n",
    "    \n",
    "    def optimizerfunc(self,parameters):\n",
    "        \n",
    "        self.set_weights(parameters)\n",
    "       \n",
    "        with tf.GradientTape() as tape:\n",
    "            tape.watch(self.trainable_variables)\n",
    "            \n",
    "            loss_val, loss_u, loss_f = self.loss(X_u_train, u_train, X_f_train)\n",
    "            \n",
    "        grads = tape.gradient(loss_val,self.trainable_variables)\n",
    "                \n",
    "        del tape\n",
    "        \n",
    "        grads_1d = [ ] #store 1d grads \n",
    "        \n",
    "        for i in range (len(layers)-1):\n",
    "\n",
    "            grads_w_1d = tf.reshape(grads[2*i],[-1]) #flatten weights \n",
    "            grads_b_1d = tf.reshape(grads[2*i+1],[-1]) #flatten biases\n",
    "\n",
    "            grads_1d = tf.concat([grads_1d, grads_w_1d], 0) #concat grad_weights \n",
    "            grads_1d = tf.concat([grads_1d, grads_b_1d], 0) #concat grad_biases\n",
    "        \n",
    "        return loss_val.numpy(), grads_1d.numpy()\n",
    "    \n",
    "    def optimizer_callback(self,parameters):\n",
    "                \n",
    "        loss_value, loss_u, loss_f = self.loss(X_u_train, u_train, X_f_train)\n",
    "        \n",
    "        u_pred = self.evaluate(X_u_test)\n",
    "        error_vec = np.linalg.norm((u-u_pred),2)/np.linalg.norm(u,2)\n",
    "        \n",
    "        tf.print(loss_value, loss_u, loss_f, error_vec)\n",
    "        \n",
    "    def adaptive_gradients(self):\n",
    "\n",
    "        with tf.GradientTape() as tape:\n",
    "            tape.watch(self.W)\n",
    "            loss_val, loss_u, loss_f = self.loss(X_u_train, u_train, X_f_train)\n",
    "\n",
    "        grads = tape.gradient(loss_val,self.W)\n",
    "\n",
    "        del tape\n",
    "\n",
    "        with tf.GradientTape(persistent = True) as tape:\n",
    "            tape.watch(self.lagrange_1)\n",
    "            tape.watch(self.lagrange_2)\n",
    "            loss_val, loss_u, loss_f = self.loss(X_u_train, u_train, X_f_train)\n",
    "\n",
    "        grads_L1 = tape.gradient(loss_val,self.lagrange_1) # boundary terms\n",
    "        grads_L2 = tape.gradient(loss_val,self.lagrange_2) # residual terms\n",
    "\n",
    "        del tape\n",
    "\n",
    "        return loss_val, grads, grads_L1, grads_L2"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# *Loss Function*\n",
    "\n",
    "The loss function consists of two parts:\n",
    "1. **loss_BC**: MSE error of boundary losses\n",
    "2. **loss_PDE**: MSE error of collocation points satisfying the PDE\n",
    "\n",
    "**loss** = loss_BC + loss_PDE"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "N_u = 400 #Total number of data points for 'u'\n",
    "N_f = 10000 #Total number of collocation points \n",
    "\n",
    "# Training data\n",
    "X_f_train, X_u_train, u_train = trainingdata(N_u,N_f)\n",
    "\n",
    "layers = np.array([2,50,50,50,1])\n",
    "\n",
    "PINN = Sequentialmodel(layers)\n",
    "\n",
    "start_time = time.time() \n",
    "\n",
    "optimizer = tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07)\n",
    "\n",
    "optimizer_L1 = tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07)\n",
    "\n",
    "optimizer_L2 = tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07)\n",
    "\n",
    "num_epochs = 2500\n",
    "\n",
    "print('STIFF PROBLEM')\n",
    "\n",
    "for epoch in range(num_epochs):\n",
    "    \n",
    "        loss_value, grads, grads_L1, grads_L2 = PINN.adaptive_gradients()\n",
    "\n",
    "        if epoch % 100 == 0:\n",
    "            tf.print(loss_value)\n",
    "        \n",
    "        optimizer.apply_gradients(zip(grads, PINN.W)) #gradient descent weights \n",
    "        optimizer_L1.apply_gradients(zip([-grads_L1], [PINN.lagrange_1])) # gradient ascent adaptive coefficients of boundary residual\n",
    "        optimizer_L2.apply_gradients(zip([-grads_L2], [PINN.lagrange_2])) # gradient ascent adaptive coefficients of PDE residual\n",
    "              \n",
    "init_params = PINN.get_weights().numpy()\n",
    "    \n",
    "# train the model with Scipy L-BFGS optimizer\n",
    "results = scipy.optimize.minimize(fun = PINN.optimizerfunc, \n",
    "                                  x0 = init_params, \n",
    "                                  args=(), \n",
    "                                  method='L-BFGS-B', \n",
    "                                  jac= True,        # If jac is True, fun is assumed to return the gradient along with the objective function\n",
    "                                  callback = None, \n",
    "                                  options = {'disp': None,\n",
    "                                            'maxcor': 200, \n",
    "                                            'ftol': 1 * np.finfo(float).eps,  #The iteration stops when (f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol\n",
    "                                            'gtol': 5e-10, \n",
    "                                            'maxfun':  50000, \n",
    "                                            'maxiter': 2500,\n",
     "                                            'iprint': -1,   #iprint = -1 suppresses all L-BFGS-B iteration output\n",
    "                                            'maxls': 50})\n",
    "\n",
    "elapsed = time.time() - start_time                \n",
    "print('Training time: %.2f' % (elapsed))\n",
    "\n",
    "print(results)\n",
    "\n",
    "PINN.set_weights(results.x)\n",
    "\n",
    "''' Model Accuracy ''' \n",
    "u_pred = PINN.evaluate(X_u_test)\n",
    "\n",
    "error_vec = np.linalg.norm((u-u_pred),2)/np.linalg.norm(u,2)        # Relative L2 Norm of the error (Vector)\n",
    "print('Test Error: %.5f'  % (error_vec))\n",
    "\n",
    "u_pred = np.reshape(u_pred,(256,256),order='F') \n",
    "\n",
    "# Plotting\n",
    "\n",
    "#Ground truth\n",
    "fig_1 = plt.figure(1, figsize=(18, 5))\n",
    "plt.subplot(1, 3, 1)\n",
    "plt.pcolor(x_1, x_2, usol, cmap='jet')\n",
    "plt.colorbar()\n",
    "plt.xlabel(r'$x_1$', fontsize=18)\n",
    "plt.ylabel(r'$x_2$', fontsize=18)\n",
    "plt.title('Ground Truth $u(x_1,x_2)$', fontsize=15)\n",
    "\n",
    "# Prediction\n",
    "plt.subplot(1, 3, 2)\n",
    "plt.pcolor(x_1, x_2, u_pred, cmap='jet')\n",
    "plt.colorbar()\n",
    "plt.xlabel(r'$x_1$', fontsize=18)\n",
    "plt.ylabel(r'$x_2$', fontsize=18)\n",
     "plt.title(r'Predicted $\\hat u(x_1,x_2)$', fontsize=15) # raw string avoids invalid-escape warning, consistent with the title below\n",
    "\n",
    "# Error\n",
    "plt.subplot(1, 3, 3)\n",
    "plt.pcolor(x_1, x_2, np.abs(usol - u_pred), cmap='jet')\n",
    "plt.colorbar()\n",
    "plt.xlabel(r'$x_1$', fontsize=18)\n",
    "plt.ylabel(r'$x_2$', fontsize=18)\n",
    "plt.title(r'Absolute error $|u(x_1,x_2)- \\hat u(x_1,x_2)|$', fontsize=15)\n",
    "plt.tight_layout()\n",
    "\n",
    "# plt.savefig('Helmholtz_stiff_adaptive_weights.png', dpi = 500, bbox_inches='tight')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "with tf.GradientTape(persistent=True) as tape:\n",
     "    loss_value, loss_u, loss_f = PINN.loss(X_u_train, u_train, X_f_train)\n",
     "\n",
     "# gradients taken after exiting the tape context; persistent=True permits multiple gradient() calls\n",
     "grad_u = tape.gradient(loss_u, PINN.trainable_variables)\n",
     "grad_f = tape.gradient(loss_f, PINN.trainable_variables)\n",
     "del tape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Writing gradients to 2 separate JSON files \n",
    "\n",
    "L = len(layers)-1 #number of weights matrices\n",
    "\n",
    "for i in range (L*2):\n",
    "\n",
    "    temp = grad_f[i].numpy().tolist() # nested lists with same data, indices\n",
    "    json.dump(temp, codecs.open(\"Stiff_problem/Gradients/gradients_f\" + str(i) + \".json\", 'w', \n",
    "                                encoding='utf-8'), separators=(',', ':'), sort_keys=True, indent=0)\n",
    "    \n",
    "    temp = grad_u[i].numpy().tolist() # nested lists with same data, indices\n",
    "    json.dump(temp, codecs.open(\"Stiff_problem/Gradients/gradients_u\" + str(i) + \".json\", 'w', \n",
    "                                encoding='utf-8'), separators=(',', ':'), sort_keys=True, indent=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "metadata": {},
   "outputs": [],
   "source": [
    "f_1 = {} #residual non-stiff problem\n",
    "u_1 = {} #boundary non-stiff problem\n",
    "\n",
    "f_2 = {} #residual stiff problem\n",
    "u_2 = {} #boundary stiff problem\n",
    "\n",
    "L = 4\n",
    "\n",
    "#Reading gradients from JSON files and storing in a dict \n",
    "#storing gradients of both weights and biases\n",
    "\n",
    "for i in range (L):\n",
    "    \n",
    "    #weights and biases\n",
    "#     obj_text_w = codecs.open(\"Non_Stiff_problem/Gradients/gradients_f\" + str(2*i) + \".json\", 'r', encoding='utf-8').read()\n",
    "#     obj_text_b = codecs.open(\"Non_Stiff_problem/Gradients/gradients_f\" + str(2*i+1) + \".json\", 'r', encoding='utf-8').read()\n",
    "#     f_1['f'+ str(i)] = np.concatenate((np.array(json.loads(obj_text_w)).flatten('F'), np.array(json.loads(obj_text_b)).flatten('F')))\n",
    "    \n",
    "#     obj_text_w = codecs.open(\"Non_Stiff_problem/Gradients/gradients_u\" + str(2*i) + \".json\", 'r', encoding='utf-8').read()\n",
    "#     obj_text_b = codecs.open(\"Non_Stiff_problem/Gradients/gradients_u\" + str(2*i+1) + \".json\", 'r', encoding='utf-8').read()\n",
    "#     u_1['u'+ str(i)] = np.concatenate((np.array(json.loads(obj_text_w)).flatten('F'), np.array(json.loads(obj_text_b)).flatten('F')))\n",
    "    \n",
    "    obj_text_w = codecs.open(\"Stiff_problem/Gradients/gradients_f\" + str(2*i) + \".json\", 'r', encoding='utf-8').read()\n",
    "    obj_text_b = codecs.open(\"Stiff_problem/Gradients/gradients_f\" + str(2*i+1) + \".json\", 'r', encoding='utf-8').read()\n",
    "    f_2['f'+ str(i)] = np.concatenate((np.array(json.loads(obj_text_w)).flatten('F'), np.array(json.loads(obj_text_b)).flatten('F')))\n",
    "    \n",
    "    obj_text_w = codecs.open(\"Stiff_problem/Gradients/gradients_u\" + str(2*i) + \".json\", 'r', encoding='utf-8').read()\n",
    "    obj_text_b = codecs.open(\"Stiff_problem/Gradients/gradients_u\" + str(2*i+1) + \".json\", 'r', encoding='utf-8').read()\n",
    "    u_2['u'+ str(i)] = np.concatenate((np.array(json.loads(obj_text_w)).flatten('F'), np.array(json.loads(obj_text_b)).flatten('F')))\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAA6cAAAFACAYAAABAwgpjAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8vihELAAAACXBIWXMAAAsTAAALEwEAmpwYAAAmTklEQVR4nO3df7SddX0n+vcnCSH8VCoobYCCU0oJrImNKZbrjDp32ktAKxadXmCWzigVdY2OvTP3TnHp6gwXZznVKYNlUG6qTLS3Qqm1Mzim1eksHaZelF8lGkRKoDQEjYSfBfwRQr73j7MTjiGBk3P2Ps+z93m91jqLvZ+9z/5+9j558+zP8+P7VGstAAAA0KVFXRcAAAAAmlMAAAA6pzkFAACgc5pTAAAAOqc5BQAAoHOaUwAAADqnOQUAAKBzmtMxVFX3VtUvdV3HdFX1k1V1XVV9p6paVR3fdU0w33qazddW1V9U1aNVtbWqPlFVh3VdF8ynnmbzH1TVNwfZfKiq/qSqlnddF8y3PuZzuqq6avDd9me6rmUh0Jyy36pqyV4W70zyZ0neOM/lAAP7yOYLknwwyU8lOTnJ8iQfmc+6YKHbRza/leSM1toLM5XPu5J8fD7rAvaZz12P/b0kf2cey1nwNKcTpKqOqKr/WlXbquqRwe1jBo/9o6q6ZY/n/4uq+i+D2wdW1b+vqs1V9b2qurKqDho89pqq2lJVv1lVW5P8pz3Hbq19r7X2sSQ3jf6dwnjpOJufaa39WWvt+621R5L8XpJXjvxNwxjowXrzO9MWPZ3EnhkY6DKfg+ctSXJ5kveM9I3yYzSnk2VRpgL200mOS/KDJP9x8Nh1SU6oqpOnPf/NST49uP3vkvxskpdlauW4PMlvTXvu0Ul+YvDaF46mfJhYfcrmq5LcPps3AROo02xW1XFV9ehg3P8zyYfn+oZggnS97vw/klzfWvvGXN8IM1etta5rYD9V1b1Jfr219ufP87yXJflya+2Iwf2PJ3m4tfb+qjolyV9kKpzbkzyR5O+21u4ePPf0JJ9prZ1QVa9J8qUkh7fWfvg8Yy5J8lSSE1pr9872PcI46nM2B7/7y0muTfKK1tpfzepNwhgag2z+RJK3J/kfrbWvzepNwpjqYz6r6tgkX07y8tbaY1XVkpzYWts01/fLc9vnMdaMn6o6OMl/SLImyRGDxYdV1eLW2tNJPpXk6qr6QKa2Ll3bWvtRVb04ycFJbqmq3S+XZPG0l982kxUs8Gx9yGZV/WKSzyR5k8YUpvQhm0nSWnu4qj6VZENVLW+t7Zjzm4Mx13E+L0vyf7fWHhvaG2JGHNY7Wf5lkpMytVfk8EwdvpdMBTKDrbHbk/z9JOcn+f3B4w9m6lCJU1prLxz8vKC1dui017aLHWav02xW1c9n6hCot7XW/vsw3hBMiD6tN5ckeXGSw2f1TmDydJnPf5jkIzU1y/3WwbIbqur8Ob8rnpPmdHwdUFXLpv0sSXJYpsL46OAQoX+9l9/7dKaO13+qtfYXSdJa25mpSVL+w2BrU6pqeVWdsT8FVdWyJAcO7h44uA8LTa+yWVWnZmom7fe01j4/p3cG461v2Tynqk6qqkVVdVSSS5P8ZWvt4Tm9SxhPvcpnps5XXZmpc1ZfNlj2K0n+ZL/fGftFczq+1mcqsLt+/k2mDkE4KFNbjL6WqS+ke/r9JKcm+X/3WP6bSTYl+VpV/W2SP8/U1qr98YNMHeOfJN8e3IeFpm/Z/JdJjkryyap6YvBjQiQWor5lc/lgvMeTfDNTl2T71f34fZgkvcpna+2B1trWXT+DxQ+21ny3HTETIi0wg2m0H0iyqrV2V9f1AFNkE/pJNqG/5HPy2HO68LwryU0CDL0jm9BPsgn9JZ8TZuiz9VbVoiSXZOqE/ptba58a9hjMzmCq7kryhm4roSvy2U+yiWz2k2wim/0ln5NpRntOq+qqqnqgqjbusXxNVd1ZVZuq6qLB
4rOTHJOpa11uGW65zEVr7fjW2k+31v6y61oYHvkcf7I5mWRz/MnmZJLNySCfk2mmh/Wuy9Q1hnarqsVJrkhyZpIVSc6rqhWZOtn4/2ut/YtM7WoHRmtd5BP6aF1kE/poXWQTemlGh/W21q6vquP3WHxakk2ttXuSpKquydTWpfsydc2hJHl6X69ZVRcmuTBJDjnkkJf/3M/93P5VDhPolltuebC1dtT+/M6w8ymb8Gx9yObg+fIJ08gm9Nds8jmXc06XZyqwu2xJ8ookH01yeVX9/STX7+uXW2trk6xNktWrV7ebb755DqXAZKiqvxnSS806n7IJz9aHbCbyCXuSTeiv2eRz6BMitda+n+SCYb8uMHfyCf0km9BPsgnzay6Xkrk/ybHT7h8zWAZ0Tz6hn2QT+kk2oQfm0pzelOTEqjqhqpYmOTfJdcMpC5gj+YR+kk3oJ9mEHpjppWSuTnJDkpOqaktVXdBa25Hk3Um+mOSOJNe21m4fXanA3sgn9JNsQj/JJvTXTGfrPW8fy9cnWT/UioD9Ip/QT7IJ/SSb0F9zOawXAAAAhkJzCgAAQOc0pwAAAHROcwoAAEDnNKcAAAB0TnMKAABA5zSnAAAAdE5zCgAAQOc0pwAAAHROcwoAAEDnNKcAAAB0TnMKAABA5zSnAAAAdE5zCgAAQOc0pwAAAHROcwoAAEDnNKcAAAB0TnMKAABA5zSnAAAAdE5zCgAAQOc0pwAAAHROcwoAAEDnNKcAAAB0TnMKAABA54benFbVa6rqf1bVlVX1mmG/PjB78gn9JJvQT7IJ82tGzWlVXVVVD1TVxj2Wr6mqO6tqU1VdNFjckjyRZFmSLcMtF9iTfEI/ySb0k2xCf810z+m6JGumL6iqxUmuSHJmkhVJzquqFUn+Z2vtzCS/meTi4ZUK7MO6yCf00brIJvTRusgm9NKMmtPW2vVJHt5j8WlJNrXW7mmtbU9yTZKzW2s7B48/kuTAoVUK7JV8Qj/JJvSTbEJ/LZnD7y5Pct+0+1uSvKKqzklyRpIXJvmP+/rlqrowyYVJctxxx82hDGAvZp1P2YSRsu6EfpJN6IG5NKd71Vr7XJLPzeB5a5OsTZLVq1e3YdcBPNtM8imbMP+sO6GfZBPm11xm670/ybHT7h8zWAZ0Tz6hn2QT+kk2oQfm0pzelOTEqjqhqpYmOTfJdcMpC5gj+YR+kk3oJ9mEHpjppWSuTnJDkpOqaktVXdBa25Hk3Um+mOSOJNe21m4fXanA3sgn9JNsQj/JJvTXjM45ba2dt4/l65OsH2pFwH6RT+gn2YR+kk3or7kc1gsAAABDoTkFAACgc5pTAAAAOqc5BQAAoHOaUwAAADqnOQUAAKBzmlMAAAA6pzkFAACgc5pTAAAAOqc5BQAAoHOaUwAAADqnOQUAAKBzmlMAAAA6pzkFAACgc5pTAAAAOqc5BQAAoHOaUwAAADqnOQUAAKBzmlMAAAA6pzkFAACgc5pTAAAAOqc5BQAAoHOaUwAAADqnOQUAAKBzmlMAAAA6N5LmtKoOqaqbq+p1o3h9YPbkE/pJNqGfZBPmz4ya06q6qqoeqKqNeyxfU1V3VtWmqrpo2kO/meTaYRYK7J18Qj/JJvSTbEJ/zXTP6boka6YvqKrFSa5IcmaSFUnOq6oVVfXLSb6V5IEh1gns27rIJ/TRusgm9NG6yCb00pKZPKm1dn1VHb/H4tOSbGqt3ZMkVXVNkrOTHJrkkEwF+wdVtb61tnPP16yqC5NcmCTHHXfcrN8ALHTDzqdswnBYd0I/ySb014ya031YnuS+afe3JHlFa+3dSVJV/zTJg3sLcJK01tYmWZskq1evbnOoA3i2WedTNmGkrDuhn2QTemAuzelzaq2tG9VrA3Mjn9BPsgn9JJswP+YyW+/9SY6ddv+YwTKge/IJ/SSb0E+yCT0wl+b0piQnVtUJVbU0yblJrhtOWcAcySf0k2xCP8km9MBMLyVzdZIbkpxUVVuq6oLW
2o4k707yxSR3JLm2tXb76EoF9kY+oZ9kE/pJNqG/Zjpb73n7WL4+yfqhVgTsF/mEfpJN6CfZhP6ay2G9AAAAMBSaUwAAADqnOQUAAKBzmlMAAAA6pzkFAACgc5pTAAAAOqc5BQAAoHOaUwAAADqnOQUAAKBzmlMAAAA6pzkFAACgc5pTAAAAOqc5BQAAoHOaUwAAADqnOQUAAKBzmlMAAAA6pzkFAACgc5pTAAAAOqc5BQAAoHOaUwAAADqnOQUAAKBzmlMAAAA6pzkFAACgc5pTAAAAOjf05rSqTq6qK6vqs1X1rmG/PjB78gn9JJvQT7IJ82tGzWlVXVVVD1TVxj2Wr6mqO6tqU1VdlCSttTtaa+9M8mtJXjn8koHp5BP6STahn2QT+mume07XJVkzfUFVLU5yRZIzk6xIcl5VrRg89vokX0iyfmiVAvuyLvIJfbQusgl9tC6yCb00o+a0tXZ9kof3WHxakk2ttXtaa9uTXJPk7MHzr2utnZnkH+/rNavqwqq6uapu3rZt2+yqB4aeT9mE4bDuhH6STeivJXP43eVJ7pt2f0uSV1TVa5Kck+TAPMcWptba2iRrk2T16tVtDnUAzzbrfMomjJR1J/STbEIPzKU53avW2leSfGXYrwvMnXxCP8km9JNswvyay2y99yc5dtr9YwbLgO7JJ/STbEI/ySb0wFya05uSnFhVJ1TV0iTnJrluOGUBcySf0E+yCf0km9ADM72UzNVJbkhyUlVtqaoLWms7krw7yReT3JHk2tba7aMrFdgb+YR+kk3oJ9mE/prROaettfP2sXx9TKsNnZJP6CfZhH6STeivuRzWCwAAAEOhOQUAAKBzmlMAAAA6pzkFAACgc5pTAAAAOqc5BQAAoHOaUwAAADqnOQUAAKBzmlMAAAA6pzkFAACgc5pTAAAAOqc5BQAAoHOaUwAAADqnOQUAAKBzmlOAHtu+Y2daa12XAQAwcppTgJ567PtP5Wc/8Kf52Ffu7roUAICR05wC9NS2J36YJPnjW7d0XAkAwOhpTgF6q7ouAABg3mhOAQAA6JzmFKCnavvjuXfZ+Xn99j/tuhRgD+d87Kt563+6sesyACbKkq4LAGDvljyxNUnyq0/91yS/3W0xwI+5dfOjXZcAMHHsOQUAAKBzmlOA3nJ9UwBg4XBYL0DPlSYVeufldWee8jUKYKhG8n/VqnpDktcmOTzJJ1trXxrFOMD+kc0xUy4ls1DI5vj54wMvHtz6553WwWjJJsyvGR/WW1VXVdUDVbVxj+VrqurOqtpUVRclSWvtP7fW3p7knUn+9+GWDEwnm9BPsgn9JJvQX/tzzum6JGumL6iqxUmuSHJmkhVJzquqFdOe8oHB48DorItsQh+ti2xCH62LbEIvzbg5ba1dn+ThPRaflmRTa+2e1tr2JNckObum/HaSP22t3Tq8coE9ySb0k2xCP8km9NdcZ+tdnuS+afe3DJa9J8kvJXlTVb1zb79YVRdW1c1VdfO2bdvmWAawB9mcBM1ESBNo1tlM5BNGSDYnxKVfujPHX/SFbN+xs+tSmIWRTIjUWvvdJL/7PM9Zm2Rtkqxevdo3MJgHsjmeWkyMNOlmks3B8+QT5pFsjp+rvnpvkuSHO57O0iWumjlu5voXuz/JsdPuHzNYBnRLNqGfZBP6STYnxIvbg3nT4v9hs+6YmmtzelOSE6vqhKpamuTcJNfNvSxgjmRzgrjO6USRTegn2ZwQn6gP5t8f8P+kfvR416UwC/tzKZmrk9yQ5KSq2lJVF7TWdiR5d5IvJrkjybWttdtHUyqwN7I5wVzndKzJJvSTbE62l9Z3kiS1c3vHlTAbMz7ntLV23j6Wr0+yfmgVAftFNqGfZBP6STYXiEVLu66AWXCWMAAAMBGebMumbjj6aCxpTgEAgAlhnoZxpjkF6C1bfQFgf1hzjjfNKQAAMBFcG3y8aU4B+qo5NAkAZsMpp+NJcwrQc7YCA8BM2bA7zjSnAD1XVrQA
sH/sOh1LmlOAvrJeBYBZaTtt2B1HmlMAAAA6pzkFAAAmSi1y+NE40pwCAADQOc0pQF85XQYAWEA0pwA951IyADAz1pjjTXMK0HMuJQMALASaU4C+svkXAGal2a47ljSn5C83P5LXfOTLefyHT3VdCgAAsEBpTsmH/+zO3PvQ9/ONLY91XQownc2+AMACojklbXA+myMIoZ9MiAQA+8kG3rGkOeWZ7Pr+CwDAGNOSjjfNKalBU2oDE/SNLUYAwMKhOWU3X4Ohb3Ydcm/LEQDMhO+z401zisN6AQCAzmlO2X1Yr50z0E8mRAIAFgLNKTns6cdyzqLruy4DAABYwIbenFbVS6vqk1X12WG/NqNx4WOX5dKlV+bAJ+7ruhRGTD6hn2QT+kk2x5cDAsfTjJrTqrqqqh6oqo17LF9TVXdW1aaquihJWmv3tNYuGEWxjMbRO76TJFn81JMdV8JsyOckczjvOJNN6CfZhP6a6Z7TdUnWTF9QVYuTXJHkzCQrkpxXVSuGWh3zYtf5bM02pnG1LvI5oXZ2XQBzsy6yCX20LrIJvTSj5rS1dn2Sh/dYfFqSTYMtStuTXJPk7CHXx7zYdaHTbqtgduQT+kk2oZ9kc6HwxXYczeWc0+VJpp+kuCXJ8qp6UVVdmeTnq+p9+/rlqrqwqm6uqpu3bds2hzKYq93RbUI8QWadT9mEkbLuhH6SzQnh2+x4WzLsF2ytPZTknTN43toka5Nk9erV/h11qNWubRQOIZx0M8mnbPaJc04XCutO6CfZhPk1lz2n9yc5dtr9YwbLGDO7r6Foz+kkkU/oJ9mEfpLNCWGz7nibS3N6U5ITq+qEqlqa5Nwk1w2nLObTrpZ0UWlOJ4h8Qj/JJvSTbEIPzPRSMlcnuSHJSVW1paouaK3tSPLuJF9MckeSa1trt4+uVEal7fpnYM/pWJJP6CfZhH6STeivGZ1z2lo7bx/L1ydZP9SKmHdtL7cYH/I5wWwwGmuyCf0km9Bfczmsl4kxdXR+NRMiAQAA3dCckp2aU+ilVqZ1AAAWDs0pu2frLZeSgV7RmgIAC4nmlOz+CmzPKfSKM04BgIVEc0raoDetnU93WwjwY8qESADAAqI5Jc8cPOiLMAAA0A3NKdOuVqE5hT7ZNSFSySYA7BcHH40nzSm7ma0X+qV2NafWsACwf6w7x5LmlN3snYF+2bVedUkZAGAh0JzyTFNqCxMAANARzSnTaE6hT+wwBYD949vseNOcsnvPqfPaoGdEEgBYQDSnZNHub8C+CUMftdiFCgBMPs0pqQxm6bXnFAAA6IjmlGlcSgYAgPHlWKPxpjnFbL3Qc84HBwAWAs0pu/kCDP1itl4AYCHRnDKN5hT6ZFdvakIkAGAh0JwCAADQOc0p09hzCgAAdENzCgAAQOc0p+xmvykAANAVzSm7ma0X+qlsOgIAFgDNKUDPma0XAFgINKdMY+8M9JE9pwDAQqA5BeipWvTMlU4BACad5pRnOOcUesbhvADAwqE5ZTetKQAA0BXNKbuV7hR6pTmaAQBYQDSnAAAAdE5zyjT20kCfSCQAsJBoTgF6z8RIAMDk05yyW8vOrksAAAAWqCXDfsGqOiTJx5JsT/KV1tofDHsMRsQxhBNPPseL+ZAWDtmEfpJNmF8z2nNaVVdV1QNVtXGP5Wuq6s6q2lRVFw0Wn5Pks621tyd5/ZDrZQSeOWDQN+FxJJ+Ta8nj9ydJjmv3d1wJsyGb0E+yCf0108N61yVZM31BVS1OckWSM5OsSHJeVa1IckyS+wZPe3o4ZTIftKZja13kcyId+PAdXZfA3KyLbEIfrYtsQi/NqDltrV2f5OE9Fp+WZFNr7Z7W2vYk1yQ5O8mWTAV5xq9PP7jO6XiSz0lmIqRxJpvQT7IJ/TWXkC3PM1uSkqnwLk/yuSRvrKqPJ/n8vn65qi6sqpur6uZt27bNoQzmqu3+Aqw7nSCzzqds9kcrzekEsu6EfpLNCeNb7Xga+oRIrbUnk7x1
Bs9bm2Rtkqxevdq/nx5oZl+ZeDPJp2z2R8nkgmHdCf0kmzC/5rLn9P4kx067f8xgGWOqbGOaJPI5AVo5gmwCySb0k2xOHN9rx9FcvvnclOTEqjqhqpYmOTfJdcMpiy40IZ4k8jkBfrD8f0mSPFaHdVwJQySb0E+yCT0w00vJXJ3khiQnVdWWqrqgtbYjybuTfDHJHUmuba3dPrpSgb2Rz8n1dJs65/QF7fGOK2E2ZBP6STahv2Z0zmlr7bx9LF+fZP1QK6I7zm8bS/I5uXY89VTXJTAHsgn9JJvQX05o4hmaU+gVs/UCAAuJ5pRnaE6hV9riA5Mk29vijisBABg9zSm7uZQM9M3UntP72os7rgMAYPQ0pyS7Dx3UnEKfHLx06n/Ri7Oz40oAAEZPc8pu9pxCvyz77o1JkuMXfa/jSgAARk9zSnYdOuicU+iXxU9s7boEAIB5ozllGocOQp/YXAQALCSaU575AmzPKfTKvQ8+2XUJAADzRnPKM5o9p9Anm1/wC12XAAAwbzSnxDmn0E+///X7ui4BAMaU77XjSHNK2q5LybSnuy0E+DGfO/DfdF0CAIyVh3N41yUwB0NvTqtqTVXdWVWbquqiYb8+I7RTczrp5BP6STahn2Rz/Bzwyn+WJDnkgCUdV8JsDLU5rarFSa5IcmaSFUnOq6oVwxyDUZjac1o7ftBxHYySfI6fbe2FXZfAPJBN6CfZHE8v+QfvTP7VX2fxQS/ouhRmYdibFE5Lsqm1dk+SVNU1Sc5O8q09n1hVFya5cHD3R1W1cci17I8jkzzY4fh9qOHI5EMP5vwPdVhCHz6Dzv8dnDTC155RPnuWzaT7v0vX40/VcHH5DLqtofNsDh7rUz67/pv0oQbZ7L4G2Xy2rv8mfaih6/H7UEPX4yezyOewm9PlSabP4LElySv29sTW2toka5Okqm5ura0eci0z1vX4faih6/H7UEPX4++qYYQvP6N89imbfaih6/H7UEPX4/ehhj5kM+lXPrsevw81LPTx+1CDbD5b1+P3oYaux+9DDV2Pv6uG/f0dEyIBAADQuWE3p/cnOXba/WMGy4DuySf0k2xCP8kmzLNhN6c3JTmxqk6oqqVJzk1y3Qx+b+2Q69hfXY+fdF9D1+Mn3dfQ9fjJaGuYTT4n/TMZh/GT7mvoevyk+xr6ls1R1zQTXY+fdF/DQh8/6b4G2ezf+En3NXQ9ftJ9DV2Pn8yihmptuBeoraqzklyWZHGSq1pr/3aoAwCzJp/QT7IJ/SSbML+G3pwCAADA/jIhEgAAAJ3rtDmtqkuq6htVdVtVfamqfmqwvKrqd6tq0+DxVSMa/yNV9e3BGH9SVS+c9tj7BuPfWVVnjGL8wTj/qKpur6qdVbV6j8fmq4Y1gzE2VdVFoxpnjzGvqqoHpl8HrKp+oqr+W1XdNfjvESMc/9iq+nJVfWvw+b93PmuoqmVVdWNVbRiMf/Fg+QlV9fXB3+IPB+e4zDvZlE3ZlM3nqKHTfMrmwszmYCz5fO7xF3w2B2PJ5zivO1trnf0kOXza7X+e5MrB7bOS/GmSSvKLSb4+ovH/tyRLBrd/O8lvD26vSLIhyYFJTkhyd5LFI6rh5ExdoPYrSVZPWz4vNWTqHIq7k7w0ydLBmCvm4W//qiSrkmyctuzDSS4a3L5o199jROP/ZJJVg9uHJfmrwWc+LzUM/m0fOrh9QJKvD/6tX5vk3MHyK5O8a9R/i33UJ5uyKZuyua8aOs2nbC7MbA5eXz6fe/wFnc3BWPI55uvOTvecttb+dtrdQ5LsOgH27CSfblO+luSFVfWTIxj/S621HYO7X8vUFOG7xr+mtfaj1tpfJ9mU5LRhjz+o4Y7W2p17eWi+ajgtyabW2j2tte1JrhmMPVKtteuTPLzH4rOTfGpw+1NJ3jDC8b/bWrt1cPvxJHdk6mLb81LD4N/2E4O7Bwx+WpL/NclnRz3+85FN2dxjsWzK5vQaOs2nbP6YBZPNwbjy+dzjL/Rs
JvI59uvOzs85rap/W1X3JfnHSX5rsHh5kvumPW3LYNkovS1TW7W6Gn9P81VDH97rLi9prX13cHtrkpfMx6BVdXySn8/UVp55q6GqFlfVbUkeSPLfMrWl79FpK5Yu/xayuW+yKZuy+Yw+5VM2Jzybg7Hlc2YWYjbne6zns6DyOaxsjrw5rao/r6qNe/k5O0laa+9vrR2b5A+SvHu+xx885/1JdgxqGLqZ1MCPa1P7/0c+lXRVHZrkj5P8xh5bPEdeQ2vt6dbayzK1ZfO0JD83qrH2RjZlczZkc/S6zuZMahg8Z2T5lM39txCyORhjQedTNsfTQsjnsLK5ZJhF7U1r7Zdm+NQ/SLI+yb9Ocn+SY6c9dsxg2dDHr6p/muR1Sf7h4I+WYY4/kxr2Yag19GCcmfheVf1ka+27g8NdHhjlYFV1QKYC/Aettc91UUOStNYeraovJzk9U4f6LBlsZRrp30I2ZXM/yOYCyuZMahh1PmVzxhZkNpOFm0/Z7NVYz2dB5nOu2ex6tt4Tp909O8m3B7evS/KWmvKLSR6btkt6mOOvSfKvkry+tfb9aQ9dl+Tcqjqwqk5IcmKSG4c9/vOYrxpuSnJiTc2mtTTJuYOxu3Bdkn8yuP1PkvyXUQ1UVZXkk0nuaK1dOt81VNVRNZhFr6oOSvLLmTo/4MtJ3jTq8WdQn2zum2zK5oLN5qCGvuZTNic4m4Ma5PO5x1/o2Uzkc/zXnW3Es1c910+muvuNSb6R5PNJlrdnZny6IlPHKn8z02b8GvL4mzJ1XPptg58rpz32/sH4dyY5c4Sfwa9m6hjsHyX5XpIvdlDDWZma1evuJO+fp7/91Um+m+Spwfu/IMmLkvz3JHcl+fMkPzHC8f9epg5t+Ma0v/9Z81VDkr+b5C8H429M8luD5S/N1P+wNyX5oyQHzsffYy/1yaZsyqZs7quGTvMpmwszm4Ma5PO5x1/w2RyMJZ9jvO6swS8CAABAZzqfrRcAAAA0pwAAAHROcwoAAEDnNKcAAAB0TnMKAABA5zSnAAAAdE5zCgAAQOc0pwAAAHROcwoAAEDnNKcAAAB0TnMKAABA5zSnAAAAdE5zCgAAQOc0pwAAAHRuSdcFAPTFLbfc8uIlS5Z8IsmpsfGOmduZZOOOHTt+/eUvf/kDXRcDAONKcwowsGTJkk8cffTRJx911FGPLFq0qHVdD+Nh586dtW3bthVbt279RJLXd10PAIwrewYAnnHqUUcd9bcaU/bHokWL2lFHHfVYpva4AwCzpDkFeMYijSmzMfh3Y50KAHNgRQoAAEDnNKcAAAB0TnMKMMZuvPHGg4488siVN95440Fd1zIOfF4A0F+aU4Axdskllxx9/fXX33HJJZcc3XUt48DnBQD9Va2Z+wMgSTZs2HDvypUrH+y6DsbThg0bjly5cuXxXdcBAOPKnlMAAAA6pzkFGGMf+tCHjnrb2952bNd1jAufFwD015KuCwDgGT/zMz9zymOPPbb4oIMO2rlr2UMPPXTA2972tgcuv/zy+/d8/je/+c2DTzvttCfmt8r+8HkBwOSw5xSgR9785jdve+Mb3/jw5s2bN27evHnjvffeu/HII4986l3vete2vT3/jjvuOGjVqlU/mO86+8LnBQCTw55TgL34vz674di/2vr4wcN8zZ89+rDvf+RNK+97rue84x3veGjVqlUrLr/88i0HHHBAvvCFLxy2fPnyH61YsWL7ns/duXNn7r777mW9aLb+8z87Ng98a6ifV1684vt5wxWT+XkBAM9izylAjxx99NFPr1q16slrrrnmhUnyiU984si3vvWte51B+Nvf/vbSF73oRU8deuihC3badZ8XAEwOe04B9uL59nCO0tvf/vZtH/3oR19y1llnPX7jjTce+od/+If3JskHPvCBlzz00ENLtm/fvuiTn/zkfbfeeuvBJ5988rP2Ap566qknr1q16sm77rpr2eWXX775q1/96iG33nrrwa21OuSQQ57++Mc/
/qxzMefsefZwjtIwPq+VK1c+effddy9bv379ple96lUnrVq16snHH3980atf/erHf+M3fuOhU0455eSVK1c+mSTveMc7Hnz1q1/9/Xl+mwAw8TSnAD3zK7/yK4+/973v/elLLrnkJa973eseWbZsWfv85z9/2KGHHrrzgx/84P2ve93rXpokGzZsOOjUU0/9sWZr06ZNB7zsZS978tOf/vTmiy+++MVXX331EY899tjiT33qU/clyQ9/+MPq4j2N0lw/r9NPP/3x3/u939vya7/2az/94IMPLlm1atWTn/70pzcnySte8YqfPeussx5fuXLlk5/5zGc2d/H+AGCh0JwC9MyiRYty3nnnPfjhD394+U033XR7kvzRH/3REU8++eSi888//6DNmzcvTZLbbrvt4He+850/NvHPDTfccMg999yz7Pzzzz/ue9/73gEveclLdlx88cXf3fX4smXLJu6Q1rl+XnfdddeyCy644NjTTz/9ya9//esHv/zlL39y1+MHH3zwzq997WsH33XXXQedf/75xx199NE7Lr300u/M7zsEgIVBcwrQQ+973/seOOeccx5dtWrVD5Pk0UcfXXzdddf99V133bX0sssuO+pv/uZvDvjWt7518BlnnPH49N+7+eabD/6d3/md+04//fQfnHHGGX/nkUceWXzAAQfsbkh37NiRJUsm73/9c/m8PvrRj963cuXKHyXJe9/73p86//zzH0mSG2644aDly5dvv+WWWw6+7LLLNr/yla80kRIAjNDkfUMBmACHH374zl/4hV/44a77r33tax9785vffNzSpUvbEUccsWPNmjUnXnrppZv3nNzntttuO/jBBx9c8rGPfSwnnXTSD97ylrc8/J73vOeYF73oRTueeOKJxVdeeeV9Rx555NPz/45Ga7af17e//e2DTjnllB/tun/bbbcd/NBDDy1Zu3ZtW7x4cbv88su3vOENb3jp1q1bD7jiiivamjVr/vYtb3nLo/P41gBgwajWJu4IL4BZ2bBhw70rV67c60yv8Hw2bNhw5MqVK4/vug4AGFcuJQMAAEDnNKcAAAB0TnMKAABA5zSnAM/YuXPnzom7DiijN/h3s7PrOgBgnGlOAZ6xcdu2bS/QoLI/du7cWdu2bXtBko1d1wIA48ylZAAGduzY8etbt279xNatW0+NjXfM3M4kG3fs2PHrXRcCAOPMpWQAAADonD0DAAAAdE5zCgAAQOc0pwAAAHROcwoAAEDnNKcAAAB07v8HQQsahRwryZ4AAAAASUVORK5CYII=\n",
      "text/plain": [
       "<Figure size 936x288 with 4 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Plot the distribution of back-propagated gradients for each hidden layer,\n",
    "# comparing the boundary-condition loss gradients against the PDE-residual\n",
    "# loss gradients (a standard diagnostic for stiff / imbalanced PINN training).\n",
    "# NOTE: sns.distplot is deprecated in seaborn >= 0.11; this notebook pins\n",
    "# seaborn 0.9.0 (see comment below), where it is still available.\n",
    "#seaborn = 0.9.0\n",
    "\n",
    "fig = plt.figure(4, figsize=(13, 4))\n",
    "\n",
    "for i in range(L):\n",
    "\n",
    "    # One panel per layer; use L (not a hard-coded 4) so the grid\n",
    "    # stays consistent with the loop bound if the network depth changes.\n",
    "    ax = plt.subplot(1, L, i + 1)\n",
    "    ax.set_title('Layer {}'.format(i + 1))\n",
    "    ax.set_yscale('symlog')  # gradient densities span several orders of magnitude\n",
    "\n",
    "    gradients_res = f_2['f'+ str(i)]  # PDE-residual loss gradients for layer i\n",
    "    gradients_bcs = u_2['u'+ str(i)]  # boundary-condition loss gradients for layer i\n",
    "\n",
    "    sns.distplot(gradients_bcs, hist=False,\n",
    "                 kde_kws={\"shade\": False},\n",
    "                 norm_hist=False, label=r'$\\nabla_{\\theta} \\hat J_{BC}$')\n",
    "\n",
    "    sns.distplot(gradients_res, hist=False,\n",
    "                 kde_kws={\"shade\": False},\n",
    "                 norm_hist=False, label=r'$\\nabla_{\\theta} \\hat J_{PDE}$')\n",
    "\n",
    "    # Suppress the per-axis legend; one shared figure-level legend is added below.\n",
    "    ax.get_legend().remove()\n",
    "    ax.set_xlim([-3e1, 3e1])\n",
    "    ax.set_ylim([0, 1e6])\n",
    "\n",
    "# Build a single legend from the last axis (all panels share the same labels).\n",
    "handles, labels = ax.get_legend_handles_labels()\n",
    "fig.legend(handles, labels, loc=\"upper left\", bbox_to_anchor=(0.35, -0.01),\n",
    "             borderaxespad=0, bbox_transform=fig.transFigure, ncol=2)\n",
    "plt.tight_layout()\n",
    "plt.show()\n",
    "\n",
    "# fig.savefig('Gradient_histogram_stiff.png', dpi = 500, bbox_inches='tight')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
