{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "view-in-github"
   },
   "source": [
    "<a href=\"https://colab.research.google.com/github/mfrdixon/ML_Finance_Codes/blob/master/Wealth_Management_GIRL.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "Vc1tLT83PQMd"
   },
   "outputs": [],
   "source": [
    "# ML_in_Finance-GIRL-wealth-management\n",
    "# Author: Igor Halperin and Matthew Dixon\n",
    "# Version: 1.0 (12.8.2019)\n",
    "# License: MIT\n",
    "# Email: ighalp@gmail.com\n",
    "# Notes: tested on Mac OS X with Python 3.6 and PyTorch 1.3.0\n",
    "# Citation: Please cite the following reference if this notebook is used for research purposes:\n",
    "# Dixon M.F., I. Halperin and P. Bilokon, Machine Learning in Finance: From Theory to Practice, Springer Graduate textbook Series, 2020. "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "uXdeHDmEPQM6"
   },
   "source": [
    "## G-learing and GIRL for wealth optimization\n",
    "\n",
    "This notebook demonstrates the application of G-learning and GIRL for optimization of a defined contribution retirement plan. The notebook extends the G-learning notebook in Chapter 10 with an example of applying GIRL to infer the parameters of the G-learner used to generate the trajectories."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "vOoE16hWPQNC"
   },
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import matplotlib\n",
    "matplotlib.use('Agg')\n",
    "import matplotlib.pyplot as plt \n",
    "%matplotlib inline\n",
    "\n",
    "import time\n",
    "from scipy.optimize import minimize\n",
    "import torch\n",
    "\n",
    "import torch.optim as optim\n",
    "\n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 34
    },
    "colab_type": "code",
    "id": "M-6MadDnPQNO",
    "outputId": "176e0a88-271e-4be2-9d69-1f13e1520534"
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'1.5.0'"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.__version__"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 34
    },
    "colab_type": "code",
    "id": "PHyhUTpJ4nl1",
    "outputId": "18b1c78b-d6c6-4453-9755-65390364ab53"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "env: KMP_DUPLICATE_LIB_OK=TRUE\n"
     ]
    }
   ],
   "source": [
    "%env KMP_DUPLICATE_LIB_OK=TRUE"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "S0yKeMU9PQNa"
   },
   "outputs": [],
   "source": [
    "# set the device\n",
    "device = 'cuda' if torch.cuda.is_available() else 'cpu'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "qLivr9yuPQON"
   },
   "outputs": [],
   "source": [
    "# Define the G-learning portfolio optimization class\n",
    "class G_learning_portfolio_opt:\n",
    "    \n",
    "    def __init__(self, \n",
    "                 num_steps,\n",
    "                 params,\n",
    "                 beta,\n",
    "                 benchmark_portf,\n",
    "                 gamma, \n",
    "                 num_risky_assets,\n",
    "                 riskfree_rate,\n",
    "                 exp_returns, # array of shape num_steps x num_stocks\n",
    "                 Sigma_r,     # covariance matrix of returns of risky assets\n",
    "                 init_x_vals, # array of initial asset position values (num_risky_assets + 1)\n",
    "                 use_for_WM = True): # use for wealth management tasks\n",
    "\n",
    "                \n",
    "        self.num_steps = num_steps\n",
    "        self.num_assets = num_risky_assets + 1 # exp_returns.shape[1]\n",
    "        \n",
    "        self.lambd = torch.tensor(params[0], requires_grad=False, dtype=torch.float64)\n",
    "        self.Omega_mat = params[1] * torch.eye(self.num_assets,dtype=torch.float64)\n",
    "        #self.Omega_mat = torch.tensor(Omega_mat,requires_grad=False, dtype=torch.float64)\n",
    "        self.eta = torch.tensor(params[2], requires_grad=False, dtype=torch.float64)\n",
    "        self.rho = torch.tensor(params[3], requires_grad=False, dtype=torch.float64)\n",
    "        self.beta = torch.tensor(beta, requires_grad=False, dtype=torch.float64)\n",
    "        \n",
    "        self.gamma = gamma\n",
    "        self.use_for_WM = use_for_WM\n",
    "        \n",
    "        self.num_risky_assets = num_risky_assets\n",
    "        self.r_f = riskfree_rate\n",
    "        \n",
    "        \n",
    "        assert exp_returns.shape[0] == self.num_steps\n",
    "        assert Sigma_r.shape[0] == Sigma_r.shape[1]\n",
    "        assert Sigma_r.shape[0] == num_risky_assets # self.num_assets\n",
    "        \n",
    "        self.Sigma_r_np = Sigma_r # array of shape num_stocks x num_stocks\n",
    "        \n",
    "        self.reg_mat = 1e-3*torch.eye(self.num_assets, dtype=torch.float64)\n",
    "        \n",
    "        # arrays of returns for all assets including the risk-free asset\n",
    "        # array of shape num_steps x (num_stocks + 1) \n",
    "        self.exp_returns_np = np.hstack((self.r_f * np.ones(self.num_steps).reshape((-1,1)), exp_returns))\n",
    "                                      \n",
    "        # make block-matrix Sigma_r_tilde with Sigma_r_tilde[0,0] = 0, and equity correlation matrix inside\n",
    "        self.Sigma_r_tilde_np = np.zeros((self.num_assets, self.num_assets))\n",
    "        self.Sigma_r_tilde_np[1:,1:] = self.Sigma_r_np\n",
    "            \n",
    "        # make Torch tensors  \n",
    "        self.exp_returns = torch.tensor(self.exp_returns_np,requires_grad=False, dtype=torch.float64)\n",
    "        self.Sigma_r = torch.tensor(Sigma_r,requires_grad=False, dtype=torch.float64)\n",
    "        self.Sigma_r_tilde = torch.tensor(self.Sigma_r_tilde_np,requires_grad=False, dtype=torch.float64)\n",
    "        \n",
    "        self.benchmark_portf = torch.tensor(benchmark_portf, requires_grad=False, dtype=torch.float64)\n",
    "        \n",
    "        # asset holding values for all times. Initialize with initial values, \n",
    "        # values for the future times will be expected values \n",
    "        self.x_vals_np = np.zeros((self.num_steps, self.num_assets))\n",
    "        self.x_vals_np[0,:] = init_x_vals \n",
    "        \n",
    "        # Torch tensor\n",
    "        self.x_vals = torch.tensor(self.x_vals_np)\n",
    "                \n",
    "        # allocate memory for coefficients of R-, F- and G-functions        \n",
    "        self.F_xx = torch.zeros(self.num_steps, self.num_assets, self.num_assets, dtype=torch.float64,\n",
    "                                requires_grad=True)\n",
    "        self.F_x = torch.zeros(self.num_steps, self.num_assets, dtype=torch.float64,\n",
    "                               requires_grad=True)\n",
    "        self.F_0 = torch.zeros(self.num_steps,dtype=torch.float64,requires_grad=True)\n",
    "        \n",
    "        self.Q_xx = torch.zeros(self.num_steps, self.num_assets, self.num_assets,dtype=torch.float64,\n",
    "                                requires_grad=True)\n",
    "        self.Q_uu = torch.zeros(self.num_steps, self.num_assets, self.num_assets,dtype=torch.float64,\n",
    "                                requires_grad=True)\n",
    "        self.Q_ux = torch.zeros(self.num_steps, self.num_assets, self.num_assets,dtype=torch.float64,\n",
    "                                requires_grad=True)\n",
    "        self.Q_x = torch.zeros(self.num_steps, self.num_assets,dtype=torch.float64,requires_grad=True)\n",
    "        self.Q_u = torch.zeros(self.num_steps, self.num_assets,dtype=torch.float64,requires_grad=True)\n",
    "        self.Q_0 = torch.zeros(self.num_steps,dtype=torch.float64,requires_grad=True)\n",
    "        \n",
    "        self.R_xx = torch.zeros(self.num_steps, self.num_assets, self.num_assets,dtype=torch.float64,\n",
    "                                requires_grad=True)\n",
    "        self.R_uu = torch.zeros(self.num_steps, self.num_assets, self.num_assets,dtype=torch.float64,\n",
    "                                requires_grad=True)\n",
    "        self.R_ux = torch.zeros(self.num_steps, self.num_assets, self.num_assets,dtype=torch.float64,\n",
    "                                requires_grad=True)\n",
    "        self.R_x = torch.zeros(self.num_steps, self.num_assets,dtype=torch.float64,requires_grad=True)\n",
    "        self.R_u = torch.zeros(self.num_steps, self.num_assets,dtype=torch.float64,requires_grad=True)\n",
    "        self.R_0 = torch.zeros(self.num_steps,dtype=torch.float64,requires_grad=True)\n",
    "\n",
    "        \n",
    "        self.reset_prior_policy()\n",
    "        \n",
    "        # the list of adjustable model parameters:\n",
    "        self.model_params = [self.lambd, self.beta, self.Omega_mat, self.eta]  \n",
    "#                              self.exp_returns, self.Sigma_r_tilde,self.Sigma_prior_inv, self.u_bar_prior]\n",
    "        \n",
    "        \n",
    "        # expected cash installment for all steps\n",
    "        self.expected_c_t = torch.zeros(self.num_steps,dtype=torch.float64)\n",
    "        \n",
    "        # realized values of the target portfolio\n",
    "        self.realized_target_portf = np.zeros(self.num_steps,dtype=np.float64)\n",
    "        \n",
    "        # expected portfolio values for all times\n",
    "        self.expected_portf_val = torch.zeros(self.num_steps,dtype=torch.float64)\n",
    "        \n",
    "        # the first value is the sum of initial position values\n",
    "        self.expected_portf_val[0] = self.x_vals[0,:].sum()\n",
    "\n",
    "    def reset_prior_policy(self):\n",
    "        # initialize time-dependent parameters of prior policy \n",
    "        self.u_bar_prior = torch.zeros(self.num_steps,self.num_assets,requires_grad=False,\n",
    "                                       dtype=torch.float64)\n",
    "        self.v_bar_prior =  torch.zeros(self.num_steps, self.num_assets, self.num_assets,requires_grad=False,\n",
    "                                        dtype=torch.float64)\n",
    "        self.Sigma_prior =  torch.zeros(self.num_steps, self.num_assets, self.num_assets,requires_grad=False,\n",
    "                                        dtype=torch.float64)\n",
    "        self.Sigma_prior_inv = torch.zeros(self.num_steps, self.num_assets, self.num_assets,requires_grad=False,\n",
    "                                        dtype=torch.float64)\n",
    "        \n",
    "        # make each time elements of v_bar_prior and Sigma_prior proportional to the unit matrix\n",
    "        for t in range(self.num_steps):\n",
    "            self.v_bar_prior[t,:,:] = 0.1 * torch.eye(self.num_assets).clone()\n",
    "            self.Sigma_prior[t,:,:] = 0.1 * torch.eye(self.num_assets).clone()\n",
    "            self.Sigma_prior_inv[t,:,:] = 10.0 * torch.eye(self.num_assets).clone() # np.linalg.inv(self.Sigma_prior[t,:,:])\n",
    "    \n",
    "    def reward_fun(self, t, x_vals, u_vals, exp_rets, lambd, Sigma_hat):\n",
    "        \"\"\"\n",
    "        The reward function \n",
    "        \"\"\"\n",
    "        x_plus = x_vals + u_vals\n",
    "        \n",
    "        p_hat = self.rho.clone() * self.benchmark_portf[t] + (1-self.rho.clone())*self.eta.clone()*x_vals.sum()\n",
    "        \n",
    "        aux_1 = - self.lambd.clone() * p_hat**2         \n",
    "        aux_2 = - u_vals.sum()   \n",
    "        aux_3 = 2*self.lambd.clone() * p_hat * x_plus.dot(torch.ones(num_assets) + exp_rets)\n",
    "        aux_4 = - self.lambd.clone() * x_plus.mm(Sigma_hat.mv(x_plus))\n",
    "        aux_5 = - u_vals.mm(self.Omega_mat.clone().mv(u_vals))\n",
    "        \n",
    "        return aux_1 + aux_2 + aux_3 + aux_4 + aux_5  \n",
    "    \n",
    "    def compute_reward_fun(self):\n",
    "        \"\"\"\n",
    "        Compute coefficients R_xx, R_ux, etc. for all steps\n",
    "        \"\"\"\n",
    "        for t in range(0, self.num_steps):\n",
    "            \n",
    "            one_plus_exp_ret = torch.ones(self.num_assets,dtype=torch.float64) + self.exp_returns[t,:]\n",
    "            benchmark_portf = self.benchmark_portf[t]\n",
    "            Sigma_hat = self.Sigma_r_tilde + torch.ger(one_plus_exp_ret, one_plus_exp_ret)\n",
    "            \n",
    "            one_plus_exp_ret_by_one = torch.ger(one_plus_exp_ret,torch.ones(self.num_assets,dtype=torch.float64))\n",
    "            one_plus_exp_ret_by_one_T = one_plus_exp_ret_by_one.t()     \n",
    "            one_one_T_mat = torch.ones(self.num_assets,self.num_assets)\n",
    "            \n",
    "            self.R_xx[t,:,:] = (-self.lambd.clone()*(self.eta.clone()**2)*(self.rho.clone()**2)*one_one_T_mat\n",
    "                                 + 2*self.lambd.clone()*self.eta.clone()*self.rho.clone()*one_plus_exp_ret_by_one\n",
    "                                 - self.lambd.clone()*Sigma_hat)\n",
    "            \n",
    "            self.R_ux[t,:,:] = (2*self.lambd.clone()*self.eta.clone()*self.rho.clone()*one_plus_exp_ret_by_one\n",
    "                                 - 2*self.lambd.clone()*Sigma_hat)\n",
    "            \n",
    "            self.R_uu[t,:,:] = - self.lambd.clone() * Sigma_hat - self.Omega_mat.clone()\n",
    "            \n",
    "            self.R_x[t,:] =  (-2*self.lambd.clone()*self.eta.clone()*self.rho.clone()*(1-self.rho.clone())*benchmark_portf *\n",
    "                                 torch.ones(self.num_assets,dtype=torch.float64)\n",
    "                                 + 2*self.lambd.clone()*(1-self.rho.clone())*benchmark_portf * one_plus_exp_ret)\n",
    "            \n",
    "            self.R_u[t,:] = (2*self.lambd.clone()*(1-self.rho.clone())*benchmark_portf * one_plus_exp_ret\n",
    "                             - torch.ones(self.num_assets,dtype=torch.float64))\n",
    "            \n",
    "            self.R_0[t] = - self.lambd.clone()*((1-self.rho.clone())**2) * (benchmark_portf**2)\n",
    "                \n",
    "         \n",
    "    def project_cash_injections(self):\n",
    "        \"\"\"\n",
    "        Compute the expected values of future asset positions, and the expected cash injection for future steps,\n",
    "        as well as realized values of the target portfolio\n",
    "        \"\"\"\n",
    "           \n",
    "        # this assumes that the policy is trained\n",
    "        for t in range(1, self.num_steps):  # the initial value is fixed \n",
    "            \n",
    "            # increment the previous x_t\n",
    "            \n",
    "            delta_x_t = self.u_bar_prior[t,:] + self.v_bar_prior[t,:,:].mv(self.x_vals[t-1,:])\n",
    "            self.x_vals[t,:] = self.x_vals[t-1,:] + delta_x_t\n",
    "            \n",
    "            # grow using the expected return\n",
    "            self.x_vals[t,:] = (torch.ones(self.num_assets)+ self.exp_returns[t,:])*self.x_vals[t,:]\n",
    "            \n",
    "            # compute c_t\n",
    "            self.expected_c_t[t] = delta_x_t.sum().data # detach().numpy()\n",
    "            \n",
    "            # expected portfolio value for this step\n",
    "            self.expected_portf_val[t] = self.x_vals[t,:].sum().data # .detach().numpy()\n",
    "    \n",
    "            \n",
    "                                                                                      \n",
    "    def set_terminal_conditions(self):\n",
    "        \"\"\"\n",
    "        set the terminal condition for the F-function\n",
    "        \"\"\"\n",
    "        \n",
    "        # the auxiliary quantity to perform matrix calculations\n",
    "        one_plus_exp_ret = torch.ones(self.num_assets,dtype=torch.float64) + self.exp_returns[-1,:]\n",
    "        \n",
    "        \n",
    "        # Compute the reward function for all steps (only the last step is needed for this functions, while \n",
    "        # values for other time steps will be used in other functions)\n",
    "        self.compute_reward_fun()\n",
    "        \n",
    "        if self.use_for_WM:\n",
    "\n",
    "            Sigma_hat = self.Sigma_r_tilde + torch.ger(one_plus_exp_ret, one_plus_exp_ret)\n",
    "            Sigma_hat_inv = torch.inverse(Sigma_hat + self.reg_mat)\n",
    "            \n",
    "            Sigma_tilde = Sigma_hat + (1/self.lambd)*self.Omega_mat.clone()\n",
    "            #Sigma_tilde_inv = torch.pinverse(Sigma_tilde)\n",
    "            Sigma_tilde_inv = torch.inverse(Sigma_tilde + self.reg_mat)\n",
    "            \n",
    "            Sigma_hat_sigma_tilde = Sigma_hat.mm(Sigma_tilde)\n",
    "            Sigma_tilde_inv_sig_hat = Sigma_tilde_inv.mm(Sigma_hat)\n",
    "            Sigma_tilde_sigma_hat = Sigma_tilde.mm(Sigma_hat)\n",
    "            \n",
    "            Sigma_hat_Sigma_tilde_inv = Sigma_hat.mm(Sigma_tilde_inv)\n",
    "            Sigma_3_plus_omega = self.lambd*Sigma_tilde_inv.mm(Sigma_hat_Sigma_tilde_inv) + self.Omega_mat.clone()    \n",
    "                             \n",
    "            one_plus_exp_ret_by_one = torch.ger(one_plus_exp_ret,torch.ones(self.num_assets,dtype=torch.float64))\n",
    "            one_plus_exp_ret_by_one_T = one_plus_exp_ret_by_one.t()     \n",
    "            one_one_T_mat = torch.ones(self.num_assets,self.num_assets)\n",
    "            \n",
    "            Sigma_tilde_inv_t_R_ux = Sigma_tilde_inv.t().mm(self.R_ux[-1,:,:].clone())\n",
    "            Sigma_tilde_inv_t_R_uu = Sigma_tilde_inv.t().mm(self.R_uu[-1,:,:].clone())\n",
    "            Sigma_tilde_inv_t_R_u = Sigma_tilde_inv.t().mv(self.R_u[-1,:].clone())\n",
    "            \n",
    "            Sigma_tilde_inv_R_u = Sigma_tilde_inv.mv(self.R_u[-1,:].clone())\n",
    "            Sigma_tilde_inv_R_ux = Sigma_tilde_inv.mm(self.R_ux[-1,:,:].clone())\n",
    "            Sigma_tilde_inv_t_R_uu = Sigma_tilde_inv.mm(self.R_uu[-1,:,:].clone())\n",
    "            \n",
    "            # though the action at the last step is deterministic, we can feed \n",
    "            # parameters of the prior with these values                     \n",
    "              \n",
    "            self.u_bar_prior[-1,:]   = (1/(2 * self.lambd.clone()))* Sigma_tilde_inv.clone().mv(self.R_u[-1,:].clone())\n",
    "            self.v_bar_prior[-1,:,:] = (1/(2 * self.lambd.clone()))* Sigma_tilde_inv.clone().mm(self.R_ux[-1,:,:].clone())    \n",
    "                \n",
    "            # First compute the coefficients of the reward function at the last step\n",
    "\n",
    "            \n",
    "            # the coefficients of F-function for the last step\n",
    "            \n",
    "            # F_xx                 \n",
    "            self.F_xx[-1,:,:] = (self.R_xx[-1,:,:].clone()\n",
    "                                 + (1/(2*self.lambd.clone()))* self.R_ux[-1,:,:].clone().t().mm(Sigma_tilde_inv_t_R_ux)\n",
    "                                 + (1/(4*self.lambd.clone()**2))* self.R_ux[-1,:,:].clone().t().mm(\n",
    "                                      Sigma_tilde_inv_t_R_uu.clone().mm(Sigma_tilde_inv.clone().mm(self.R_ux[-1,:,:].clone())))\n",
    "                                )\n",
    "            \n",
    "            # F_x                    \n",
    "            self.F_x[-1,:] = (self.R_x[-1,:].clone()\n",
    "                                 + (1/(self.lambd.clone()))* self.R_ux[-1,:,:].clone().t().mv(Sigma_tilde_inv_t_R_u.clone())\n",
    "                                 + (1/(2*self.lambd.clone()**2))* self.R_ux[-1,:,:].clone().t().mv(\n",
    "                                      Sigma_tilde_inv_t_R_uu.clone().mv(Sigma_tilde_inv_R_u.clone()))\n",
    "                            )\n",
    "            \n",
    "            \n",
    "        \n",
    "            # F_0   \n",
    "            self.F_0[-1] = (self.R_0[-1].clone() \n",
    "                            +  (1/(2*self.lambd.clone()))* self.R_u[-1,:].clone().dot(Sigma_tilde_inv_R_u.clone())\n",
    "                            + (1/(4*self.lambd.clone()**2))* self.R_u[-1,:].clone().dot(\n",
    "                                Sigma_tilde_inv_t_R_uu.clone().mv(Sigma_tilde_inv_R_u.clone()))\n",
    "                           )\n",
    "            \n",
    "            # for the Q-function at the last step:\n",
    "            self.Q_xx[-1,:,:] = self.R_xx[-1,:,:].clone()\n",
    "            self.Q_ux[-1,:,:] = self.R_ux[-1,:,:].clone()\n",
    "            self.Q_uu[-1,:,:] = self.R_uu[-1,:,:].clone()\n",
    "            self.Q_u[-1,:] = self.R_u[-1,:].clone()\n",
    "            self.Q_x[-1,:] = self.R_x[-1,:].clone()\n",
    "            self.Q_0[-1] = self.R_0[-1].clone()\n",
    "            \n",
    "            \n",
    "            \n",
    "            \n",
    "                \n",
    "    def G_learning(self, err_tol, max_iter):\n",
    "        \"\"\"\n",
    "        find the optimal policy for the time dependent policy\n",
    "        \n",
    "        \"\"\"   \n",
    "        print('Doing G-learning, it may take a few seconds...')\n",
    "        \n",
    "        # set terminal conditions\n",
    "        self.set_terminal_conditions()\n",
    "        \n",
    "        # allocate iteration numbers for all steps\n",
    "        self.iter_counts = np.zeros(self.num_steps)\n",
    "        \n",
    "        # iterate over time steps backward\n",
    "        for t in range(self.num_steps-2,-1,-1):\n",
    "            self.step_G_learning(t, err_tol, max_iter)\n",
    "            \n",
    "    def step_G_learning(self, t, err_tol, max_iter):\n",
    "        \"\"\"\n",
    "        Perform one step of backward iteration for G-learning self-consistent equations\n",
    "        This should start from step t = num_steps - 2 (i.e. from a step that is before the last one)\n",
    "        \"\"\"\n",
    "            \n",
    "        # make matrix Sigma_hat_t        \n",
    "        one_plus_exp_ret = torch.ones(self.num_assets,dtype=torch.float64) + self.exp_returns[t,:]\n",
    "        Sigma_hat_t = self.Sigma_r_tilde + torch.ger(one_plus_exp_ret, one_plus_exp_ret)\n",
    "        \n",
    "        # matrix A_t = diag(1 + r_bar_t)\n",
    "        A_t = torch.diag(torch.ones(self.num_assets,dtype=torch.float64) + self.exp_returns[t,:])\n",
    "                    \n",
    "        # update parameters of Q_function using next-step F-function values\n",
    "        self.update_Q_params(t, A_t,Sigma_hat_t)\n",
    "             \n",
    "        # iterate between policy evaluation and policy improvement  \n",
    "        while self.iter_counts[t] < max_iter:\n",
    "                \n",
    "            curr_u_bar_prior = self.u_bar_prior[t,:].clone()  \n",
    "            curr_v_bar_prior = self.v_bar_prior[t,:,:].clone()     \n",
    "                \n",
    "            # compute parameters of F-function for this step from parameters of Q-function\n",
    "            self.update_F_params(t) \n",
    "              \n",
    "            # Policy iteration step: update parameters of the prior policy distribution\n",
    "            # with given Q- and F-function parameters\n",
    "            self.update_policy_params(t)    \n",
    "            \n",
    "            # difference between the current value of u_bar_prior and the previous one\n",
    "            err_u_bar = torch.sum((curr_u_bar_prior - self.u_bar_prior[t,:])**2)\n",
    "            \n",
    "            # divide by num_assets in err_v_bar to get both errors on a comparable scale\n",
    "            err_v_bar = (1/self.num_assets)*torch.sum((curr_v_bar_prior - self.v_bar_prior[t,:,:])**2)\n",
    "            \n",
    "            # choose the difference from the previous iteration as the maximum of the two errors\n",
    "            tol = torch.max(err_u_bar, err_v_bar)  # tol = 0.5*(err_u_bar + err_v_bar)\n",
    "            \n",
    "            self.iter_counts[t] += 1\n",
    "            # Repeat the calculation of Q- and F-values\n",
    "            if tol <= err_tol:\n",
    "                break\n",
    "                \n",
    "            \n",
    "    def update_Q_params(self,t, A_t,Sigma_hat_t):\n",
    "        \"\"\"\n",
    "        update the current (time-t) parameters of Q-function from (t+1)-parameters of F-function\n",
    "        \"\"\" \n",
    "                \n",
    "        ones = torch.ones(self.num_assets,dtype=torch.float64)    \n",
    "        one_plus_exp_ret = torch.ones(self.num_assets,dtype=torch.float64) + self.exp_returns[t,:]\n",
    "    \n",
    "        self.Q_xx[t,:,:] = (self.R_xx[t,:,:].clone() \n",
    "                            + self.gamma *( (A_t.clone().mm(self.F_xx[t+1,:,:].clone())).mm(A_t.clone())  \n",
    "                                           + self.Sigma_r_tilde.clone() * self.F_xx[t+1,:,:].clone() ) )\n",
    "\n",
    "\n",
    "        self.Q_ux[t,:,:] = (self.R_ux[t,:,:].clone() \n",
    "                            + 2 * self.gamma *( (A_t.clone().mm(self.F_xx[t+1,:,:].clone())).mm(A_t.clone())  \n",
    "                                           + self.Sigma_r_tilde.clone() * self.F_xx[t+1,:,:].clone() ) \n",
    "                           )\n",
    "    \n",
    "        self.Q_uu[t,:,:] = (self.R_uu[t,:,:].clone()  \n",
    "                            + self.gamma *( (A_t.clone().mm(self.F_xx[t+1,:,:].clone())).mm(A_t.clone())  \n",
    "                                           + self.Sigma_r_tilde.clone() * self.F_xx[t+1,:,:].clone() )\n",
    "                            - self.Omega_mat.clone()\n",
    "                           )\n",
    "\n",
    "\n",
    "        self.Q_x[t,:] = self.R_x[t,:].clone() + self.gamma * A_t.clone().mv(self.F_x[t+1,:].clone()) \n",
    "        self.Q_u[t,:] = self.R_u[t,:].clone() + self.gamma * A_t.clone().mv(self.F_x[t+1,:].clone())\n",
    "        self.Q_0[t]   = self.R_0[t].clone() + self.gamma * self.F_0[t+1].clone()\n",
    "\n",
    "\n",
    "        \n",
    "    def update_F_params(self,t):\n",
    "        \"\"\"\n",
    "        update the current (time-t) parameters of F-function from t-parameters of G-function\n",
    "        This is a policy evaluation step: it uses the current estimations of the mean parameters of the policy\n",
    "        \n",
    "        \"\"\"\n",
    "        \n",
    "        # produce auxiliary parameters U_t, W_t, Sigma_tilde_t\n",
    "        U_t = (self.beta.clone() * self.Q_ux[t,:,:].clone() \n",
    "               + self.Sigma_prior_inv[t,:,:].clone().mm(self.v_bar_prior[t,:,:].clone()))\n",
    "        W_t = (self.beta.clone() * self.Q_u[t,:].clone() \n",
    "               +  self.Sigma_prior_inv[t,:,:].clone().mv(self.u_bar_prior[t,:]).clone())\n",
    "        Sigma_p_bar =  self.Sigma_prior_inv[t,:,:].clone() - 2 * self.beta.clone() * self.Q_uu[t,:,:].clone()\n",
    "        Sigma_p_bar_inv = torch.inverse(Sigma_p_bar + self.reg_mat)\n",
    "        \n",
    "        # update parameters of F-function\n",
    "        self.F_xx[t,:,:] = self.Q_xx[t,:,:].clone() + (1/(2*self.beta.clone()))*(U_t.t().mm(Sigma_p_bar_inv.clone().mm(U_t))\n",
    "                                    - self.v_bar_prior[t,:,:].clone().t().mm(\n",
    "                                        self.Sigma_prior_inv[t,:,:].clone().mm(self.v_bar_prior[t,:,:].clone())))\n",
    "        \n",
    "        \n",
    "        self.F_x[t,:] = self.Q_x[t,:].clone() + (1/self.beta.clone())*(U_t.mv(Sigma_p_bar_inv.clone().mv(W_t))\n",
    "                                    - self.v_bar_prior[t,:,:].clone().mv(\n",
    "                                        self.Sigma_prior_inv[t,:,:].clone().mv(self.u_bar_prior[t,:].clone())))\n",
    "        \n",
    "        \n",
    "        self.F_0[t] = self.Q_0[t].clone() + ( (1/(2*self.beta.clone()))*(W_t.dot(Sigma_p_bar_inv.clone().mv(W_t))\n",
    "                                    - self.u_bar_prior[t,:].clone().dot(\n",
    "                                        self.Sigma_prior_inv[t,:,:].clone().mv(self.u_bar_prior[t,:].clone())))\n",
    "                                    - (1/(2*self.beta.clone())) * (torch.log(torch.det(self.Sigma_prior[t,:,:].clone()+\n",
    "                                                                              self.reg_mat))\n",
    "                                                       - torch.log(torch.det(Sigma_p_bar_inv.clone() + self.reg_mat))) )\n",
    "        \n",
    "        \n",
    "        \n",
    "    def update_policy_params(self,t):\n",
    "        \"\"\"\n",
    "        update parameters of the Gaussian policy using current coefficients of the F- and G-functions\n",
    "        \"\"\"\n",
    "        \n",
    "        new_Sigma_prior_inv = self.Sigma_prior_inv[t,:,:].clone() - 2 * self.beta.clone() * self.Q_uu[t,:,:].clone()\n",
    "        \n",
    "        Sigma_prior_new = torch.inverse(new_Sigma_prior_inv + self.reg_mat)\n",
    "        \n",
    "        # update parameters using the previous value of Sigma_prior_inv\n",
    "        self.u_bar_prior[t,:] = Sigma_prior_new.mv(self.Sigma_prior_inv[t,:,:].clone().mv(self.u_bar_prior[t,:].clone())\n",
    "                                              + self.beta.clone() * self.Q_u[t,:].clone())\n",
    "        \n",
    "        \n",
    "        self.v_bar_prior[t,:,:] = Sigma_prior_new.clone().mm(self.Sigma_prior_inv[t,:,:].clone().mm(self.v_bar_prior[t,:,:].clone())\n",
    "                                              + self.beta.clone() * self.Q_ux[t,:,:].clone())\n",
    "        \n",
    "        # and then assign the new inverse covariance for the prior for the next iteration\n",
    "        self.Sigma_prior[t,:,:] = Sigma_prior_new.clone()\n",
    "        self.Sigma_prior_inv[t,:,:] = new_Sigma_prior_inv.clone()\n",
    "        \n",
    "        # also assign the same values for the previous time step\n",
    "        if t > 0:\n",
    "            self.Sigma_prior[t-1,:,:] = self.Sigma_prior[t,:,:].clone()\n",
    "            self.u_bar_prior[t-1,:] = self.u_bar_prior[t,:].clone()\n",
    "            self.v_bar_prior[t-1,:,:] = self.v_bar_prior[t,:,:].clone()\n",
    "            \n",
    "    def trajs_to_torch_tensors(self,trajs):\n",
    "        \"\"\"\n",
    "        Convert data from a list of lists into Torch tensors\n",
    "        \"\"\"\n",
    "        num_trajs = len(trajs)\n",
    "        \n",
    "        self.data_xvals = torch.zeros(num_trajs,self.num_steps,self.num_assets,dtype=torch.float64)\n",
    "        self.data_uvals = torch.zeros(num_trajs,self.num_steps,self.num_assets,dtype=torch.float64)\n",
    "            \n",
    "        for n in range(num_trajs):\n",
    "            for t in range(self.num_steps):\n",
    "                self.data_xvals[n,t,:] = torch.tensor(trajs[n][t][0],dtype=torch.float64).clone()\n",
    "                self.data_uvals[n,t,:] = torch.tensor(trajs[n][t][1],dtype=torch.float64).clone()\n",
    "                \n",
    "    def compute_reward_on_traj(self,\n",
    "                              t,\n",
    "                              x_t, u_t):\n",
    "        \"\"\"\n",
    "        Given time t and corresponding values of vectors x_t, u_t, compute the total\n",
    "        one-step reward for this step as the quadratic form\n",
    "        x'R_xx x + u'R_ux x + u'R_uu u + R_x'x + R_u'u + R_0\n",
    "        using the time-t slices of the R_* coefficient tensors.\n",
    "        \"\"\"\n",
    "        \n",
    "        # each term uses .clone() on the coefficient slice before the mat-vec product\n",
    "        aux_xx = x_t.dot(self.R_xx[t,:,:].clone().mv(x_t))\n",
    "        aux_ux = u_t.dot(self.R_ux[t,:,:].clone().mv(x_t))\n",
    "        aux_uu = u_t.dot(self.R_uu[t,:,:].clone().mv(u_t))\n",
    "        aux_x = x_t.dot(self.R_x[t,:].clone())\n",
    "        aux_u = u_t.dot(self.R_u[t,:].clone())\n",
    "        aux_0 = self.R_0[t].clone()\n",
    "        \n",
    "        return aux_xx + aux_ux + aux_uu + aux_x + aux_u + aux_0\n",
    "    \n",
    "    def compute_G_fun_on_traj(self,\n",
    "                              t,\n",
    "                              x_t, u_t):\n",
    "        \"\"\"\n",
    "        Given time t and corresponding values of vectors x_t, u_t, compute the\n",
    "        G-function (state-action value) for this step as the quadratic form\n",
    "        x'Q_xx x + u'Q_ux x + u'Q_uu u + Q_x'x + Q_u'u + Q_0\n",
    "        using the time-t slices of the Q_* coefficient tensors.\n",
    "        \"\"\"\n",
    "        \n",
    "        aux_xx = x_t.dot(self.Q_xx[t,:,:].clone().mv(x_t))\n",
    "        aux_ux = u_t.dot(self.Q_ux[t,:,:].clone().mv(x_t))\n",
    "        aux_uu = u_t.dot(self.Q_uu[t,:,:].clone().mv(u_t))\n",
    "        aux_x = x_t.dot(self.Q_x[t,:].clone())\n",
    "        aux_u = u_t.dot(self.Q_u[t,:].clone())\n",
    "        aux_0 = self.Q_0[t].clone()\n",
    "        \n",
    "        return aux_xx + aux_ux + aux_uu + aux_x + aux_u + aux_0\n",
    "    \n",
    "    def compute_F_fun_on_traj(self,\n",
    "                              t,\n",
    "                              x_t):\n",
    "        \"\"\"\n",
    "        Given time t and the state vector x_t, compute the F-function (state value)\n",
    "        for this step as the quadratic form x'F_xx x + F_x'x + F_0\n",
    "        using the time-t slices of the F_* coefficient tensors.\n",
    "        \"\"\"\n",
    "        \n",
    "        aux_xx = x_t.dot(self.F_xx[t,:,:].clone().mv(x_t))\n",
    "        aux_x = x_t.dot(self.F_x[t,:].clone())\n",
    "        aux_0 = self.F_0[t].clone()\n",
    "        \n",
    "        return aux_xx + aux_x + aux_0\n",
    "\n",
    "                 \n",
    "    def MaxEntIRL(self,\n",
    "                  trajs,\n",
    "                  learning_rate,\n",
    "                  err_tol, max_iter):\n",
    "        \n",
    "        \"\"\"\n",
    "        Estimate parameters of the reward function using MaxEnt IRL.\n",
    "        Inputs:\n",
    "        \n",
    "        trajs - a list of trajectories. Each trajectory is a list of state-action pairs, stored as a tuple.\n",
    "                We assume each trajectory has the same length\n",
    "        learning_rate - step size for the Adam optimizer over the reward parameters\n",
    "        err_tol, max_iter - convergence tolerance and iteration cap passed to G_learning\n",
    "        \"\"\"\n",
    "        \n",
    "        # omega is a tunable parameter that determines the cost matrix self.Omega_mat\n",
    "        omega_init = 15.0\n",
    "        self.omega = torch.tensor(omega_init, requires_grad=True, dtype=torch.float64)\n",
    "        \n",
    "        # initial value of the inverse-temperature parameter beta\n",
    "        beta_init = 50 \n",
    "        self.beta = torch.tensor(beta_init, requires_grad=True, dtype=torch.float64)\n",
    "        \n",
    "        # reward parameters optimized by gradient descent\n",
    "        reward_params = [self.lambd, self.eta, self.rho, self.omega, self.beta]\n",
    "        \n",
    "        print(\"Omega mat...\")\n",
    "        self.Omega_mat = self.omega * torch.eye(self.num_assets,dtype=torch.float64)\n",
    "        print(\"g learning...\")\n",
    "        self.reset_prior_policy()\n",
    "        self.G_learning(err_tol, max_iter)\n",
    "        print(\"initialize optimizer...\")\n",
    "        optimizer = optim.Adam(reward_params, lr=learning_rate)\n",
    "        print(\"zero grad...\")\n",
    "        optimizer.zero_grad()\n",
    "        \n",
    "        num_trajs = len(trajs)\n",
    "        print(\"trajs_to_torch_tensors...\")\n",
    "        # fill in Torch tensors for the trajectory data\n",
    "        self.trajs_to_torch_tensors(trajs)\n",
    "        # per-step and cumulative realized quantities along each trajectory.\n",
    "        # NOTE(review): in-place writes into tensors created with requires_grad=True\n",
    "        # can raise in newer PyTorch versions - confirm against the pinned version (1.3.0)\n",
    "        print(\"constructing zero tensors...\")\n",
    "        self.realized_rewards = torch.zeros(num_trajs,self.num_steps,dtype=torch.float64,requires_grad=True)\n",
    "        self.realized_cum_rewards = torch.zeros(num_trajs, dtype=torch.float64, requires_grad=True)\n",
    "        print(\"constructing zero tensors...\")\n",
    "        self.realized_G_fun = torch.zeros(num_trajs,self.num_steps,dtype=torch.float64, requires_grad=True)\n",
    "        self.realized_F_fun = torch.zeros(num_trajs,self.num_steps,dtype=torch.float64, requires_grad=True)\n",
    "        print(\"constructing zero tensors...\")\n",
    "        self.realized_G_fun_cum = torch.zeros(num_trajs,dtype=torch.float64, requires_grad=True)\n",
    "        self.realized_F_fun_cum = torch.zeros(num_trajs,dtype=torch.float64, requires_grad=True)\n",
    "        print(\"done...\")\n",
    "        \n",
    "        num_iter_IRL = 3\n",
    "        \n",
    "        for i in range(num_iter_IRL):\n",
    "            \n",
    "            print('GIRL iteration = ', i)\n",
    "            \n",
    "            # rebuild the cost matrix from the current value of omega\n",
    "            self.Omega_mat = self.omega * torch.eye(self.num_assets,dtype=torch.float64)\n",
    "            \n",
    "            # BUG FIX: loop over the actual number of trajectories instead of the\n",
    "            # hard-coded range(101), which indexed out of bounds whenever num_trajs != 101\n",
    "            for n in range(num_trajs):\n",
    "                if n%100==0:\n",
    "                    print(n)\n",
    "                for t in range(self.num_steps):\n",
    "                    \n",
    "                    # compute rewards obtained at each step for each trajectory\n",
    "                    # given the model parameters\n",
    "                    self.realized_rewards[n,t] = self.compute_reward_on_traj(t,\n",
    "                                                                self.data_xvals[n,t,:],\n",
    "                                                                self.data_uvals[n,t,:])\n",
    "                    \n",
    "                    # G- and F-function values entering the log-likelihood\n",
    "                    self.realized_G_fun[n,t] = self.compute_G_fun_on_traj(t,\n",
    "                                                                self.data_xvals[n,t,:],\n",
    "                                                                self.data_uvals[n,t,:])\n",
    "                    \n",
    "                    self.realized_F_fun[n,t] = self.compute_F_fun_on_traj(t,\n",
    "                                                                self.data_xvals[n,t,:])\n",
    "                \n",
    "                # cumulative values over the whole trajectory\n",
    "                self.realized_cum_rewards[n] = self.realized_rewards[n,:].sum().clone()\n",
    "                self.realized_G_fun_cum[n] = self.realized_G_fun[n,:].sum().clone()\n",
    "                self.realized_F_fun_cum[n] = self.realized_F_fun[n,:].sum().clone()\n",
    "            \n",
    "            # the negative log-likelihood will not include terms ~ Sigma_p as we do not optimize over its value\n",
    "            loss = - self.beta.clone()*(self.realized_G_fun_cum.sum().clone() - self.realized_F_fun_cum.sum().clone())\n",
    "            \n",
    "            optimizer.zero_grad()\n",
    "            \n",
    "            loss.backward() \n",
    "            \n",
    "            optimizer.step()\n",
    "            \n",
    "            print('Iteration = ', i)\n",
    "            print('Loss = ', loss.detach().numpy())\n",
    "        \n",
    "        print('Done optimizing reward parameters')\n",
    "            "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "N_8XeDlpPQOV"
   },
   "source": [
    "## Simulate portfolio data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "Gy2u0fkyPQOX"
   },
   "source": [
    "### Simulate the market factor under a lognormal distribution with a fixed drift and vol"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "F25fZ-s9PQOb"
   },
   "outputs": [],
   "source": [
    "# Market factor parameters\n",
    "mu_market = 0.05\n",
    "vol_market = 0.25\n",
    "init_market_val = 100.0\n",
    "\n",
    "r_rf = 0.02  # risk-free rate - the first asset will be cash\n",
    "\n",
    "num_steps = 10\n",
    "dt = 0.25 # quarterly time steps\n",
    "\n",
    "num_risky_assets = 99 \n",
    "\n",
    "returns_market = np.zeros(num_steps)  # note: returns_market[0] is left at 0\n",
    "market_vals = np.zeros(num_steps)\n",
    "market_vals[0] = init_market_val  # initial value (was hard-coded as 100.0)\n",
    "\n",
    "\n",
    "        \n",
    "for t in range(1,num_steps):\n",
    "\n",
    "        rand_norm = np.random.randn()\n",
    "        \n",
    "        # use log-returns of market as 'returns_market'\n",
    "        returns_market[t] = mu_market * dt + vol_market * np.sqrt(dt) * rand_norm\n",
    "        \n",
    "        # lognormal (GBM) evolution of the market factor value\n",
    "        market_vals[t] = market_vals[t-1] * np.exp((mu_market - 0.5*vol_market**2)*dt + \n",
    "                                                         vol_market*np.sqrt(dt)*rand_norm)\n",
    "        "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "sUUBRbwbPQO1"
   },
   "source": [
    "### Simulate market betas and idiosyncratic alphas within pre-defined ranges"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 86
    },
    "colab_type": "code",
    "id": "qraR7fRVPQO4",
    "outputId": "dfb260b8-fa17-4a70-a12d-86a4c2964e3c"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.20079376 0.14342511 0.32652534 0.80769512 0.73842311 0.61644969\n",
      " 0.68542852 0.66677167 0.26954556 0.4096186 ]\n",
      "[-0.04885155 -0.00915397  0.09101752  0.02506121 -0.02768924  0.08933758\n",
      "  0.11899387 -0.03891402  0.08100567  0.0624508 ]\n"
     ]
    }
   ],
   "source": [
    "# Draw per-asset market betas and idiosyncratic alphas uniformly at random\n",
    "beta_min = 0.05\n",
    "beta_max = 0.85\n",
    "beta_vals = np.random.uniform(low=beta_min, high=beta_max, size=num_risky_assets)\n",
    "\n",
    "alpha_min = - 0.05\n",
    "alpha_max = 0.15\n",
    "alpha_vals = np.random.uniform(low=alpha_min, high=alpha_max, size=num_risky_assets)\n",
    "\n",
    "# show the first 10 sampled values of each\n",
    "print(beta_vals[0:10])\n",
    "print(alpha_vals[0:10])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "DNFzG5dgPQPB"
   },
   "source": [
    "### Simulate time-dependent expected returns"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "a36GRMAlPQPD"
   },
   "outputs": [],
   "source": [
    "# Time-independent expected returns would be equal to alpha + beta * expected_market_return \n",
    "# Make them time-dependent (and correlated with actual returns) as alpha + beta * oracle_market_returns\n",
    "\n",
    "# oracle_coeff controls how much of the realized market-return surprise leaks into expectations\n",
    "oracle_coeff = 0.2\n",
    "mu_vec = mu_market * np.ones(num_steps)\n",
    "# NOTE(review): the second term mixes returns_market (already a per-dt quantity) with\n",
    "# the unscaled annual drift mu_vec - confirm the intended time scaling\n",
    "oracle_market_returns = mu_vec * dt + oracle_coeff*(returns_market - mu_vec)\n",
    "\n",
    "expected_risky_returns = np.zeros((num_steps, num_risky_assets))\n",
    "\n",
    "for t in range(num_steps):\n",
    "    expected_risky_returns[t,:] = alpha_vals * dt + beta_vals * oracle_market_returns[t]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "DDN_R3znPQPK"
   },
   "source": [
    "### Initial values of all assets "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "YneTj6IBPQPM"
   },
   "outputs": [],
   "source": [
    "# Initial prices of the risky assets, drawn uniformly in [val_min, val_max]\n",
    "val_min = 20.0\n",
    "val_max = 120.0\n",
    "\n",
    "init_risky_asset_vals = np.random.uniform(low=val_min, high=val_max, size=num_risky_assets)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "nqd7WIxbPQPS"
   },
   "source": [
    "### Simulate realized returns and asset prices"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "tSL2TU1IPQPW"
   },
   "outputs": [],
   "source": [
    "# Generate realized returns and realized asset values by simulating from a one-factor model \n",
    "# with time-dependent expected returns\n",
    "\n",
    "risky_asset_returns = np.zeros((num_steps, num_risky_assets))\n",
    "risky_asset_vals = np.zeros((num_steps, num_risky_assets))\n",
    "\n",
    "idiosync_vol =  0.05 # vol_market \n",
    "\n",
    "for t in range(num_steps):\n",
    "    \n",
    "    rand_norm = np.random.randn(num_risky_assets)\n",
    "        \n",
    "    # asset returns are simulated from a one-factor model:\n",
    "    # expected return + beta * market surprise + idiosyncratic noise scaled by sqrt(1 - beta^2)\n",
    "    risky_asset_returns[t,:] = (expected_risky_returns[t,:] + beta_vals * (returns_market[t] - mu_market * dt) \n",
    "                         + idiosync_vol * np.sqrt(1 - beta_vals**2) * np.sqrt(dt) * rand_norm)\n",
    "        \n",
    "    # asset values: start at the sampled initial prices, then compound simple returns\n",
    "    if t == 0:\n",
    "        risky_asset_vals[t,:] = init_risky_asset_vals\n",
    "    else:\n",
    "        risky_asset_vals[t] = risky_asset_vals[t-1] * (1 + risky_asset_returns[t,:])\n",
    "   "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 295
    },
    "colab_type": "code",
    "id": "YvHvV7B6PQPh",
    "outputId": "3421ed28-a65f-4362-d166-990d22dbf1fd"
   },
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYIAAAEWCAYAAABrDZDcAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAgAElEQVR4nOydd5gUVfaw30POOecgcUgCIiBJBKZVcAywgphXcA2rrquCfrsr7rqrK67+xAwqqIygYEJlhiSIJCUoYUDJYWCAIQ1xYML9/rjV0AyTu3uqe+a8z9NPd1XdunW6qrtO3XPOPUeMMSiKoihFl2JuC6AoiqK4iyoCRVGUIo4qAkVRlCKOKgJFUZQijioCRVGUIo4qAkVRlCKOKoIiiIgsEpH7nM8jRWRugPtvIiJGREoEsl8ldBGRcSIy1W05lPyhiiBEEZGdInJGRE6KyH4RmSIiFQJ9HGNMtDFmUKD7DSR6kwk+zu9tgNtyeAk1eQo7qghCmyHGmApAJ+By4GmX5Qk4BTFq0JFJaKHXPPRQRRAGGGP2A3OwCgEAESktIi+LyG4ROSAi74hIWWdbVRH5VkQSReSo87lBZn2LyN0issT5/JQzAvG+UkRkirOtsoi8LyIJIrJXRJ4XkeLOtuKOLIdEZDtwfXbfx3naGyMi64BTIlJCROqJyOeOzDtE5BGnrQd4BrjVkWmtTx8DfPo8P2rwMU39UUR2A9/7rLvLOWeHROT/+ezfTURWichx53y+koXsm0RksM9yCaevziJSRkSmishhETkmIitFpHYW/WT6fZ1ts0Xkfz7Ln4rIBz7Xa6mIvC4iSSLym4hc49M2y+vkbB/lfIcTIrLRkftjoBHwjXOOn3LadheRZc53WSsi/Xz6aSoiPzj9zANqZHO9+4lIvHPN9wOTnfWDReRXp/9lItLBWX+JPN4+MvR7/jfgXP+Zzvk/DtztrPtMRD5y5IwTka4++49xztEJEfnd9zwWOYwx+grBF7ATGOB8bgCsB17z2f5/wCygGlAR+AZ4wdlWHbgFKOdsmwF85bPvIuA+5/PdwJJMjt8Q2Adc5yx/BbwLlAdqAT8D9zvb/gT85uxTDVgIGKBENt/tV6d9WewDyWrgH0ApoBmwHYh02o8DpmZ1fjK2AZo4x//Ikbesz7pJznJH4CzQxtlnOXCH87kC0D0L2f8BRPssXw/85ny+37kO5YDiQBegUiZ95PR96wAHgf7ASGdbRZ/rlQr8BSgJ3AokAdVycZ2GAXuBKwABLgMaZ3E+6wOHgesceQc6yzV9ztcrQGmgD3Ai4zXy6aufI/N/nfZlgc7Od7zSOVd3OTKUzkKefkB8Nv+RcUAKcKMjb1lnXbLzHYoDLwArnPatgD1APZ/fTHO3//eu3W/cFkBfWVwY+yM/6fzBDLAAqOJsE+CU7w8X6AHsyKKvTsBRn+VFZKMInD/RamCMs1wbe9Ms69NmBLDQ+fw98CefbYPIWRHc67N8JbA7Q5ungcnO53EZbzKZ3CjOt+HCTb+Zz3bvugY+634GhjufFwPPATVyuC6XOdeknLMcDfzD+XwvsAzokEMf2X5fZ/lm50Z1COjls/5urIKWDN/jjlxcpznAo9lcE9/zOQb4OEObOdgbdiPsjb28z7ZPMl4jn239gHNAGZ91bwP/ytDud6BvFvL0I2dFsDjD9nHAfJ/ltsAZn+t4EBgAlAzEfzacX2oaCm1uNMZUxP4JWnNh+F0T+9S52hlWHwNinfWISDkReVdEdjnD5MVAFV8TQQ68D/xujPmvs9wY+/SZ4HO8d7FPnAD1sDctL7tycQzf9o2Bet6+nf6fwd7Y/GFPJuv2+3w+jX36B/gj0BL4zTHpDL5kT8AYsxXYBAwRkXLADdibIMDH2JvldBHZJyIviUjJTLrJzff9FvsU+7sxZkmG/fca527msAt7DXK6Tg2BbZl9ryxkHJZBxl5AXedY
R40xpzLIkB2JxpjkDP3/NUP/DZ2+80turncZESnhXMfHsMrioIhMFxF/jh3WqEMlDDDG/CDWVv8yduh7CDgDRBhj9mayy1+xQ98rjTH7RaQT8At2JJEtIjLW2beXz+o92CfNGsaY1Ex2S8D+ib00yvFL2adz3/53GGNa5KKtl1NYZeilTi73y/wAxmwBRohIMezT+EwRqZ7hZudlGvZJuxiw0bmpYIxJwY4qnhORJsBs7FPu+xn2z+n7Avwbq3CaisgIY8w0n231RUR8lEEjrJkwp+u0B2iexfEynqs92BHBqIwNRaQxUFVEyvucn0aZ9JFT//82xvw7l+0vut7OQ03NHPbJFmPMJ8AnIlIJqzD/ix1ZFTl0RBA+/B8wUEQ6GWPSsbbuV0WkFoCI1BeRSKdtRayiOCYi1YBnc3MAEbkWeAQ7EjnjXW+MSQDmAv8TkUoiUkxEmotIX6fJZ8AjItJARKoCY/P43X4GjjvOu7Jinc/tROQKZ/sBoIlzk/byKzBcREo6DsCheTzmRYjI7SJS0zm3x5zVaVk0n441fz3AhdEAInK1iLR3blLHsTbrzPrI9vuKSB/gHuBO5/W6iNT32b8W9nyXFJFhQBtgdi6u03vAEyLSRSyXOTd1sOe4mc8xpmJHPZGOfGUch20DY8wuYBVW4ZUSkV7AkOzObyZMAv4kIlc6spQXketFpGIW8mzGPs1f74yy/ob1N+QLEWklIv1FpDTWj3CGrK93oUcVQZhgjEnEOj//7qwaA2wFVjjmn/nYJ3mwSqMsduSwAms2yg23Yp+yNsmFyKF3nG13Yh2bG4GjwEysmQDsn3oOsBZYA3yRx++Whr2RdAJ2OHK/B1R2msxw3g+LyBrn89+xT7dHsU/h52/I+cQDxInISeA1rO8gObOGzg13OdAT+NRnUx3seTmOfZr/AXtDzbh/lt/XeTr9CHjYGLPXMQu9D0wWEe+I7ieghbPfv4GhxpjDzrYsr5MxZobT/hOsn+MrrHMfrCP1b46Z5gljzB4gCmuySsQ+wT/JhXvGbVhfxxHsg8ZHmZ7VLDDGrAJGAW84cm7F+j+8ZJQnCXjQOU97sSOEi6KI8khp4EXsOdyPVa7P+NFfWCMXmxoVRQllRORurKO/V05tFSW36IhAURSliKOKQFEUpYijpiFFUZQijo4IFEVRijhhOY+gRo0apkmTJm6LoSiKElasXr36kDEm4/yL8FQETZo0YdWqVW6LoSiKElaISKYzwNU0pCiKUsRRRaAoilLEUUWgKIpSxFFFoCiKUsRRRaAoilLEUUWgKIpSxFFFoCiKUsRRRaAoihIOnDoFjz0G23JbZC73BEQRiIhHRH4Xka1OhauM21uLyHIROSsiT+RlX0VRFAX49FN47TXYty/gXfutCJxqTG8C12KLQ48QkbYZmh3BVr56OR/7KoqiKBMnQps20CvwpSgCMSLoBmw1xmw3xpzDlvGL8m1gjDlojFmJLd2Xp30VRVGKPGvXwk8/wejRIDmWHs8zgVAE9bFl7LzEO+sCuq+IjBaRVSKyKjExMV+CKoqihCWTJkHp0nDnnUHpPhCKIDP1lNsiB7ne1xgz0RjT1RjTtWbNS5LnKYqiFE5OnYKPP4Zhw6BatZzb54NAKIJ4oKHPcgMgt94Mf/ZVFEUp/Hz2GRw/bs1CQSIQimAl0EJEmopIKWA4MKsA9lUURSn8BNFJ7MXvegTGmFQReRiYAxQHPjDGxInIn5zt74hIHWAVUAlIF5HHgLbGmOOZ7euvTIqiKIWCdetgxQp49dWgOIm9BKQwjTFmNjA7w7p3fD7vx5p9crWvoiiKgh0NlC4Nd9wR1MPozGJFUZRQ5PRp6yQeOhSqVw/qoVQRKIqihCKffmqdxPffH/RDqSJQFEUJRSZOhNatg+ok9qKKQFEUJdTwOomDNJM4I6oIFEVRQg2vkzhIM4kzoopAURQllChAJ7EXVQSKoiihRAHMJM6IKgJFUZRQwusk7t27wA6p
ikBRFCVUWL8eli8vMCexF1UEiqIoocLEiVCqVIE5ib2oIlAURQkFXHASe1FFoCiKEgp89hkkJRXITOKMqCJQFEUJBSZOhFatCtRJ7EUVgaIoitu45CT2oopAcZ/NmyElxW0pFMU9XHISe1FFoLjL2rU2Znr4cEhNdVsaRSl4fJ3ENWq4IoIqAsVdPv7Yvn/xhR0Wp6e7K4+iFDQzZlgncQHOJM5IQCqUKUq+SEuDadPghhugUyd47jmoUgX+9z9X7KSK4grvvmudxH36uCaCKgLFPRYtgn37YORIOyw+etTWZq1WDf72N7elU5Tg43USu/zwExDTkIh4ROR3EdkqImMz2S4iMsHZvk5EOvts2yki60XkVxFZFQh5lDAhOhoqVoTBg+2f4NVXrbPs73+HN95wWzpFCT6TJrnqJPbi94hARIoDbwIDgXhgpYjMMsZs9Gl2LdDCeV0JvO28e7naGHPIX1mUMCI5GT7/HG65BcqWteuKFYP337f20j//2ZqJbr/dXTkVJVh4ncS33OKak9hLIEYE3YCtxpjtxphzwHQgKkObKOAjY1kBVBGRugE4thKufPutTbWb8UZfogRMnw79+8Pdd8OsWa6IpyhBZ8YMOHbMlZnEGQmEIqgP7PFZjnfW5baNAeaKyGoRydJtLiKjRWSViKxKTEwMgNiKq0ydCnXrQr9+l24rUwa++go6d4Y//MH6EhSlsDFxIrRs6aqT2EsgFEFmHg6ThzZXGWM6Y81HD4lIpmfFGDPRGNPVGNO1Zs2a+ZdWcZ8jR2D2bBgxAooXz7xNxYoQEwPNm8OQIbBK3UdKIWLDBli2zLWZxBkJhCKIBxr6LDcA9uW2jTHG+34Q+BJralIKMzNn2pnEI0dm3656dZg719pPPR7YuDH79ooSLnhnEt91l9uSAIFRBCuBFiLSVERKAcOBjIbdWcCdTvRQdyDJGJMgIuVFpCKAiJQHBgEbAiCTEspER9vZxJdfnnPb+vVh/nwoWRIGDYKdO4MunqIElRByEnvxWxEYY1KBh4E5wCbgM2NMnIj8SUT+5DSbDWwHtgKTgAed9bWBJSKyFvgZ+M4YE+uvTEoIs3s3LF5sncS5HRI3bw5z5sCpUzBwIOzfH1wZFSWYzJxpncQuziTOiBiT0Zwf+nTt2tWsUptxePLii/D007B9OzRtmrd9ly+HAQPgssusA7lq1aCIqChB5aqr4NAh+O23AvcPiMhqY0zXjOs115BSsERHQ8+eeVcCAD162GiiTZvg+uvtCEFRwokQcxJ7UUWgFBzr1tk/Qk5O4uwYONDmJ/rpJ7j5Zjh7NnDyKUqw8c4kDhEnsRdVBErBER1tJ4z94Q/+9XPLLfYPNXcu3HGHTV6nKKHOmTPw0Uf2ASZEnMReNOmcUjCkp8Mnn9gw0ED8Ce691zrc/vpXqFTJKoYQGmoryiWE0EzijKgiUAqGxYshPh7Gjw9cn48/bjOWPv+8dRy/9JIqAyV0mTgRWrSAvn3dluQSVBEoBUN0NFSoYGsPBJJ//tMqg5dftumrn346sP0rSiCIi4OlS+2DUAg+rKgiUILP2bM2dvqmm6BcucD2LQITJtgh9zPP2IylDzwQ2GMoir94ZxLffbfbkmSKKgIl+MyebW/UwUopXawYTJ5s01c/9JBVBiNGBOdYipJXQthJ7EWjhpTgM3Uq1K5tU0sHi5Il4bPPbCbHO++E774L3rEUJS+E4EzijKgiUILLsWO29sDw4TZ0NJiULWvrF3TsaEtfLl4c3OMpSm54913rJM4s5XqIoIpACS6ffw7nzvk3iSwvVKpk01c3aWLTV69ZUzDHVZTM8DqJQ2wmcUZUESjBJTraPg11vSS9SfCoWdNONqtSBSIjbU4XRXGDSZOs2TLEZhJnRBWBEjzi421yuLxkGg0UDRva9NXFitn01bt3F+zxFeXMGfjwQ+skDvFiWho1pASPadPAGLjtNneO36KFTV/dr5/NUfTjj1CrljuyhBsTJsBb
b1kFnptXsWKBb1usmH3demvwIs6CiddJHIIziTOiaaiV4NGpk60/vGKFu3IsXWoVQatWdoRSubK78oQ6xkCzZvYm3KWLXc7plZ6eu3Z53f/IEZuy/K23wm9+SO/etnbG5s1+j4iTzqTw9a97mf7zHl6/7XKa16yQr36ySkOtIwIlOMTFwdq19snSba66Cr74ws5qHjzYjhICPbGtMPH777YS3Ntvw5/+lGPzoHLunI0Ae9CpZRUuyiAuDpYs8SvtiTGGlTuPMv3n3Xy3PoGzqem0q1+JY6dTAiysKgIlWERH28L0t97qtiQWj8fOZxg+3N5YvvrKzvRULiXWKRLo8bgrB9hrNHNm+CkDr5M4HzOJD588y+dr4pm+cg/bE09RsXQJhnVtwPArGtGufnBGs6oIlMCTnm4VwaBBoWWT/8Mf7Ozj0aPtpDOvslIuJibG1pRu0sRtSSzhpgx8ZxLn0kmcnm5YsvUQn67cw9yN+0lJM3RtXJUHhjbn+g51KVcquLdqVQRK4Fm61Ebp/Oc/uWoef/Q05UuVoEq5kkiwo4tGjbJJ6saMseGlb78d0vHdBc7p0/DDD6F3ow0nZfD55/Y3louZxPuTkpmxag+frtpD/NEzVC1Xkjt7NGH4FQ1pUbtiAQhrCYgiEBEP8BpQHHjPGPNihu3ibL8OOA3cbYxZk5t9lTAkOtra4KOismySnm5YtPkg7/6wnZ92HAGgYpkSNK5ejkbVytGoWnkaVy9H42rlaFitHPWqlKV4sQDdsJ96yv5RX3zRpq9+4YXA9FsY+OEHmyTw2mvdluRSwkUZvPuurat99dWZbk5NS2fh74lM/3k3C38/SLqBqy6rzhhPawZF1KZ0iYIfpfqtCESkOPAmMBCIB1aKyCxjzEafZtcCLZzXlcDbwJW53FcJJ86dswU4brzRpp3OwNnUNL7+ZR+TftzOloMnqVu5DE95WlGqeDF2HznNrsOn2ZRwgnkbD5CSdiGirWRxoUFVqxQaVyt3QWE473keOv/nPxcrg6ee8vebFw5iYqBsWUzv3iSfS8NgbAAP1nnpvSLGAIZLtoP3s92GubBst2Xozydo0WTRX82KpalYpqRtFOrKYOPGLJ3Euw+f5tNVu5mxKp6DJ85Sq2JpHujXnFu7NqJRdXeDFwIxIugGbDXGbAcQkelAFOB7M48CPjL2yq4QkSoiUhdokot9lXAiNtaG/GVIKZF0OoXon3cxZelODp44S+s6FXn11o4M7lCPksUvndeYlm5ISDrD7sOn2XXkNLuPnHY+n+KX3Uc5kZx6UfuaFUvTyFESXuVglUV5alQodanJSQTefNPGeY8ZY5XBqFEBPx1hR2wsJ3v0YuT7q1kbn+S2NOdpWqM87epXpl29SrSvX5mIj6dR+Y4RoacMMswkPpuaxty4A0xfuZulWw9TTKBfq1oMv6Ih/VvXokQmv303CIQiqA/s8VmOxz7159Smfi73BUBERgOjARo1auSfxErwmDrVOsgGDgSs/f+DJTv5dOVuTp1Lo9dlNXh5WEd6t6iRrT+geDE7AmhQtRw9M9l+7PQ5djlKYs+R0+w6fIpdh0+zfPthvvx170VPmuVKFXfMTb4jifI0rlaO+pOnUPL4cTvpp3Jl/+sphzHnft9CqS1beKVJf+KPnuGxAS0oU7I4gjPHC7noIVdEfLZx/np6l7lo+4V9fdeRcV/vNu92YM+R06zfm8SaXUf5Zu2+88dv1v1h/i8+iQ4PPsjWgyeo/sRjVC3vYiSYdybxTTexlbJM+3YjX6yJ5+jpFOpXKcvjA1syrGsD6lYu656MWRAIRZDZvznjLLWs2uRmX7vSmInARLATyvIioJeDJ5I5m5JOw2oaQx4Ujh+Hb76B++5jw8HTTPpxO9+uSwBgSIe6jOrTjIh6gQl/q1KuFFXKlaJjwyqXbEtOSSP+6Bl2H7HKwTua2H7oFIs2J3IuNf1822ICTXs8zJtb9tFi5EgONGtLva7tAiJjOLE+
PonF/+8tHgLEcy1zR/eheoXSbot1CYdPniVu33HW701iw94kHh36DM+cSGbguDH8bdkuFl59C+3rV6Z9g8pEOKOHgvoe5z6dQamjRxlXtxdTXllMyeLCwLa1GX5FI3pdVoNigfJxBYFAKIJ4oKHPcgNgXy7blMrFvgHjxZjfmL0+gb8MaMm9vZpmapJQ8o/5/HMkOZlnK3Tkw9eXUL5Uce7p2YR7ejWlfpWCewoqU7I4l9WqwGW1LvVRpKcbDp44a0cQ50cTpxl/5995/x9DmTrmFYqPGcMD/ZoHPWQvFEhOSWPCgi28u3g7Uzas4HTDJvz98ayd/G5TvUJp+rSsSZ+WF8Iyjz14FUduGcrzc99iev1KvFN8ELFx+89vr1u5DO3qV6Z9/cq0q1+JdvUrU6timYDJtGFvEtNX7uamZ8dTrWpdfqwXwTPdG3Nz5wbUCEFlmhmB+KWvBFqISFNgLzAcyJhcZhbwsOMDuBJIMsYkiEhiLvYNGE8MasWJ5FReiPmNL3/Zy39ubk/nRlWDdbgiQ0paOt+s3UeTF9+kWpW6xJRvzJhezbjtykZULlvSbfEuolgxoU7lMtSpXIYrm1X32XI5KZ93Ymj8Gvp/v5WZq+N55ro2DO5QN/ghrS6xetdRnpq5lm2JpxjRvia9XluH3HOP22LlmSpVKkDMLBg6lOGTX2T4W41Ievg+Nu47zoa9SXb0sC+J+ZsOnDcZ1qpY2lEMlc8ridqVSuf6Wp9ITuHrX/cxfeVuNuw9Ttuj8Ty/ewO7n/oH85+8Oux+M34rAmNMqog8DMzBhoB+YIyJE5E/OdvfAWZjQ0e3YsNH78luX39lyop6Vcoy6c6uzInbz7hZcdzy9jJGXtmIpzytqVQmtG5Y4cCJ5BSm/7yHD5buIC1+Lyu2rGHTvY/w49j+roTA+UvJm2+i2bhxfHlLc/62PJE/T/uFj1fsYtyQCNrWq+S2eAHjzLk0Xp77Ox8s3UHdSmX48N5u9N291s4hCIXZxPkhQzRRZaDHAw/Qo/kFZX/ybCobHbNSnKMgvOGbADUqlKZd/UoXKYh6lcucv6kbY1iz+yjTft7Dd+sSOJOSRus6FXnuhgiGfzIXSpak0V8fCst5KUU26dzJs6m8MnczU5btoHqF0owbEsF17euEnSZ3g/1JyUxeuoNPftrNibOpdG9Wjee3z+Oy/z5rc/+3auW2iPnj11/h8svhvfdIu+dePl25h/FzfiPpTAq3XdmIvw5s5a4zMgCs2H6YMZ+vY9fh04y8shFjr21tQzOfeAJef91GfJUv77aY+cebm+ibb3KVqO70uVQ2JRxnfXwSG5wRxJaDJ0lztEO18qWIqFeJy2pVYMmWQ2w5eJLypYpzQ6d6DL+iER0aVEbOnoV69WyAxKefFsS3zDdZJZ0rsorAy/r4JJ7+ch0b9h6nX6ua/CuqnTqTs+D3/SeYuHg7s9buJS3dcF37uozu04wODarYLJXFisHKlW6LmX+MgaZNoUMHW/ISG/b66vzNfLxiFxVKl+CJQS0Z0a1RyIT95ZaTZ1P5b8xvfLxiF42qlePFW9rTs7lPIfWICHszmzfPPSEDRR6VQUaSU9LYlGCVwoa9dgSx5eAJIupVZvgVDRnSsR7lS/sYU6ZOhTvusPUvrrkmwF8msKgiyIbUtHQ+Wr6L/839nTRjeGxAS/6ozmTADoeXbzvMu4u388PmRMqWLM6tVzTkj72aXlCYv/0GbdrAq6/CY4+5K7C/PPKIjQU/dOiiJ+Pf95/guW/iWLbtMK3rVOTZIREXmR1CmR+3JDL28/XsSzrD3T2b8GRkq4sd4bt3Q+PG8L//weOPuydoIPFTGWTEGJO1taBPH0hIsFlbi4X2PSMrRRDaUhcQJYoX495eTZn/1770bVmTF2N+Y8jrS1i966jborlGalo6X/+6lyFvLOG2934ibl8Sfx3YkmVj+zPuhoiLR03R0fYPMHy4ewIHiqgoSE6+5Mm4VZ2KRN93JW+P
7MyJ5FRGTFrBQ5+sYe+xMy4JmjNJZ1IYM3Mdd7z/M6VLFGPG/T14dkjEpdFQoZRtNFB4fQZDhthJZ2+/7Vd3WSqBTZtswaNRo0JeCWSHjggyYW7cfp6dFcf+48mMvLIRT0a2Drnol2Bx6mwqn67cw/tLdrD32Bma1SzPqN7NuOny+pQpmYkD2Bho3tzmVpk7t+AFDjQpKTZj6k03wQcfZNokOSWNd3/YzluLtiICD/S9jPv7Nsv8/LjEgk0HeObL9SSeOMvoPs3PTw7LlJtvhlWrYNeusHR0ZkuARwaX8Pjj8MYbtixrKGXazQI1DeWRk2dTeXXeZiYvtc7kfwxuW6hDCQ+eSObDZTuZumI3SWdSuKJJVUb3ac41rWtlPxFm2TJb+OXDD21q58LAyJF2RJCQkG2a6vijp3lh9m98tz6B+lXK8vfBbYiMcDfg4Oipc/zz2418+cteWtWuyEtDO2Q66e48KSlQvTqMGGGTpRVGgqUMkpOhfn3rF/jss8D0GWS0QlkeqVC6BH8f3JabLq/P01+s58/TfmHm6niev7FwOZO3HjzJpMXb+fKXvaSkpxPZtg6j+zbL/fyK6GgoW9Y+QRcWoqLgk09g+XLo1SvLZg2qluPNkZ25fdthnvsmjj9NXcNVl1Xn2SERtCzAFMJeYtYn8PevN3DsdAqPXNOCh65unnMY77JlcOJE4TILZSRYieo+/9xGWYVBTeKc0BFBLkhLN3y0fCcvz7HO5Eevacl9vcPXmXzk1DnmbzzAd+sT+GFzIqVLFGNY1wb8sVczmtbIQ+hgSgrUrQsDBsD06cETuKA5fhxq1IBHH4Xx43O1S2paOp/8vJv/zd3MybOp3NG9MX8Z0JLK5YJvUkw8cZZnZ21g9vr9RNSrxPihHXM/7+Hpp+Hll+HwYahUeOZKZEqgRwZ9+8LevbYmcZj4B9Q0FAASks4wblYcc+IO0LpORf59U3u6NA6Pmcl7j51hzob9zInbz8qdR0g3UL9KWYZ2acCdPRrnLx/Lt99aZ9ysWfa9MBEZCTt22EiQPJh6jpw6xyvzfueTn3ZTpVwpnoxsxR+6NgxcLQUfjDHMWruPcbPiOHU2jUcHtGB0n2Z5e0Dp1MkW6Fm0KODyhSSBUgabNu8rSm4AACAASURBVEHbtjaN+ZgxgZUxiKgiCCDzNh7g2a83kHA8mdu62ZnJoeZMNsaw9eBJ5sTtZ07cAdbvtSmFW9WuSGREbQZF1CGiXiX/7NkjRlgHcUJC4av/+9Zb8NBDNr98mzZ53j1uXxLPzdrIzzuPEFGvEs/dEEHXJtUCJt7+pGT+9tV65m86SKeGVRg/tEPeK1rt22dt3C+8AGPHBky2kCcQyiDMnMReVBEEmFOOM/mDpTuoVr40zw5x35mcnm5YtzfJ3vw37Gf7oVMAXN6oCpERdYiMqJM30092nDgBtWvb4txvvRWYPkOJ+Hho2NCvm6Qxhm/XJfCf2ZtISErmxk71GHttG+pUzn/CM2MMM1bH869vN3IuNZ0nI1txz1VN8zfimDIF7rnHzqju2DHfMoUl/iiDMHQSe1FFECQ27E3imS/Xsy4+iT4ta/J8VLsCrTaUkpbOzzuOMCduP3PjDrD/eDIligk9mldnUEQdBrWtTe1Kgcu0eJ6PP7ZRQkuW2KihwkjXrnaks2yZX92cPpfK24u28e7i7ZQoJjx09WXc17tpnvMxxR89zdNfrOfHLYfo1rQa/72lg3+K/dZbYfFiOzIopNFw2ZJfZRAdDbffbiPLBgwIrowBRhWBlyNHoFrghuhgnckfL9/Jy3M3k5KWzqMDWjCqdx5ttXkgOSWNxZsTmRN3gPmbDpB0JoUyJYvRt2VNIiPqcE3r2sF3Uno81n6+fXvhvYn861/w7LPW9FW7tt/d7T58mn/P3sicuAM0rl6Ov1/flmva1MpxFJmeboj+eTcvzt6EAcZe25rbr2zsX3771FRr0oiKgsmT899PuJMfZRCGTmIvqgjAXuTZ
s2HnzqDcvBKSzvDcrI3Exu2nVe2K/OfmdnRpHBilk3Qmhe9/O8CcDQf4YXMiZ1LSqFSmBAPa1CayXR36tKhJ2VIFNKFp/347NB47Fv7974I5phusW2dNJpMmwX33BazbH7ck8tw3G9l68CR9W9bkH0Pa0rzmpbUTAHYdPsWYz9exYvsRel1Wgxdubh+Y8OXly6FnT5skrQhXZQPypgy86VTCzEnsRRUBwMSJNuY3nw7A3DJ/4wGenRXH3mNnuO3KRoyJbJ2vJ/SDx5OZu/EAc+L2s3zbYVLTDbUrlWZQW2vvv7JZNXdCWF97zeYUiouzkROFFW8Suvbt7U0igKSkpfPx8l28On8zZ86lcc9VTXjkmhbni7SnpRumLNvJ+Dm/UbJYMf42uA1/6NowcD6of/zDKvHExICPkMOS3CqDxx+3WVrj4wMySixoVBGAnULfpAm88gr85S8Bl8uXU2dT+b/5m/lg6U6qlivFP4a0ZUgunMk7D51yIn3288ueY/ZeVKM8gyJq44moQ8cGVdwvedetmzUtrFnjrhwFwaOP2geIDEnoAsWhk2d5ec7vfLpqD9XLl+YpTysub1iFMZ+vY83uY/RvXYt/39Qu8HVuu3WzRdaXLg1sv+FMTsrA6yTu3x9mzHBHRj9RReClbVsbDTJnTmCFyoINe5P4f1+uZ20WzmRjDBsTjjMn7gBz4/bz2/4TALSrX4nItnWIbFeHFrUqhE5qi82bbb2Bl1+Gv/7VbWmCz/ff2+iQL74I6uzp9fFJPDtrA2t2HwOgctmSjLuhLTd2qh/4a5+YaJ9mn3sO/v73wPYd7mSnDD755EL6kTBzEntRReDl8cftBT5yBMoVTHRPWrph6opdjJ/zOylp6TxyTQuuaFKNuXH7mbNxP3uOnKGYQNcm1Yh0In1CNo3Fs89aJ+qePfbpqLDjTUIXFWXDLYOIMYavft3Lr7uP8VD/ywJaV/civFEvP/8MV1wRnGOEM1kpg3797O9+y5awcxJ7UUXgZe5cO2t09my49trACpYD+5OSee6bOGI22MLapYoXo1eLGkRG1GZAm9r5m91bkBgDLVrY3PULFrgtTcFx++02VfOBA9kmoQsb7rjjwvcJ0xta0MmoDK6+2voVw3zyXVaKAGNMvl9ANWAesMV5r5pFOw/wO7Zm8Vif9eOwRet/dV7X5ea4Xbp0MfnmzBljypY15pFH8t+Hnyzdkmi+XbvPHD9zzjUZ8sWKFcaAMR984LYkBctnn9nvvXix25L4T1qaMTVrGjNypNuShD5nzxozZIi99p06GVOihDH797stlV8Aq0wm91R/HwfGAguMMS2ABc5yRg1UHHgTuBZoC4wQEd9Qk1eNMZ2c12w/5cmZMmXsEM9bjMMFel5Wg+s71D0fIRI2REdD6dI2f31RwuOxE8u+/tptSfxnzRrrIyjM2UYDhW9xm19/hRtvDMtIodzgryKIAj50Pn8I3JhJm27AVmPMdmPMOWC6s597eDzW6bl9u6tihBUpKTbD6JAhULmy29IULBUr2kiRr7+25rFwJibGzqGJjHRbkvDAqwz+8x87d6CQ4q8iqG2MSQBw3jPLvlQf2OOzHO+s8/KwiKwTkQ9EpGBSeXqfhgoocqhQMH++fZIcOdJtSdzhhhtg61abdTKciY2FLl2gZk23JQkfSpWy6bqbN3dbkqCRoyIQkfkisiGTV26f6jOLffM+Vr0NNAc6AQnA/7KRY7SIrBKRVYmJibk8dBa0aGEnCrloHgo7oqNtuuICdrCHDDfcYN/D2Tx09CisWFF0r6GSJTkqAmPMAGNMu0xeXwMHRKQugPN+MJMu4oGGPssNgH1O3weMMWnGmHRgEtaMlJUcE40xXY0xXWv6+zQjYkcFCxbY6AAle06dgq++sqkISod4ZFOwqF/fJqELZ0Uwbx6kp6t/QLkEf01Ds4C7nM93AZn9S1YCLUSkqYiUAoY7+3mVh5ebgA1+ypN7PB57g1uypMAOGbZ8/bU9
V0XVLOQlKgp++snmWgpHYmPtqK5bls9bShHFX0XwIjBQRLYAA51lRKSeiMwGMMakAg8Dc4BNwGfGmDhn/5dEZL2IrAOuBoKb98GXq6+2U+zVPJQzU6fa2djZ1O8tEkQ51tAA5x0qEIyxv/VBg6CElipXLqboTSjzpX9/m0Nm3Tr/+yqsHDwI9erBE08U6qiJXGGMdRi2bWvLdIYTa9faspQffGCL0ShFkqwmlBXtaYUeD6xfb3OLK5nz2WeQlqZmIbC+pagoG0F18qTb0uQN78hX/QNKJqgiAA0jzY7oaOjQwaZiVmz00NmzNlVJOBETY2sr1K2bc1ulyFG0FUH79vaPoX6CzNm2zYYb6mjgAr17Q9Wq4RU9dPy4TTetowElC4q2IvCGkc6bZ/PrKxcTHW3P0YgRbksSOpQoAddfb30E4fKb+f57K6vOH1CyoGgrArB/jmPHbEpe5QLGWEXQp4+NGFIuEBVl05j7WdS+wIiJsWkyevZ0WxIlRFFFMGCATcWr5qGLWb3a5mNSs9ClREaGTxI6b9jogAE2XFpRMkEVQdWq0L27KoKMREfbm93QoW5LEnpUrGirloVDErpNm2D3bvUPKNmiigDsn2TVKptUTbH25GnTrC28asHkAQw7oqKsM33jRrclyR4NG1VygSoCsH8SY6zTWLHOxQMH1CyUHUOG2PdQNw/FxtoJcI0auS2JEsKoIgCblrdGDTUPeYmOtjUHrr/ebUlCl3r1bL3fUFYEp07BDz/oaEDJEVUEYJ3FgwbZiWXp6W5L4y6nT8MXX8Att9hqbkrWREXZaLN9+9yWJHMWLbLZdVURKDmgisCLx2Pz6vz6q9uSuMs339j0Cbff7rYkoY83CV2o5h2KiYFy5ewkOEXJBlUEXgYNsu9F3Tw0darNvd+3r9uShD4REdCsWeiah2JjbZZdHdkpOaCKwEvt2tC5c9FWBIcO2e8/YoQ1lynZ401Ct2BB6CWh27rVRjXpbGIlF+i/3RePx84WTUpyWxJ3mDHDho5qtFDuiYqySehCLXFhTIx9V/+AkgtUEfji8diUywsWuC2JO0RH21DDjh3dliR8uOoqqFYt9MxDsbFw2WWFuuC6EjhUEfjSvTtUqlQ0zUM7dtgMlbffbk0eSu7wJqH77rvQSUKXnAwLF6pZSMk1qgh8KVnS5mSJjQ391AGB5pNP7Pttt7krRzjiTUIXKvWvFy+GM2fULKTkGlUEGfF4YM8em6OlqODNNNqrFzRu7LY04UdkJJQuDbNmuS2JJTbWytOvn9uSKGGCX4pARKqJyDwR2eK8Z5qYRkQ+EJGDIrIhP/sXKJGR9r0omYd+/dUqPnUS548KFUIrCV1MjA3/LVfObUmUMMHfEcFYYIExpgWwwFnOjClAZuPU3O5fcDRqZB2mRUkRREdbW/ewYW5LEr5ERcH27RAX564cO3fCb7+pWUjJE/4qgijgQ+fzh8CNmTUyxiwGjuR3/wLH47E5Wk6dcluS4JOWZv0D110H1au7LU34EipJ6LxhrOooVvKAv4qgtjEmAcB5r1XA+wcHj8fmaFm0yG1Jgs+iRZCQoGYhf6lbF7p1c18RxMRYP0+rVu7KoYQVOSoCEZkvIhsyeUUVhIA+cowWkVUisiox2HUDeveGsmWLhnkoOtoWWvE+0Sr5JyoKVq50LwnduXN2DozHoyHASp7IUREYYwYYY9pl8voaOCAidQGc94N5PH6u9zfGTDTGdDXGdK1Zs2YeD5NHypSxOVoKuyI4cwY+/xxuvtkqPsU/vEno3IoeWrrUprpQs5CSR/w1Dc0C7nI+3wXkdVzs7/7Bw+Ox+Vq2bnVbkuDx7bdw/LhmGg0UbdvambxuKYLYWDsXpn9/d46vhC3+KoIXgYEisgUY6CwjIvVEZLa3kYhMA5YDrUQkXkT+mN3+IYE36iLUcsgEkuhoa9u++mq3JSkc+CahO3Gi4I8fG2vnglSsWPDHVsIavxSBMeawMeYaY0wL5/2Is36f
MeY6n3YjjDF1jTEljTENjDHvZ7d/SHDZZTbFcGE1DyUkwOzZNtNo8eJuS1N4iIqytvqCfoDYuxfWrdOwUSVf6MzirBCxttbvv7fZJQsb77xjc+M88IDbkhQueva0YbgFHT3kVTyqCJR8oIogOzweW7oxVHLIBIqzZ60iuO46O/JRAodvErqUlII7bkyMraPcvn3BHVMpNKgiyI5+/aBUqcJnHvrsM1uW85FH3JakcBIVBUePFtwDRGoqzJunYaNKvlFFkB0VKtg5BYVJERgDr70GrVvDwIFuS1M4GTTIJn0rKPPQTz/ZYkoaNqrkE1UEOeHxwIYNEB/vtiSBYcUKWL3ajgb06TE4VKhg05nPmlUwSehiYqzDf8CA4B9LKZSoIsiJwhZG+tprULky3HGH25IUbqKibLGfDRtybusvsbG2qFKVKsE/llIoUUWQExERUL9+4TAPxcfDzJnwxz/ap1YleAwZYkdcwTYPHTxoR3hqFlL8QBVBTojYUcG8eaFTijC/vPMOpKfDQw+5LUnhp04duPLK4CsCDRtVAoAqgtzg8Vhn3E8/uS1J/klOhnfftU+qzZq5LU3R4IYbYNUqO9krWMTGQq1acPnlwTuGUuhRRZAbBgywzrhwNg9Nnw6HDsGjj7otSdEh2Eno0tLsiCAyEorpX1nJP/rryQ1VqlhnXLgqAm/IaESE5hUqSNq0sRP2gmUeWr0aDh9Ws5DiN6oIcovHY4f5B/OaaTsEWLLE1iXWkNGCxZuE7vvvbZbXQBMba48xaFDg+1aKFKoIcov3qWvePHflyA8TJkDVqppu2g2iomyqiWCEH8fEwBVXQI0age9bKVKoIsgtnTvbP1y4mYd274Yvv4RRo6BcObelKXr07Gl/N4E2Dx0+DD//rGYhJSCoIsgtxYpZp9ycOTYEM1x46y3rI3jwQbclKZoULw6DBwc+Cd28efZ3qPMHlACgiiAveDyQmAi//OK2JLnj9GmYNAluvNEWNFfc4YYb4Ngx+PHHwPUZGwvVqlnTkKL4iSqCvOB1yoWLeeiTT+DIEc0y6jaDBtk62IEyD6Wn29/goEFaVEgJCKoI8kKtWtClS3goAmOsk7hDB+jTx21pijbly9u5KF9/HZgkdGvXwoED6h9QAoYqgrzi8cDy5XaoH8r88AOsX28nkGnIqPtERcGuXfaa+Iv3QSQy0v++FAU/FYGIVBOReSKyxXmvmkW7D0TkoIhsyLB+nIjsFZFfndd1me0fUng8dkbn/PluS5I9r71mSyaOGOG2JAoENgldTIxNKVGnjv99KQr+jwjGAguMMS2ABc5yZkwBshrHvmqM6eS8ZvspT/Dp3t2mcQ5l89COHTatwejRULas29IoALVr29+Ov4ogKQmWLVOzkBJQ/FUEUcCHzucPgRsza2SMWQwc8fNYoUGJEtbeGxtbMEVH8sNbb9mnTy1MH1pERdm0EP4UOVqwwI5INWxUCSD+KoLaxpgEAOe9Vj76eFhE1jnmo0xNSwAiMlpEVonIqsTExPzKGxg8HptRMi7OXTky49QpeO89uOUWaNjQbWkUXwKRhC4mBipVsqMLRQkQOSoCEZkvIhsyeUUF4PhvA82BTkAC8L+sGhpjJhpjuhpjutasWTMAh/YD77A8FM1DU6daR7aGjIYerVpBixb5Nw8ZY39zAwZAyZKBlU0p0uSoCIwxA4wx7TJ5fQ0cEJG6AM57njKyGWMOGGPSjDHpwCSgW36+RIHToAG0axd6isAbMtq5s01toIQW3iR0CxdaW39e2bjRmpXULKQEGH9NQ7OAu5zPdwF5etTxKhGHm4ACKPAaIDweO1P05Em3JbnAggX2ZqFZRkMXf5LQxcTYdw0bVQKMv4rgRWCgiGwBBjrLiEg9ETkfASQi04DlQCsRiReRPzqbXhKR9SKyDrga+Iuf8hQcHg+cOweLFrktyQUmTICaNeHWW92WRMmKHj3sNcqPeSg21taUUN+PEmBK+LOzMeYwcE0m6/cB
1/ksZxrMboy5w5/ju0qvXjabZ2ysTSrmNtu2wbffwt/+ZtMZKKGJNwndl1/akUFubf0nT9oRqPp+lCCgM4vzS+nS0L9/6PgJ3nzT3mT+9Ce3JVFyIirKOvQXL879PgsX2hGozh9QgoAqAn/weOyT+Nat7spx4gS8/z4MGwb16rkri5IzAwfmPQldbKzNWdSrV/DkUoosqgj8IVTCSD/6yJZCVLNBeFCunFUGuU1CZ4x1FPfvb0eiihJgVBH4Q/Pmtji5m4ogPR1ef93mpb/ySvfkUPJGVJStHrd2bc5tt2yxaUPULKQECVUE/uLxWPttcrI7x583D37/XbOMhhuDB9vrlZtZxt6wUVUESpBQReAvHo+tBLZkiTvHnzDBZqEcNsyd4yv5o3ZtG0qaGz9BbCy0bAnNmgVfLqVIoorAX/r1g1Kl3DEPbd4Ms2fbSKFSpQr++Ip/REXBmjWwZ0/Wbc6csXNVdDaxEkRUEfhL+fK2ApgbiuCNN2wc+v33F/yxFf/JTRK6H36wZkc1CylBRBVBIPB4bCbS7J7sAs3x4zB5MgwfrgVKwpVWrewrO/NQbKwNNe3bt+DkUoocqggCgfdpLT/5Y/LLlCl2tumf/1xwx1QCzw03WNNPVknoYmKs+VELDClBRBVBIGjb1mYkLSjzkDdktEcPGzaqhC/eJHTeyCBftm+3fiA1CylBRhVBIBCxf9Z58+yfOtjExNjZzDqBLPzp3t0mocvMT+AdYaoiUIKMKoJA4fFYu/1PPwX/WBMm2FQSt9wS/GMpwaV4cVvYfvbsSx8iYmKgaVMbOqooQUQVQaC45hr7pw62eWjTJpg7Fx58UKtUFRaioqyP4IcfLqw7exa+/94+YOhEQSXIqCIIFFWqWJt9ZrbeQPLGGzbfzOjRwT2OUnAMGGCdwb7RQ0uX2vrTOn9AKQBUEQQSj8dOEDpwIDj9HzsGH34II0ZYu7JSOChXDgYNujgJXUyMHfFdfbW7silFAlUEgcTr1Js7Nzj9f/CBfUpUJ3Hh44Yb7DyUX3+1y7Gx0Ls3VKjgrlxKkUAVQSC5/HL7pB4MP0FamjUL9e5tj6MULrxJ6L7+2iqEDRvULKQUGH6VqlQyUKyYLSweE2Nv3MWLB67v776zqYhfeilwfSqhQ61a0LOnDSNt0MCu07BRpYDwa0QgItVEZJ6IbHHeq2bSpqGILBSRTSISJyKP5mX/sMPjgcOHra8gkEyYYG8QN94Y2H6V0CEqCn75BSZNstc6IsJtiZQigr8jgrHAAmPMiyIy1lkek6FNKvBXY8waEakIrBaRecaYjbncP7wYNMgO8WNjAzfrd8MGWLAAXngBSuggrtASFQVPPQU//wz33VdgYaMpKSnEx8eT7FZNDSXglClThgYNGlAylyHmYnJTKi+rnUV+B/oZYxJEpC6wyBjTKod9vgbeMMbMy8/+AF27djWrVq3Kt9xBp1s3G/GxdGlg+rv/fluOMj4eqlcPTJ9KaNKmDfz2G8ycWWATBnfs2EHFihWpXr06onMWwh5jDIcPH+bEiRM0bdr0om0istoY0zXjPv46i2sbYxKcgycAtbJrLCJNgMsB7/TbXO8vIqNFZJWIrEpMTPRT7CDj8cCKFXD0qP99HTkCH38Mt9+uSqAoMHSonVMwYECBHTI5OVmVQCFCRKhevXqeRng5KgIRmS8iGzJ5ReVRuArA58BjxpjjedkXwBgz0RjT1RjTtWaox9B7PDYx3Pz5/vf1/vu2OIlmGS0a/O1vNqV55coFelhVAoWLvF7PHA3OxpgsH01E5ICI1PUx7RzMol1JrBKINsZ84bMpV/uHHd262ZnGsbH+lZBMTbUho/36QYcOARNPCWFKl7b5hRSlAPHXNDQLuMv5fBdwSYUNsarpfWCTMeaVvO4flpQoAQMHWkXghw+GWbNg926dQKYoSlDxVxG8CAwUkS3AQGcZEaknIrOdNlcBdwD9ReRX53VddvsXCjwe2LfPRvzklwkToHFj
O+tUUZSAsXPnTj755JM873f33Xczc+ZMv4//1VdfsXHjRr/7CRR+xSIaYw4D12Syfh9wnfN5CZCpwSqr/QsFkZH2PTYW2rfP+/5r19pslOPHB3ZimqJkw3PfxLFxX55deNnStl4lnh0SWnMivIrgtttuC9ox0tLSKJ7Ff/err75i8ODBtG3bNtf9paamUiJI4eOaYiJY1K9vFUB+0028/rpNRvbHPwZWLkUJQaZOnUq3bt3o1KkT999/P7t27aJFixYcOnSI9PR0evfuzdy5c9m5cyetW7fmrrvuokOHDgwdOpTTp08DsHr1avr27UuXLl2IjIwkISEBgK1btzJgwAA6duxI586d2bZtG2PHjuXHH3+kU6dOvPrqq6SlpfHkk09yxRVX0KFDB959913AhmI+/PDDtG3bluuvv56DB7N3YzZp0oR//vOf9OrVixkzZrBt2zY8Hg9dunShd+/e/PbbbyxbtoxZs2bx5JNP0qlTJ7Zt20a/fv3whsQfOnSIJk2aADBlyhSGDRvGkCFDGDRoEIsWLaJfv34MHTqU1q1bM3LkSPyZAnAeY0zYvbp06WLCgiefNKZkSWNOnMjbfomJxpQpY8z99wdHLkXxYePGja4ff/DgwebcuXPGGGMeeOAB8+GHH5pJkyaZW265xbz00ktm9OjRxhhjduzYYQCzZMkSY4wx99xzjxk/frw5d+6c6dGjhzl48KAxxpjp06ebe+65xxhjTLdu3cwXX3xhjDHmzJkz5tSpU2bhwoXm+uuvPy/Du+++a/71r38ZY4xJTk42Xbp0Mdu3bzeff/65GTBggElNTTV79+41lStXNjNmzMjyuzRu3Nj897//Pb/cv39/s3nzZmOMMStWrDBXX321McaYu+6666J++vbta1auXGmMMSYxMdE0btzYGGPM5MmTTf369c3hw4eNMcYsXLjQVKpUyezZs8ekpaWZ7t27mx9//DHL85oRYJXJ5J6q01SDicdjTTsLF9oqVLll0iRITtaQUaVIsGDBAlavXs0Vzkz8M2fOUKtWLcaNG8eMGTN45513+NWblRVo2LAhV111FQC33347EyZMwOPxsGHDBgYOHAhYs0zdunU5ceIEe/fu5aabbgLsjNvMmDt3LuvWrTtv/09KSmLLli0sXryYESNGULx4cerVq0f//v1z/D633norACdPnmTZsmUM84kcPHv2bF5PDwMHDqRatWrnl7t160YDJx9Vp06d2LlzJ7169cpzv76oIggmV10F5ctb81BuFUFKCrz1lp1QpLlmlCKAMYa77rqLF1544aL1p0+fJj4+HrA31YoVKwKXxsiLCMYYIiIiWL58+UXbjh/Pnb/DGMPrr79OpNe35zB79uw8x+SXL18egPT0dKpUqXKREsuKEiVKkJ6eDnDJRDBvf15Kly59/nPx4sVJTU3Nk3yZoT6CYFK6NPTvb7OR5taO99VXNpWEhowqRYRrrrmGmTNnnre/HzlyhF27djFmzBhGjhzJP//5T0aNGnW+/e7du8/f8KdNm0avXr1o1aoViYmJ59enpKQQFxdHpUqVaNCgAV999RVgn8hPnz5NxYoVOXHixPk+IyMjefvtt0lx6kZv3ryZU6dO0adPH6ZPn05aWhoJCQksXLgw19+rUqVKNG3alBkzZgBW2axduxbgkuM3adKE1atXAwQkKimvqCIINh6PTR+9dWvu2k+YAM2awXXX5dxWUQoBbdu25fnnn2fQoEF06NCBgQMHsnPnTlauXHleGZQqVYrJkycD0KZNGz788EM6dOjAkSNHeOCBByhVqhQzZ85kzJgxdOzYkU6dOrFs2TIAPv74YyZMmECHDh3o2bMn+/fvp0OHDpQoUYKOHTvy6quvct9999G2bVs6d+5Mu3btuP/++0lNTeWmm26iRYsWtG/fngceeIC+ffvm6btFR0fz/vvv07FjRyIiIvjaKUc6fPhwxo8fz+WXX862bdt44oknePvtt+nZsyeHDh0K7AnO
BX4lnXOLkE8658v27dC8ub3B52TzX7MGunSBV16Bv/ylYORTijybNm2iTZs2bouRK3bu3MngwYPZw2xCtAAADM9JREFU4M/8nCJCZtc1WEnnlJxo1gxatMhdGOmECdancO+9wZdLURTFQZ3FBYHHA++9ZyOBsoha4OBBmDYNRo0q8IRjihIuNGnSJCRGAzfddBM7duy4aN1///vfS5zN4YIqgoLA47ETxH780eYgyoyJE+HcOXj44YKVTVGUPPPll1+6LUJAUdNQQdC3r40gyso85A0ZjYyE1q0LVjZFUYo8qggKgvLloU8fG0aaGTNnQkKChowqiuIKqggKCo8HNm2CXbsu3TZhgnUoezwFL5eiKEUeVQQFhfcmP2fOxet//tmWtfzzn6GYXg5FUQoevfMUFG3aQMOGl/oJXn8dKlaEu+7KfD9FUbLFt0bAfffd53ee/507d9KuXTu/5Tp27BhvvfWW3/0UBBo1VFCI2FHB9OnWOVyyJOzfD59+Cg88AJUquS2hosBjj0EucuPkiU6d4P/+L1dNvdkwi+VzdPzee+/la7/8kl2NAK8iePDBB/PUZ3Z1DIKFjggKkmuvhRMnwJsY6513bF1izTKqFGF27txJmzZtePDBB+ncuTMff/wxPXr0oHPnzgwbNoyTJ08C8M9//pMrrriCdu3aMXr06Ezz8Hvz+s+aNYtOnTrRqVMnWrVqRVOnDnRWNQtWr15Nx44d6dGjB2+++Wa28masEQAwfvz487UMnn32WQDGjh3Ltm3b6NSpE08++SSLFi1i8ODB5/t5+OGHmTJlCnBpHYN+/foxZswYunXrRsuWLfnxxx/9O8k5kVlu6lB/hU09gowcO2ZMiRLGPP20McnJxtSubYxPTnRFcQO36xHs2LHDiIhZvny5SUxMNL179zYnT540xhjz4osvmueee84YY87n5DfGmNtvv93MmjXLGHNxbn/fvP5ehg0bZt54441saxa0b9/eLFq0yBhjzBNPPGEiIiKylDdjjYA5c+aYUaNGmfT0dJOWlmauv/5688MPP5gdO3Zc1E/GGggPPfSQmTx5sjHm0joGffv2NY8//rgxxpjvvvvOXHPNNbk6l75oPYJQpXJl6NnT+gnatoUDBzRkVFGAxo0b0717d7799ls2btx4vt7AuXPn6NGjBwALFy7kpZde4vTp0xw5coSIiAiG5JDe/aWXXqJs2bI89NBDbNiwIdOaBUlJSRw7dux8Qrk77riDmKxCvR18awTMnTuXuXPncvnllwM2ZfaWLVto1KhRns6Bt46Bl5tvvhmALl26sHPnzjz1lVf8UgQiUg34FGgC7AT+YIw5mqFNQ+AjoA6QDkw0xrzmbBsHjAISnebPGGNmU5jxeOCZZ+D55+3ksaxmGitKEcKbc98Yw8CBA5k2bdpF25OTk3nwwQdZtWoVDRs2ZNy4cZfk7c/IggULmDFjBosXLz7fd2Y1C44dO5bvmgPefp9++mnuv//+i9pkvHn71hzwfqes+oQLdQcCVXMgO/z1EYwFFhhjWgALnOWMpAJ/Nca0AboDD4mIb8XmV40xnZxX4VYCcCGM9PffrW8gjz9ARSnMdO/enaVLl7LVSdt++vRpNm/efP6mWaNGDU6ePJljzv5du3bx4IMP8tlnn1G2bFmALGsWVKlShcqVK7NkyRLApo7OC5GRkXzwwQfnfRl79+7l4MGDl9QcaNy4MRs3buTs2bMkJSWxYMGCPB0nmPhrGooC+jmfPwQWAWN8GxhjEoAE5/MJEdkE1Af8i/EKVzp2hNq1bQK6O+90WxpFCSlq1qzJlClTGDFixPmyjs8//zwtW7Zk1KhRtG/fniZNmpwva5kVU6ZM4fDhw+dLVNarV4/Zs2czc+ZMHnnkEZKSkkhNTeWxxx4jIiKCyZMnc++991KuXLk8J44bNGgQmzZtOm/CqlChAlOnTqV58+ZcddVVtGvXjmuvvZbx48fzhz/8gQ4dOtCiRYvzpqRQwK96
BCJyzBhTxWf5qDGmajbtmwCLgXbGmOOOaehu4DiwCjtyOJrFvqOB0QCNGjXqsiuzGbrhwrRpdvJYBpugorhBONUjUHJPXuoR5DgiEJH5WPt+Rv5fXoQSkQrA58BjxhhvIdG3gX8Bxnn/H5BpMn5jzERgItjCNHk5dsgxYoTbEiiKopwnR0VgjBmQ1TYROSAidY0xCSJSFziYRbuSWCUQbYz5wqfvAz5tJgHf5kV4RVGUYDFnzhzGjLnI0k3Tpk0LXQpq8N9HMAu4C3jRef86YwOx7vj3gU3GmFcybKvr+BAAbgLcrzihKEUQY0yeI2cKO5GRkWFbaCavJn9/o4ZeBAaKyBZgoLOMiNQTEW8E0FXAHUB/EfnVeXkrs78kIutFZB1wNaCFehWlgClTpgyHDx/O881DCU2MMRw+fJgyWVVDzAQtXq8oRZyUlBTi4+NzjMtXwocyZcrQoEEDSpYsedH6fDuLFUUp3JQsWfJ8Lh6laKJJ5xRFUYo4qggURVGKOKoIFEVRijhh6SwWkUQgv1OLawCHAihOuKPn4wJ6Li5Gz8fFFIbz0dgYUzPjyrBUBP4gIqsy85oXVfR8XEDPxcXo+biYwnw+1DSkKIpSxFFFoCiKUsQpiopgotsChBh6Pi6g5+Ji9HxcTKE9H0XOR6AoiqJcTFEcESiKoig+qCJQFEUp4hQpRSAiHhH5XUS2ikhm9ZWLBCLSUEQWisgmEYkTkUfdlikUEJHiIvKLiBT5uhgiUkVEZorIb87vpIfbMrmFiPzF+Z9sEJFpIpL7tJ5hQpFRBCJSHHgTuBZoC4wQkbbuSuUaqdiyoG2A7sBDRfhc+PIosMltIUKE14BYY0xroCNF9LyISH3gEaCrMaYdUBwY7q5UgafIKAKgG7DVGLPdGHMOmA5EuSyTKxhjEowxa5zPJ7B/8vruSuUuItIAuB54z21Z3EZEKgF9sAWlMMacM8Ycc1cqVykBlBWREkA5YJ/L8gScoqQI/n97dxdiVRWGcfz/SHWhlX1gZJqMiYxGlHYh1USEikREUHTRhxHVTYTGFBjYVV0lKNVFNUjThKA3MQpJHygUFA5hQzk4pZRU5owUCVE2WUM1TxdrDXOaBtOcaQ2z3h8czt57Nvu85zD7vGetvfa75gB9Dev9VP7lByCpCVgK7C0bSXEvAE8CQ6UDmQSuAI4Br+WusnZJM0oHVYLto8Am4AjwLfCT7d1loxp/NSWCsebhq3rsrKRzSXNJt9o+XjqeUiTdBnxv++PSsUwSZwHXAm22lwK/AFVeU5N0IannYD5wGTBD0uqyUY2/mhJBP3B5w/pcpmAT71RJOpuUBLbZ3lE6nsJagNslHSZ1GS6XtLVsSEX1A/22h1uJnaTEUKOVwNe2j9n+HdgB3FA4pnFXUyLoBhZKmi/pHNIFn52FYypCaZbyV4GDtp8rHU9pttfbnmu7ifR/8Z7tKfer71TZ/g7ok9ScN60ADhQMqaQjwHWSpufzZgVT8MJ5NVNV2v5D0hpgF+nKf4ftzwqHVUoLcD/QK6knb3vK9tsFYwqTy1pgW/7R9BXwYOF4irC9V1In8AlptN0+pmCpiSgxEUIIlaupayiEEMIYIhGEEELlIhGEEELlIhGEEELlIhGEEELlqhk+Guol6WLg3bx6KfAnqYQCwAnb43qDkKTpwCvA1aQ72n8EbiGdb/fafnk8Xy+EMxXDR0NVJD0NDNjeNIGvsR6YZfuJvN4MHAZmA2/mKpYhTBrRNRSqJmkgP98s6X1Jr0v6QtIGSfdJ+khSr6QFeb9ZkrZL6s6PljEOOxs4Orxi+3Pbg8AGYIGkHkkb8/HW5ePsl/RM3taU5wHYkrd35lYGOa4DefuEJbNQl+gaCmHENcBi4AfS3bTttpfliXvWAq2kOv3P294jaR7pTvXFo47TAeyWdBepS2qL7UOkwm1X2V4CIGkVsJBUIl3ATkk3kcoaNAMP2+6S1AE8mp/vABbZtqQLJu6jCDWJFkEII7rz
XA2DwJfAcLnhXqApL68EXsylOXYC50s6r/EgtntIpZw3AhcB3ZJGJwuAVfmxj1TCYBEpMQD02e7Ky1uBG4HjwG9Au6Q7gRNn9nZDSKJFEMKIwYbloYb1IUbOlWnA9bZ/PdmBbA+QKlXukDQE3Eqq9tpIwLO2N/9tY5ojYvTFO+d6WctIhc/uBtYAy//9bYVwctEiCOH07CZ9AQMgacnoHSS15Dr25KJtVwLfAD8Dja2HXcBDeV4IJM2RdEn+27yGeYLvAfbk/Wbm4oCtwD9eO4T/IloEIZyex4CXJO0nnT8fAI+M2mcB0JbLFk8D3gK25379LkmfAu/YXpe7jD5MuzIArCYNbz0IPCBpM3AIaANmAm/kydMFPD7B7zVUIoaPhjDJ5K6hGGYa/jfRNRRCCJWLFkEIIVQuWgQhhFC5SAQhhFC5SAQhhFC5SAQhhFC5SAQhhFC5vwDbx1yrkt96LQAAAABJRU5ErkJggg==\n",
      "text/plain": [
       "<Figure size 432x288 with 1 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    }
   ],
   "source": [
     "# Compare the realized and expected returns for one asset.\n",
     "# Note that they should appear correlated (the realized series is\n",
     "# presumably the expected series plus noise -- see the simulation cell below).\n",
     "\n",
     "# pick a random asset ID to show\n",
     "asset_idx =  4 \n",
     "\n",
     "plt.plot(expected_risky_returns[:,asset_idx],label='expected_return')\n",
     "plt.plot(risky_asset_returns[:,asset_idx],label='realized_return',color='r')\n",
     "plt.legend()\n",
     "plt.xlabel('Time Steps')\n",
     "plt.title('Realized returns vs expected returns')\n",
     "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "WqAdjcoFPQPp"
   },
   "source": [
     "### Compute the empirical covariance matrix using realized returns"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 34
    },
    "colab_type": "code",
    "id": "sl5JQ3_8PQPs",
    "outputId": "5f93d784-2e83-4a2c-bf2e-23bd42eb5fb8"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(99, 99)\n"
     ]
    }
   ],
   "source": [
     "# Empirical covariance matrix of the realized risky-asset returns.\n",
     "# Rows of risky_asset_returns are time steps, so transpose: np.cov treats\n",
     "# each row of its input as one variable.\n",
     "cov_mat_r = np.cov(risky_asset_returns.T) \n",
     "\n",
     "print(cov_mat_r.shape)\n",
     "\n",
     "# np.linalg.eigh returns eigenvalues of a symmetric matrix in ascending order\n",
     "D,v = np.linalg.eigh(cov_mat_r)\n",
     "\n",
     "eigenvals = D[::-1]  # put them in descending order"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 69
    },
    "colab_type": "code",
    "id": "H-HzoeHOPQP0",
    "outputId": "96c11580-037d-430d-fe77-5e54635ea519"
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([5.33423646e-01, 7.78434686e-03, 7.39725455e-03, 6.14590217e-03,\n",
       "       5.37148031e-03, 4.72450470e-03, 4.18736940e-03, 3.85484267e-03,\n",
       "       2.63800675e-03, 7.98779242e-17])"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# eigenvalues: the largest eigenvalue is the market factor \n",
    "eigenvals[0:10]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 156
    },
    "colab_type": "code",
    "id": "hFC61Ci94nnd",
    "outputId": "fc1d8cde-9ada-4e39-f6e6-3fe3b55559e0"
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 20.1675,  -5.1822,   5.6004,  ...,   1.2898,   5.5054,   1.1747],\n",
       "        [ -5.1822,  29.3149,  13.5833,  ...,  -0.8479,  -0.1155,  -6.1745],\n",
       "        [  5.6004,  13.5833,  44.0852,  ...,   7.2509,  -4.3631, -12.0364],\n",
       "        ...,\n",
       "        [  1.2898,  -0.8479,   7.2509,  ...,   2.5699,  -1.3894,  -1.3292],\n",
       "        [  5.5054,  -0.1155,  -4.3631,  ...,  -1.3894,  19.4548,  -0.4174],\n",
       "        [  1.1747,  -6.1745, -12.0364,  ...,  -1.3292,  -0.4174,  10.4676]],\n",
       "       dtype=torch.float64)"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Invert the covariance matrix with the Moore-Penrose pseudo-inverse:\n",
     "# the matrix is numerically rank-deficient (its smallest eigenvalue above\n",
     "# is ~8e-17), so a plain inverse would be ill-conditioned.\n",
     "cov_mat_torch = torch.tensor(cov_mat_r)\n",
     "\n",
     "torch.pinverse(cov_mat_torch)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "ohSC6oDePQP5"
   },
   "source": [
    "### Add a riskless bond as one more asset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "F7nwaM28PQP9"
   },
   "outputs": [],
   "source": [
    "num_assets = num_risky_assets + 1\n",
    "\n",
    "bond_val = 100.0\n",
    "\n",
    "# add the bond to initial assets\n",
    "init_asset_vals = np.hstack((np.array([bond_val]),\n",
    "                            init_risky_asset_vals))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "hZQgn_CTPQQE"
   },
   "source": [
    "### Make the initial portfolio "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "PyMLBotBPQQH"
   },
   "outputs": [],
   "source": [
     "# Build the initial portfolio x_vals_init (dollar position per asset).\n",
     "# Two choices are supported: 'equal' (same cash amount in every asset)\n",
     "# and 'equally_weighted' (cash allocated in proportion to initial asset values).\n",
     "\n",
     "init_port_choice =  'equal' \n",
     "\n",
     "init_cash = 1000.0\n",
     "init_total_asset = np.sum(init_asset_vals)\n",
     "\n",
     "x_vals_init = np.zeros(num_assets)\n",
     "\n",
     "if init_port_choice == 'equal': \n",
     "    # hold equal amounts of cash in each asset\n",
     "    amount_per_asset = init_cash/num_assets\n",
     "    x_vals_init = amount_per_asset * np.ones(num_assets)\n",
     "\n",
     "elif init_port_choice == 'equally_weighted':\n",
     "    # positions proportional to the initial asset values\n",
     "    amount_per_asset = init_cash/init_total_asset\n",
     "    x_vals_init = amount_per_asset * init_asset_vals\n",
     "    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "Zkj8GdPDPQQP"
   },
   "source": [
    "### Make the target portfolio"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 34
    },
    "colab_type": "code",
    "id": "LyyKCaPGPQQS",
    "outputId": "bda0d8d5-b848-4c2b-c36b-3f2ac1a1bf78"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1100.0 1541.5835692311712\n"
     ]
    }
   ],
   "source": [
     "# Make a target portfolio term structure: the initial portfolio value\n",
     "# grown at a fixed (deliberately high) rate target_return, then scaled up\n",
     "# by coeff_target so the target starts 10% above the initial cash.\n",
     "\n",
     "target_portfolio = [init_cash]\n",
     "\n",
     "target_return = 0.15 \n",
     "coeff_target = 1.1 \n",
     "\n",
     "for i in range(1,num_steps):\n",
     "    target_portfolio.append(target_portfolio[i-1]*np.exp(dt * target_return) )\n",
     "    \n",
     "target_portfolio = coeff_target*np.array(target_portfolio)    \n",
     "print(target_portfolio[0], target_portfolio[-1])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "2UMA8UwlPQQc"
   },
   "source": [
    "### Define model parameters"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 34
    },
    "colab_type": "code",
    "id": "5G_Y87m2PQQe",
    "outputId": "0cf27618-1f16-4676-9741-4e2c3ce8d466"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1133.1484530668263 3490.3429574618413\n"
     ]
    }
   ],
   "source": [
    "riskfree_rate = 0.02\n",
    "fee_bond = 0.05 \n",
    "fee_stock = 0.05 \n",
    "\n",
    "all_fees = np.zeros(num_risky_assets + 1)\n",
    "all_fees[0] = fee_bond\n",
    "all_fees[1:] = fee_stock\n",
    "Omega_mat = np.diag(all_fees)\n",
    "\n",
    "\n",
    "# model parameters\n",
    "\n",
    "lambd = 0.001 \n",
    "Omega_mat = 15.5 * np.diag(all_fees) \n",
    "eta = 1.5 \n",
    "\n",
    "beta = 100.0\n",
    "gamma = 0.95 \n",
    "\n",
    "exp_returns = expected_risky_returns\n",
    "\n",
    "Sigma_r = cov_mat_r\n",
    "\n",
    "# Generate the benchmark target portfolio by growing the initial portfolio value at rate eta\n",
    "\n",
    "target_return =  0.5 \n",
    "benchmark_portf = [ init_cash   * np.exp(dt * target_return)]\n",
    "\n",
    "rho = 0.4 \n",
    "\n",
    "for i in range(1,num_steps):\n",
    "    benchmark_portf.append(benchmark_portf[i-1]*np.exp(dt * target_return) )\n",
    "    \n",
    "print(benchmark_portf[0], benchmark_portf[-1])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "YT6003xKPQQm"
   },
   "source": [
    "### Simulate portfolio data\n",
    "\n",
    "Produce a list of trajectories, where each trajectory is a list made of state-action pairs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "So89W92zPLMq"
   },
   "outputs": [],
   "source": [
     "# Parameters used to generate the simulated trajectories.\n",
     "# NOTE(review): beta here (1000.0) overrides the value 100.0 set two cells\n",
     "# above; the other values repeat their earlier settings. Confirm this\n",
     "# override is intentional.\n",
     "lambd = 0.001 \n",
     "omega = 1.0 \n",
     "beta = 1000.0 # fixed\n",
     "eta = 1.5 \n",
     "rho = 0.4"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "lCzgn95o8wUD"
   },
   "outputs": [],
   "source": [
    "reward_params=[lambd, omega, eta, rho]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "58NX65KOPQRD"
   },
   "outputs": [],
   "source": [
     "# Create a G-learner configured for the wealth-management task\n",
     "G_learner = G_learning_portfolio_opt(num_steps,\n",
     "                 reward_params,  \n",
     "                 beta,                \n",
     "                 benchmark_portf,\n",
     "                 gamma, \n",
     "                 num_risky_assets,\n",
     "                 riskfree_rate,\n",
     "                 expected_risky_returns, # array of shape num_steps x num_stocks\n",
     "                 Sigma_r,     # covariance matrix of risky-asset returns\n",
     "                 x_vals_init, # array of initial values of len (num_stocks+1)\n",
     "                 use_for_WM = True) # use for wealth management tasks"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 34
    },
    "colab_type": "code",
    "id": "mhy6IPKs7nRq",
    "outputId": "78fbc7d6-2869-48a9-f754-c9ed7c5127c6"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Doing G-learning, it may take a few seconds...\n"
     ]
    }
   ],
   "source": [
     "# Reset the prior policy, then run G-learning with convergence tolerance\n",
     "# error_tol and at most max_iter_RL iterations.\n",
     "G_learner.reset_prior_policy()\n",
     "error_tol=1.e-8 \n",
     "max_iter_RL = 200\n",
     "G_learner.G_learning(error_tol, max_iter_RL)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 34
    },
    "colab_type": "code",
    "id": "ZJbI_k9mPQQo",
    "outputId": "45fc8ba1-fb6d-450b-8502-b96ffa56419a"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Done simulating trajectories in 37.617893 sec\n"
     ]
    }
   ],
   "source": [
     "# Simulate num_sim trajectories of (state, action) pairs under the\n",
     "# G-learner's prior policy, recording per-step portfolio returns.\n",
     "num_sim = 1000\n",
     "trajs = []\n",
     "# fix both numpy and torch seeds so the trajectories are reproducible\n",
     "np.random.seed(0)\n",
     "torch.manual_seed(0)\n",
     "t_0 = time.time()\n",
     "\n",
     "\n",
     "# NOTE(review): x_vals is never appended to or read in this cell --\n",
     "# verify whether a later cell depends on it\n",
     "x_vals = [x_vals_init]\n",
     "returns_all = []\n",
     "for n in range(num_sim):\n",
     "    this_traj = []\n",
     "    x_t = x_vals_init[:]\n",
     "    returns_array = []\n",
     "    for t in range(0,num_steps):\n",
     "        \n",
     "        \n",
     "       \n",
     "        # mean of the Gaussian prior policy at time t, conditioned on state x_t\n",
     "        mu_t = G_learner.u_bar_prior[t,:] + G_learner.v_bar_prior[t,:].mv(torch.tensor(x_t))\n",
     "        # sample the action (trade vector) from the prior policy\n",
     "        u_t = np.random.multivariate_normal(mu_t.detach().numpy(), G_learner.Sigma_prior[t,:].detach().numpy())\n",
     "        # compute new values of x_t\n",
     "\n",
     "        x_next = x_t +u_t\n",
     "        # grow this with random return\n",
     "        \n",
     "        idiosync_vol =  0.05 # vol_market     \n",
     "        rand_norm = np.random.randn(num_risky_assets)\n",
     "        \n",
     "        # asset returns are simulated from a one-factor model\n",
     "        # NOTE(review): this shadows the dataset array of the same name\n",
     "        # defined earlier in the notebook\n",
     "        risky_asset_returns = (expected_risky_returns[t,:] + beta_vals * (returns_market[t] - mu_market * dt) \n",
     "                         + idiosync_vol * np.sqrt(1 - beta_vals**2) * np.sqrt(dt) * rand_norm)\n",
     "        \n",
     "        # prepend the riskless return to get the full asset-return vector\n",
     "        returns = np.hstack((riskfree_rate*dt, risky_asset_returns))\n",
     "        \n",
     "        x_next = (1+returns)*x_next\n",
     "        # portfolio return net of injected cash and a 1.5% proportional trading cost\n",
     "        port_returns=(x_next.sum() -x_t.sum() -np.sum(u_t) - 0.015*np.abs(u_t).sum())/x_t.sum()\n",
     "        \n",
     "        this_traj.append((x_t, u_t))\n",
     "        \n",
     "        # rename\n",
     "        x_t = x_next\n",
     "        returns_array.append(port_returns) \n",
     "    # end the loop over time steps\n",
     "    trajs.append(this_traj)\n",
     "    returns_all.append(returns_array)\n",
     "\n",
     "print('Done simulating trajectories in %f sec'% (time.time() - t_0))        "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "bpnem5H5vZ5H"
   },
   "source": [
    "### Calculate performance of G-learner (Diagnostics only)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "4Z731JGirsdV"
   },
   "outputs": [],
   "source": [
    "returns_all_G=returns_all"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 34
    },
    "colab_type": "code",
    "id": "UIYUAIWvO04X",
    "outputId": "7e9f5055-b670-4a22-c6cf-d8082c03c9c6"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "-0.2835972743224643\n"
     ]
    }
   ],
   "source": [
     "# Average per-trajectory Sharpe ratio of the G-learner's portfolio:\n",
     "# for each trajectory, (mean per-step return minus per-step riskless rate)\n",
     "# divided by the per-step return std, then averaged over all simulations.\n",
     "SR_G=0\n",
     "for i in range(num_sim):\n",
     "  SR_G+=(np.mean(returns_all_G[i])-riskfree_rate*dt)/np.std(returns_all_G[i])\n",
     "\n",
     "SR_G/=num_sim\n",
     "print(SR_G)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "TT9dIJP_t0K_"
   },
   "outputs": [],
   "source": [
    "r_G=np.array([0]*num_steps, dtype='float64')\n",
    "for n in range(num_steps):\n",
    "  for i in range(num_sim):\n",
    "    r_G[n]+=returns_all_G[i][n]\n",
    "  r_G[n]/=num_sim\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 296
    },
    "colab_type": "code",
    "id": "KAdjSXoHui7R",
    "outputId": "9f5ea1f7-38b7-4aff-ef5a-e9ef9788651d"
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Text(0, 0.5, 'Sample Mean Returns')"
      ]
     },
     "execution_count": 30,
     "metadata": {},
     "output_type": "execute_result"
    },
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZcAAAEHCAYAAABiAAtOAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAgAElEQVR4nO3deXxU5fX48c8hrCIIKiCrLEZQVCJGFkVHRBoEKyguqFVQ64rW7Vuxrb/WVlvFtYpbwQ23AIoIVhQRzeCGAoIIArJaI8gqi8rO+f3x3IEhTJJJcmfuzOS8X695TebOXU5CyJn7LOcRVcUYY4zxU5WgAzDGGJN5LLkYY4zxnSUXY4wxvrPkYowxxneWXIwxxvjOkosxxhjfVQ3y4iLSC3gUyAKeUdX7irwv3vu9gV+BQar6pYi0BUZH7doa+Kuq/ltE7gKuAtZ47/1ZVSeWFMehhx6qLVu29OE7MsaYymPmzJlrVbVBrPcCSy4ikgU8AfQECoHpIjJBVb+J2u1MINt7dAaeAjqr6kIgJ+o8PwDjoo57RFUfjDeWli1bMmPGjIp8O8YYU+mIyHfFvRdks1gnYLGqLlXV7cAooG+RffoCL6ozDagnIo2L7NMDWKKqxX6TxhhjkivI5NIU+D7qdaG3raz7DADyi2y7QUTmiMhzIlI/1sVF5GoRmSEiM9asWRNrF2OMMeUUZHKRGNuK1qIpcR8RqQ6cDbwW9f5TQBtcs9lK4KFYF1fV4aqaq6q5DRrEbDI0xhhTTkF26BcCzaNeNwNWlHGfM4EvVXVVZEP01yIyAvhveYLbsWMHhYWFbN26tTyHmwxSs2ZNmjVrRrVq1YIOxZi0EWRymQ5ki0grXIf8AODiIvtMwDVxjcJ16G9U1ZVR719EkSYxEWkctc85wNzyBFdYWEidOnVo2bIlbtCaqYxUlXXr1lFYWEirVq2CDseYtBFYclHVnSJyAzAJNxT5OVWdJyLXeu8/DUzEDUNejBuKfHnkeBE5ADfS7Joip75fRHJwzWfLY7wfl61bt1piMYgIhxxyCNYvZ0zZBDrPxZt/MrHItqejvlZgcDHH/gocEmP7pX7FZ4nFgP0eGFMeNkPfGGMqq8ceg7feSsipLbmksFWrVnHxxRfTunVrTjjhBLp27cq4ceNi7jto0CBef/31hMd00kkn+XauWbNm8fvf/z7me8uWLaNz585kZ2dz4YUXsn379v32mT17Nl27dqV9+/Ycd9xxjB69t2jDlClT6NixIzk5OXTr1o3Fixfvc+z06dPJysra8zPbvn07p556Kjt37vTt+zMmpf30E/zpT/Dmmwk5vSWXFKWq9OvXj1NPPZWlS5cyc+ZMRo0aRWFhYUKvW9of108//dS3a/3rX//ixhtvjPnekCFDuOWWW1i0aBH169fn2Wef3W+fAw44gBdffJF58+bx7rvvcvPNN7NhwwYArrvuOl555RVmz57NxRdfzD333LPnuF27djFkyBDy8vL2bKtevTo9evTYJ0EZk9Gefx5+/RWK+T9YUZZcUtQHH3xA9erVufbaa/dsO/zww4v9Yxxt5syZhEIhTjjhBPLy8li50g2eGzFiBCeeeCIdOnSgf//+/Prrr4C767n11lvp3r07Q4YM4a677uKKK67gtNNOo3Xr1jz22GN7zn3ggQcCUFBQwGmnncZ5551Hu3btuOSSS4gsmT1x4kTatWtHt27d+MMf/sBZZ521X4ybN29mzpw5dOjQYb/3VJUPPviA8847D4CBAwfyZoxPV0ceeSTZ2dkANGnShIYNG+7peBcRNm3aBMDGjRtp0qTJnuOGDRtG//79adiw4T7n69evH6+88kqpP19j0t6uXfDEE9CtG+TkJOQSgXbop42bb4bZs/09Z04O/Pvfxb49b948OnbsWObT7tixgxtvvJHx48fToEEDRo8ezV/+8heee+45zj33XK666ioA7rzz
Tp599tk9yerbb7/l/fffJysri7vuuosFCxbw4YcfsnnzZtq2bct111233zyPWbNmMW/ePJo0acLJJ5/MJ598Qm5uLtdccw1Tp06lVatWXHTRRTHjnDFjBsccc0zM99atW0e9evWoWtX9ejZr1owffvihxO/7iy++YPv27bRp0waAZ555ht69e1OrVi3q1q3LtGnTAPjhhx8YN24cH3zwAdOnT9/nHMccc8x+24zJSO+8A0uXwr33JuwSdueSJgYPHkyHDh048cQTS9xv4cKFzJ07l549e5KTk8M999yzpylt7ty5nHLKKRx77LG88sorzJs3b89x559/PllZWXte9+nThxo1anDooYfSsGFDVq1atd+1OnXqRLNmzahSpQo5OTksX76cBQsW0Lp16z1zQopLLitXrqS4ygiRO6BoJY3YWrlyJZdeeinPP/88Vaq4X+lHHnmEiRMnUlhYyOWXX86tt94KwM0338zQoUP3+V4jsrKyqF69Ops3by72WsZkhGHDoGlTOOechF3C7lziUcIdRqK0b9+esWPH7nn9xBNPsHbtWnJzcwG4/PLLmTVrFk2aNGHixL2juVWV9u3b89lnn+13zkGDBvHmm2/SoUMHXnjhBQoKCva8V7t27X32rVGjxp6vs7KyYvbFxNonVmKIpVatWvtUP8jLy2PVqlXk5uYyYsQINmzYwM6dO6latSqFhYX7NGtF27RpE3369OGee+6hS5cuAKxZs4avvvqKzp07A3DhhRfSq1cvwN0xDRgwAIC1a9cyceJEqlatSr9+/QDYtm0bNWvWjOt7MCYtLVwI770Hd98NCaw6YXcuKer0009n69atPPXUU3u2RfpIAJ5//nlmz569T2IBaNu2LWvWrNmTXHbs2LHnDmXz5s00btyYHTt2JKxvoV27dixdupTly5cDFNtBftRRR+0zgmvSpEnMnj2bZ555BhGhe/fue0ZyjRw5kr59ixbMdiO8zjnnHC677DLOP//8Pdvr16/Pxo0b+fbbbwGYPHkyRx11FOBGoS1fvpzly5dz3nnn8eSTT+5JLOvWraNBgwZW5sVktscfh+rV4eqrE3oZSy4pSkR48803CYfDtGrVik6dOjFw4ECGDh1a4nHVq1fn9ddfZ8iQIXTo0IGcnJw9I7zuvvtuOnfuTM+ePWnXrl1C4q5VqxZPPvkkvXr1olu3bjRq1IiDDjpov/3atWvHxo0bi22CGjp0KA8//DBHHHEE69at48orrwTcnUdk+PKYMWOYOnUqL7zwAjk5OeTk5DB79myqVq3KiBEj6N+/Px06dOCll17igQceKDX2Dz/8kN69e1fguzcmxW3aBC+8ABdeCEUGtPhN4m3GyGS5ubladLGw+fPn7/m0a8rm559/5sADD0RVGTx4MNnZ2dxyyy377ffII49Qp06dYue6JNu5557LvffeS9u2bfd7z34fTEYYNgz+8Af44gsopf82HiIyU1VzY71ndy7GdyNGjCAnJ4f27duzceNGrrkmdnm36667bp9+myBt376dfv36xUwsxmSE3btdk1jnzr4kltLYnQt252JKZ78PJu299x7k5cHLL8Mll/hySrtzKSdLvAbs98BkiGHDoFEjiBr8kkiWXIpRs2ZN1q1bZ39YKrnIei42PNmktaVL4e234Zpr3EixJLB5LsVo1qwZhYWFto6H2bMSpTFp64knICvLJZckseRSjGrVqtnKg8aY9PfLL/Dcc3DeeVDMZORECLRZTER6ichCEVksInfEeF9E5DHv/Tki0jHqveUi8rWIzBaRGVHbDxaRySKyyHuun6zvxxhjUs7LL8OGDQmrflycwJKLiGQBTwBnAkcDF4nI0UV2OxPI9h5XA08Veb+7quYUGa1wBzBFVbOBKd5rY4ypfFRdR/7xx0PXrkm9dJB3Lp2Axaq6VFW3A6OAojU++gIvqjMNqCcijUs5b19gpPf1SKCfn0EbY0zaKCiAefPcXUuSl+sOMrk0Bb6Pel3o
bYt3HwXeE5GZIhJdJKeRqq4E8J5j1jgQkatFZIaIzLBOe2NMRho2DA45BLxirckUZHKJlUaLjvstaZ+TVbUjrulssIicWpaLq+pwVc1V1dziSr8bY0za+u47GD8erroKatVK+uWDTC6FQPOo182AFfHuo6qR59XAOFwzG8CqSNOZ97za98iNMSbVRSqqX3ddIJcPMrlMB7JFpJWIVAcGABOK7DMBuMwbNdYF2KiqK0WktojUARCR2sBvgLlRxwz0vh4IjE/0N2KMMSllyxYYMQL69YMWLQIJIbB5Lqq6U0RuACYBWcBzqjpPRK713n8amAj0BhYDvwKXe4c3AsZ5qxNWBV5V1Xe99+4DxojIlcD/gOTUOjDGmFSRnw/r1yd9+HE0K1xJ7MKVxhiTllShY0fYuRPmzEnoKLGSClfaDH1jjMkkn3wCs2fDf/6T9OHH0axwpTHGZJJhw6BePd/K6peXJRdjjMkUP/wAY8fClVdC7dqBhmLJxWSeZ5+F++6DXbuCjsSY5Hr6abfi5PXXBx2J9bmYDLN1K9x6K2zaBJ9+Cq+8AnXqBB2VMYm3bRsMHw5nnQWtWwcdjd25mAzzzjsusQwcCBMnwkknwbJlQUdlTOKNGQOrVwc6/DiaJReTWfLzoUEDeOYZmDTJtUF36gQffRR0ZMYk1rBh0K4dnHFG0JEAllxMJtm8Gd56y60RXrUq9OgBn3/uCvf16OEWTDImE33+OUyfDjfcEOjw42iWXEzmGD/e9blcdNHebdnZMG0adO/uRtDcdpt19JvMM2yY61u87LKgI9nDkovJHPn50Ly562eJVq8evP02/OEP8PDD8NvfwsaNwcRojN9+/NH1t1x+eUoNXrHkYjLDunXw3ntu3YoqMX6tq1aFRx91s5YnT3ar8i1Zkvw4jfHbiBGwYwcMHhx0JPuw5GIyw+uvu1pK0U1isVx9tUtCq1a5jv6CgqSEZ0xC7Njh5rb06gVHHhl0NPuw5GIyQ34+tG0LOTml79u9u+sAbdQIevZ0cwOMSUdvvAErVqTM8ONollxM+vvhB5g61d21xDtS5ogj4LPPXHK55hrXH7NzZ2LjNMZvw4a53+VevYKOZD+WXEz6Gz3alRkvrUmsqIMOckOXb7nF/Sft0wc2bEhMjMb4bdYsVwF58ODY/YwBs/IvJv3l57v1K8rT5pyV5UaQtW/vloPt0sUlnOxs/+M0/tm1y91p7tgR3yN632bN3L93uhs2zBWnvPzy0vcNQKDJRUR6AY/iVqJ8RlXvK/K+eO/3xq1EOUhVvxSR5sCLwGHAbmC4qj7qHXMXcBWwxjvNn1V1YhK+HROERYtgxgx44IGKnefKK11C6d8fOneG115zEy+NP9asgTvucBNd400CJT0qssihCIwbB337+vf9JdvatfDqq3DFFe4OPAUFllxEJAt4AugJFALTRWSCqn4TtduZQLb36Aw85T3vBG7zEk0dYKaITI469hFVfTBZ34sJ0KhR7vnCCyt+rlNPhS++cPNg8vLgscdSorpsRhg92lVIaNsWqleHatX2fRxwgHuuWnX/92I94tkv1j5Vq7pm0Isvdv10J5wQ9E+mfJ55xhWqTLHhx9GCvHPpBCxW1aUAIjIK6AtEJ5e+wIvq1mKeJiL1RKSxqq4EVgKo6mYRmQ80LXKsyXSqrknslFPc5Ek/tGrlqilfcon7jztvHvz73+4Pkym/cNj9G82fH3x5kgkTXPPnWWe5UYMtWgQbT1nt3AlPPgmnn57SzXtB9gI1Bb6Pel3obSvTPiLSEjge+Dxq8w0iMkdEnhOR+rEuLiJXi8gMEZmxZs2aWLuYVDdnjvtjVdaO/NLUrQtvvgl//KP7T3zmmbB+vb/XqExUXXI57bTgEwvAYYe5ig1btrhBHOlWrWHCBPj++5QcfhwtyOQS67esaENqifuIyIHAWOBmVd3kbX4KaAPk4O5uHop1cVUdrqq5qprboEGDssZuUkF+vuuQP+88/8+dlQX3
3w/PP+8qKnfuDAsW+H+dymD+fNfnEgoFHcle7du7FRsXLHCFTnfsCDqi+A0bBocf7ppvU1iQyaUQiG7LaAasiHcfEamGSyyvqOobkR1UdZWq7lLV3cAIXPObyTSqrr+lZ09XYj9RBg2CDz5wn267dHGz+03ZhMPu+bTTAg1jPz167C0HdP31FRskkCxff+2qSlx/vfsAlMLKlFxEpL6IHOfTtacD2SLSSkSqAwOACUX2mQBcJk4XYKOqrvRGkT0LzFfVh4vE2Djq5TnAXJ/iNanks8/gu+/8bxKL5eSTXTnzww93TWSPPZYef4hSRUEBNG2aEqsj7ueKK+DPf3Yd5PffH3Q0pXv8cahZ041uTHWqWuIDKADqAgcD/wNmAg+Xdlw8D9wQ42+BJcBfvG3XAtd6XwtuRNkS4Gsg19veDdc8NgeY7T16e++95O07B5ecGpcWxwknnKAmzdxwg2rNmqobNybvmps3q/btqwqqV1+tun178q6drnbvVm3USPXii4OOpHi7dqkOGOD+XUePDjqa4q1fr3rAAapXXhl0JHsAM7S4v+/FvaF7E8As7/n3wN+9r+eUdlw6PSy5pJkdO1QbNlTt3z/51961S/WOO9x/ndNOU127NvkxpJMFC9zPavjwoCMp2ZYtqiefrFqjhuqnnwYdTWwPPuh+lrNnBx3JHiUll3iaxap6TU0XAP8tx82RMf768EO3VngymsSKqlIF7r0XXnrJNc116gTf2Aj4YkWqTqdSZ34sNWu6EYLNm8PZZ6fecgy7dsETT7hh9x06BB1NXOJJLv8AJuHmpEwXkdbAosSGZUwJ8vPdoki9ewcXw+9+5/5w/vKLWxvmnXeCiyWVhcNu6G86lNM59FCYOBF273ZDlFNp+PnEibBsWcoPP45WanJR1ddU9ThVvd57vVRV+yc+NGNi2LbNlRk/5xyoVSvYWLp0cR39rVu7CXkPP2wd/dFSbX5LPLKz3R3MsmVw7rnu9y0VDBvmBkX06xd0JHErNbmISAMR+bOIDPcmJT4nIs8lIzhj9vPOO25YcBBNYrE0bw4ff+z+0992G/z+97B9e9BRpYbFi91aI6neJFbUKae4+U3hMFx1VfAfGBYscMOlr7surSpFxFP+ZTzwEfA+sCux4RhTivx813yRSkUla9d2hS7vugvuvtsV0xw7NrHzb9JBZH5LuiUXcLXHliyBv/4V2rSBv/0tuFgef9zVY7vqquBiKId4kssBqjok4ZEYU5qff3bl8AcNSr1PcFWqwD/+AUcf7Uqgd+rkYj3mmKAjC044DA0bQrt2QUdSPnfeCUuXug8NrVvDpZcmP4ZNm2DkSBgwwP0s00g8Hfr/FZEAe06N8Ywf7+pBpUqTWCwDBrg/qtu2uY7+qVODjigYqm7AQyiUPv0tRYm4Gfzdu7tJi5E7sWR64QX3oSqNOvIj4kkuN+ESzBYR2SQim0VkU6lHGeO3/Hy30NPJJwcdSck6dXId/QcdVPF1ZtLVsmVQWJieTWLRqld3TZxt2rhBJAsXJu/au3e7JrEuXSA3N3nX9UmJycUrs9JeVauoai1VrauqdVS1bpLiM8ZZtw4mTXJ3Bim4pOt+mjZ1i1F9+GHqjDhKplStJ1Ye9eu7ocBVq7rh78mqov7ee67/Lg3vWqCU5OLNwByXpFiMKd7YsW4di1RuEisqL8/Ng/nkk6AjSb6CAjfw4uijg47EH61auT60FSvch4YtWxJ/zWHD3ByhRFT9ToJ4PgJOE5ETEx5JOvr5Z7cuxE8/BR1J5svPhyOPhOOPDzqS+HXv7j7tTpoUdCTJFw67lT3Ttb8lls6d4eWXYdo0GDjQNVslyuLFbtj9Nde4prk0FE9y6Q58JiJLvAW4vhaROYkOLC3MmeMmz33wQdCRZLYffnB/rC66KL3+WNWp4/qHKltyWb7cVazOhCaxovr3d9WTX3vNVVNOlCefdCX1r7kmcddIsHiGIp+Z8CjSVW6umyUeDrtfOpMYY8a4
0Ufp1CQW0asX/OlP8OOPromjMkjn+S3xuO02d2cxdKjr6Pd7/snPP8Nzz7lFzBo3Ln3/FBXPnYsW8zDVq8NJJwUzRLEyyc93zWFt2wYdSdnl5bnnyrTIWDgMBx+cuXN8RNworl693Kx5v/9tX37ZVaFI0478iHiSy9u4ashvA1OApYBV6YsIhdzqcKlU5C6TLF7shvWm410LuAq2DRtWrqaxSH9LOozqK6+qVWH0aDdg4bzzYK5PaxKqusR1wgluCHIai6dw5bFe4cpjVTUbt2zwx4kPLU2EQu4X4qOPgo4kM40a5Z4vvDDYOMqrShX4zW/cp9tEdgCniu+/d7PaM7VJLFrdum5AT6RC98qVFT/nhx/CvHnuriWd+hdjKPNHC1X9EvBl9JiI9BKRhSKyWETuiPG+iMhj3vtzRKRjaceKyMEiMllEFnnP9f2ItVidOkGNGtY0lgiqrkmsWzdo0SLoaMovLw/WroUvvww6ksTL9P6Wopo3d0OU16+H3/7WDT2viGHD3BDudP0wFSWeqsi3Rj3+T0ReBSo8i0hEsnBLGJ8JHA1cJCJFB8WfCWR7j6uBp+I49g5gineXNcV7nTg1a7rbV0su/vv6a7cQV7o2iUX85jfuuTI0jYXDUK8eHHdc0JEkT8eO7g571ixX8HJXOev7fvcdTJjgBgjUrOlvjAGI586lTtSjBq7vpa8P1+6EW4BsqapuB0bFOG9f4EVvRc1pQD1vVcySju0LjPS+HgkkfgGEUAhmz3adcMY/+fluOOb55wcdScU0bOgGJFSG5FJQ4ErWZ2UFHUlynXUWPPqoSw633Va+czz5pGsKu+46f2MLSDzJ5RtV/bv3+KeqvgL81odrNwW+j3pd6G2LZ5+Sjm2kqisBvOeYpURF5GoRmSEiM9ZUtJxDKOTa0z+2rijfqLpPg2eckRml63v1cssib8rgsnwrVrgBGJWlSayoG26Am25ySWbYsLIdu2ULPPOMWxeoefPExJdk8SSXP8W5raxi9VYVHeJc3D7xHFsiVR2uqrmqmtugon+8unRxJeCtacw/06a5yXjp3iQWkZfnytdk8oTbTKonVl4PPQRnnw033+z6YuL16quu3ybNhx9HK3YSpYicCfQGmorIY1Fv1QV2+nDtQiA6RTcDVsS5T/USjl0lIo1VdaXXhLbah1hLdsABrmPfkot/8vPdQIlzzgk6En907QoHHgjvvptWS9WWSUGBG0GVkxN0JMHJynKJIhRyRVY/+sj1yZRE1d3pHHusG8KdIUq6c1kBzAC2AjOjHhOAPB+uPR3IFpFWIlIdGOCdO9oE4DJv1FgXYKPX1FXSsROAgd7XA3EraSZeKAQzZ8LmzUm5XEbbudPNyu/Tx/2xygTVq8Ppp7t+l6CXzU2UcNiN7Kts/S1F1a7t7loOPdT1xXz/fcn7f/wxfPVVRgw/jlZsclHVr1R1JHAEMAaYpqojVfUNVa1wpUZV3QncAEwC5gNjVHWeiFwrItd6u03ETdpcDIwAri/pWO+Y+4CeIrII6Om9TrxQyI0S+fTTpFwuoxUUwKpVmdMkFpGX55r6Fi0KOhL//fijW+ukMjeJRWvc2M2B+eUX9yGppL62YcNcWf9LLklefEkQT22xXsCDuKaoViKSA/xDVc+u6MVVdSIugURvezrqawUGx3ust30dkPwF1k86yX1iC4f3lvww5ZOf7yam9ekTdCT+ivxeTJrkKjxnkso2vyUexxzjClz27g0XXODuZoouz11YCG+8Abfc4prXM0g8Hfp34Yb+bgBQ1dlAy8SFlKYOPNAVsrR+l4rZts2t3dKvnysKmknatIEjjsjMIcnhsPs/UFr/QmXzm9/AU0+5f/Mbb9y/SfTpp91I0+uvDya+BIonuexUVZvAEY9QyNXB+vXXoCNJX+++6+YLZVqTWEReXmauThnpb6kaT2NIJXPVVTBkCPznP/Dgg3u3b90Kw4e7mf2tWgUXX4LEk1zmisjFQJaIZIvIMMA6
FmIJhWDHDjefwZRPfj4ccoib35KJ8vLch49MmhO1erWrpGBNYsX7179c09jtt8Prr7ttY8a4JZMzaPhxtHiSy41Ae2Ab8CqwCbg5kUGlrW7dXKFCaxorn59/djOcL7hg/7bpTNG9u/veMqlpbOpU92zJpXhVqsALL7gh6Zde6uZxDRsGRx0FPZLfRZwM8VRF/lVV/6KqJ3qPvwCNkhBb+qlb15X5sORSPhMmuJnKmdokBq5fItNWpwyHXWd0bm7QkaS2WrVg/Hho0sT1xcyY4Wb1Z9Dw42glJhcR6Soi54lIQ+/1cV7hygy6p/dZKASff+7aU03Z5OdDs2buj28my8tzS2T7UaI9FRQUuH+zTL3b9FODBjBxouubqlsXLrss6IgSptjkIiIPAM8B/YG3ReRvwGTgc1yVYhNLKOQ6az//POhI0sv69e7T/IABmb3IFGTW6pRr17qFsmx+S/zatnX9spMnuzvZDFXS0I4+wPGqutVbE2UFcJyqZuAMMB+dcoq7zQ2HrQ26LMaOdYMhMrlJLKJDB2jUyCXTgQNL3z+VRRbJs9/1sknHJbvLqKSPiFtUdSuANyN/oSWWONSv79aysH6XssnPdxMLjz8+6EgSL3p1yvKu/ZEqCgpcX8KJvqwfaDJIScmljYhMiDyAlkVem+KEQu62d/v2oCNJDytWuD9SF12UsZ2b+8nLg3Xr0n91ynDYVaeoXj3oSEyKKalZrOjCXQ8lMpCMEgrBY4+5CZWZ3jnthzFj3MzlytAkFtGzp3ueNCl9P/WvX+8GJvz970FHYlJQsclFVa1dp7wiZbPDYUsu8cjPd81hlaAdeo+GDV2plEmT4M47g46mfD76yH0osP4WE0OGD8sJyKGHQvv21u8SjyVL4IsvKtddS0Renms+TdflscNht9Z7p05BR2JSkCWXRAmF4JNP3AgoU7xRo9zzhRcGG0cQevVyHfrpujplOOxWYa1ZM+hITAqy5JIooZBbyyHdO2wTLT/flc1p0SLoSJKva1e3tEA6ztbfsAFmzbImMVOsUkuYisiRwB+Bw6P3V9XTExhX+ovud+ncOdhYUtXXX8O8efDEE0FHEoxq1dzqlO++6/ou0mmk3Mcfu5ht8qQpRjx3Lq8BXwJ34uLuUa4AACAASURBVJJM5FFuInKwiEwWkUXec/1i9uslIgtFZLGI3BG1/QERWSAic0RknIjU87a3FJEtIjLbezwd67xJcdhhroPa+l2Kl5/vFlg7//ygIwlOXh589x18+23QkZRNOOyGH9sHJ1OMeNdzeUpVv1DVmZFHBa97BzBFVbOBKd7rfYhIFvAEcCZwNHCRiBztvT0ZOEZVjwO+Bf4UdegSVc3xHtcSpFDIfcJL94lyiaDq+lvOOMPVW6qsolenTCcFBS6xZNqCbsY38SSXt0TkehFp7N1xHCwiB1fwun2Bkd7XI4F+MfbpBCxW1aWquh0Y5R2Hqr6nqju9/aYBzSoYT2KEQm7t7Nmzg44k9Xz+OSxbVjlHiUVr3Tr9VqfctMn1JVqTmClBPMllIK4Z7FNgpveYUcHrNlLVlQDec8MY+zQFvo96XehtK+oK4J2o161EZJaIhEXklOICEJGrRWSGiMxYs2ZN2b+DeEQ6O61pbH/5+VCjBpxzTtCRBK9XL3cnkC6rU37yiVua1zrzTQniWc+lVYxH69KOE5H3RWRujEfRmf/FniJWOEWu8RdgJ/CKt2kl0EJVjwduBV4VkbrFfF/DVTVXVXMbJKpZpmlTt266JZd97drlZuX36ePKjld26bY6ZUGBG4zQtWvQkZgUFteC1yJyDK7fY8+AdlV9saRjVLXYdWpFZJWINFbVlSLSGFgdY7dCoHnU62a4ysyRcwwEzgJ6qKp619yGWzETVZ0pIkuAI6n4nVb5hUIwbpz7pJfppeTjVVAAP/5oTWIRp53m/li/+256rEoYDruJkwccEHQkJoWV+tfOW8dlmPfoDtwPnF3B607ANbfhPY+Psc90
IFtEWolIdWCAdxwi0gsYApytqr9GxdrAGwiAiLTGrTuztIKxVkwoBD/95IbdGic/383v6NMn6EhSw4EHurk+6dDv8vPPbgVFaxIzpYjno/R5QA/gR1W9HOgA1Kjgde8DeorIIqCn9xoRaSIiEwG8DvsbgEnAfGCMqs7zjn8cqANMLjLk+FRgjoh8BbwOXKuq6ysYa8VYv8u+tm1za7f062cjjaLl5bkPICtWlL5vkD75xDVrWnIxpYgnuWxR1d3ATq//YjVQap9LSVR1nar2UNVs73m9t32FqvaO2m+iqh6pqm1U9Z9R249Q1eZFhxyr6lhVba+qHVS1o6q+VZE4fXH44e5hycWZNMnN7rYmsX2ly+qU4bBbovekk4KOxKS4eJLLDG+S4gjcSLEvgS8SGlWmCYVg6lQ3t6Oyy8+HQw5x81vMXscdt3d1ylRWUAC5uRm9PK/xRzyjxa5X1Q2q+jSuCWug1zxm4hUKubXGv/km6EiC9csvMGGCm5FfrVrQ0aSWKlXc3cvkyak76faXX9waRdYkZuIQT4e+iMjvROSvqroc2CAiVmO7LKzfxZkwwQ25tSax2CKrU86saAGMBPnsM9i50yZPmrjE0yz2JNAViPxF2Iwry2Li1bq1m/NS2ZNLfj40a+ZGRpn99ezpilematNYQYGrBWcL4Jk4xJNcOqvqYGArgKr+BNiC2WUh4u5ewuHK2++yfr2bx3HhhTbfpzgNGuxdnTIVhcMuvjp1go7EpIF4/pfv8OaOKLi5JMDuhEaViUIhWLUq/arf+uWNN9zCadYkVrK8PJg2LfVWp/z1V7diqDWJmTjFk1weA8YBDUXkn8DHwL8SGlUmquz9Lvn5kJ3tPvma4uXluQ79KVOCjmRf06bB9u3WmW/iFs9osVeA24F7cbW7+qnqa4kOLOMceaQbajp1atCRJN/KlfDhh+6uJZ0WxApCqq5OGQ675kzrLzNxKra2WJGy+quB/Oj3Ap/5nm6K9rtUpj+yY8a479maxEpXrZqrL5Zqq1OGw3D88XDQQUFHYtJESXcua4HZuKKPM9hbbt+PkvuVUygEhYVuHZPKJD8fcnKgXbugI0kPeXnwv//BwoVBR+Js3eqaxaxJzJRBScllGPAT8C6uuGTrspTcNzFUxn6XpUvdwmB21xK/VFud8vPPXU04Sy6mDIpNLqp6E5ADvAZcCswSkftFpFWygss4Rx8Nhx5auZLLqFHuecCAYONIJ61aucEPqZJcwmHXPHdKsWvvGbOfEjv01fkQ16H/NHA5YEWhyksETj21ciWX/Hw36a5Fi6AjSS95eW7S4tatQUfi4ujQAerXDzoSk0aKTS4iUltELhaR8cBE4ECgo6qOSFp0mSgUguXLXZt6pps71z2sSazs8vJgy5bgV6fcts2VfbEmMVNGJd25rMbdsXwKPIRbdOtEETlXRM5NRnAZqTL1u+Tnu3Ih558fdCTp57TToHr14JvGpk93d082edKUUUnJ5TVgFtAOt5zwb6MeZyU+tAx17LGueSHTk4uq62/p0QMaNgw6mvQTWZ3y3XeDjaOgwD1bf4spo5I69Aep6uXFPK6oyEVF5GARmSwii7znmI25ItJLRBaKyGIRuSNq+10i8oO3CuVsEekd9d6fvP0XikheReJMiCpV3H/UTE8uX3zhRopZk1j55eW5ZsUffgguhnDYfSA65JDgYjBpKagKgncAU1Q1G5jivd6HV8/sCeBM4GjgIhE5OmqXR6JWopzoHXM0MABoD/QCnvTOk1pCIVi8OPWXtK2I/HyoUQPOOSfoSNJX0KtTbt8On35qTWKmXIJKLn2Bkd7XI4F+MfbpBCxW1aWquh0Y5R1X2nlHqeo2VV0GLPbOk1oyvd9l1y4YPRp697YZ3RVx3HFw2GHB9bvMmOEKVlpnvimHoJJLI1VdCeA9x2qUbwp8H/W60NsWcYOIzBGR56Ka1Uo7Zg8RuVpEZojIjDVr1pT3+yifnByoWzdzk0s4DD/+
aE1iFSUCv/lNcKtTRn4/Tz01+dc2aS+elSgPEJH/JyIjvNfZIlJqh76IvC8ic2M8Srv72HOKGNsii6E8BbTBTfJciRvNVtox+25UHa6quaqa26BBgzhD8klWluuszdTkkp/vOqTPsnEfFdarl1sLJ4jVKcNhaN/erTNjTBnFc+fyPLANtxoluLuBe0o7SFXPUNVjYjzGA6tEpDGA97w6xikKgeZRr5sBK7xzr1LVXaq6GxjB3qavYo9JOaEQLFjg1njJJNu3w9ix0K8f1KoVdDTpL7I6ZbJHje3Y4ebYWJOYKad4kksbVb0f2AGgqluIfYdQFhNw9crwnsfH2Gc6kC0irUSkOq6jfgLsSUgR5wBzo847QERqeGVqsoEvKhhrYkT+02ZaCf5Jk+Cnn6xJzC+HHgonnJD8fpcvv4RffrHOfFNu8SSX7SJSi70rUbbB3clUxH1ATxFZBPT0XiMiTURkIoCq7gRuACYB84ExqjrPO/5+EflaROYA3YFbvGPmAWOAb3AFNweragCN1XHo2BFq1868prH8fDdstWfPoCPJHHl5rnjkhg3Ju6b1t5gKEi1lTXcR6QnciRsO/B5wMjBIVQsSHl2S5Obm6owZAawikJfnhiN//XXyr50Iv/ziJkxeeik8/XTQ0WSOjz5yf+Rffx3690/ONXv3dktDzJ+fnOuZtCQiM1U1N9Z78axEORk4FxiEWzAsN5MSS6BCITdJbu3aoCPxx1tvuaGr1iTmry5dkrs65c6drr/FmsRMBZRUuLJj5AEcjhuVtQJo4W0zFRXpd/noo2Dj8MvTT0Pz5lYqxG/VqsEZZ7jkUkpLgy9mz4bNm60z31RIscscs3d4bywKnO5zLJXPiSe6EVXhcPrPZP/8c/d9PPywK3Fj/JWXB+PGuRGGRx2V2GtF6olZcjEVUGxyUdXuyQykUqpeHbp2zYxO/aFDXUHOq64KOpLMFL06ZaKTSzgMRx4JjRuXvq8xxYhnEmVNEblVRN4QkbEicrOI1ExGcJVCKARffeWG76arBQvgzTfhhhvc5Enjv5Yt3R/8RPe77NrlmmntrsVUUDztFy/iCkEOAx7HjRp7KZFBVSqhkGtHD3pRqIp44AGoWRNuvDHoSDJbXp67q0jk6pRffQUbN1pyMRUWT3Jpq6pXquqH3uNq4MhEB1ZpdOrkmsfStWnshx/gpZfgiiusTEiiRVanTOQAkMjvoSUXU0HxJJdZItIl8kJEOgOfJC6kSqZWLejcOX2Ty7//Dbt3w223BR1J5kvG6pThMLRpA82aJe4aplKIJ7l0Bj4VkeUishz4DAhFzZA3FRUKuXIbmzYFHUnZ/PSTG358wQXQqlXQ0WS+2rXdMO9E1RnbvduVI7K7FuODeJJLL6AVEPIerYDe7F362FRUKOT+Y3+SZjeETz0FP/8Mt98edCSVR14ezJsHhYX+n/vrr90HBps8aXwQzwz974BNwEHAIZGHqn7nvWcqqmtXqFo1vZrGtmyBRx91JeFzcoKOpvJI5OqU1t9ifFTSJEoARORuXOmXJexdG8UmUfqpdm03oTKdksvIkbB6NQwZEnQklcuxx7r5J5MmuUEUfioocEOeW7Tw97ymUio1uQAX4Mrub090MJVaKAQPPuiKP9auHXQ0Jdu1y8XaqZN9yk22yOqUEya4f4esLH/OG+lv+a21dBt/xNPnMheol+hAKr1QyBUM/PTToCMp3dixsGSJu2uRii7tY8osL8/1jfhZyfubb2DdOvuwYHwTT3K5FzcceZKITIg8Eh1YpXPyye5TaKo3janCffe52eJ9412x2vgqEatTWj0x47N4msVGAkOBr4HdflxURA4GRgMtgeXABaq6X/0TEekFPApkAc+oamRRsdFAW2+3esAGVc0RkZa4hcUWeu9NU9Vr/Yg54erUcQuIpXpyef99mDULnnnGvyYZUzaHHgq5ua7f5W9/8+ec4bDra2nZ0p/zmUovnjuXtar6mDc7Pxx5VPC6dwBT
VDUbmOK93oeIZAFPAGfiSs5cJCJHA6jqhaqao6o5wFjgjahDl0TeS5vEEhEKwRdfuJFYqWroUGjSBH73u6Ajqdwiq1P6UZNO1SWXUMiaOY1v4kkuM0XkXhHpWmSNl4roi7sjwnvuF2OfTsBiVV3qDSYY5R23h4gIbsBBfgXjSQ2hEGzfDtOmBR1JbDNnwpQpcPPNUKNG0NFUbnl5rhN+ypSKn2v+fFizxprEjK/iSS7HA12Af+HWeHkIeLCC122kqisBvOeGMfZpCnwf9brQ2xbtFGCVqi6K2tZKRGaJSFhE0mvVqm7d3CfHVG0aGzoUDjoIrrkm6EhM585Qt64/pWAiv282edL4qNQ+l/Ku6yIi7wOHxXjrL/GeIlY4RV5fxL53LSuBFqq6TkROAN4Ukfaqul9dFRG5GrgaoEWqjOuvV89NSEzF5LJokVvDfcgQ90fNBKtaNejRY+/qlBVpzgqHoWlTaN3av/hMpRdPhz4i0gdXdn/POi6q+o+SjlHVM0o43yoRaayqK0WkMbA6xm6FQPOo181wyyxHzlEVOBc4Ieqa24Bt3tczRWQJroLzfmM2VXU4MBwgNzc3CWvHxikUcvW6tm1LraanBx90RRNvuinoSEyEH6tTqrqRYj16WH+L8VU8i4U9DVwI3Ii7mzgfOLyC150ADPS+HgiMj7HPdCBbRFqJSHVggHdcxBnAAlXdU2RJRBp4AwEQkdZANrC0grEmVyjk1uv44ougI9nrxx/djPxBg+CwWDejJhCRUjAVGZL87bewapU1iRnfxdPncpKqXgb8pKp/B7qy7x1FedwH9BSRRUBP7zUi0kREJgKo6k7gBmASbnjxGFWdF3WOAezfkX8qMEdEvgJeB65V1fUVjDW5TvG6iVKpaezRR2HHDvi//ws6EhOtZUto27Zi/S5WT8wkiKiW3CIkIp+ramcRmYZrhloHzPWGEWeE3NxcneHnbOeKOu44aNQIJk8OOhK3KmGLFu5T8pgxQUdjirrpJhg+HNavd2sDldXFF8OHH8KKFdYsZspMRGaqam6s9+K5c/mviNQDHgC+xE16zIyhv6kqFHJlYHbsCDoS+M9/3DozVqAyNeXluWbU8qxOGZnfctpplliM7+IpuX+3qm5Q1bG4vpZ2qvrXxIdWiYVC8Ouv/taOKo9t29xKk2ecASecUPr+JvlCofKvTrlkibtjsSYxkwDFJhcROVFEDot6fRkwBrjbK99iEuXUU91z0P0uL70EK1faXUsqi6xOWZ7kYvXETAKVdOfyH2A7gIiciut0fxHYiDeE1yRIw4ZuaGmQyWXXLnjgAVfvrEeP4OIwpevVy61O+f33pe8bLRx2v2vt2iUmLlOplZRcsqJGWl0IDFfVsar6/4AjEh9aJRcKwccfuzL8QRg/3g1TtbL6qa88q1NaPTGTYCUmF2+iIkAP4IOo9+KafGkqIBRy69PPmpX8a0fK6rdpA/37J//6pmyOOcYVEy1L09iyZe5Ox+a3mAQpKbnkA2ERGQ9sAT4CEJEjcE1jJpEi7eBBNI0VFMD06W5ei5XVT32R1Snff981Z8bD5reYBCs2uajqP4HbgBeAbrp3QkwV3Gx9k0iNG0N2djDJZehQN89m0KDkX9uUT2R1yunT49s/HHbrwhx9dGLjMpVWiUORVXWaqo5T1V+itn2rql8mPjRDKOTmL8T7adQPs2e75pWbboKaNUvf36SGyOqU8TaNFRRYf4tJqHgmUZqghEJuhvycOcm75v33u1Uxr7suedc0FXfIIW51ynjqjH33nXtYk5hJIEsuqSzZ/S5Ll8Lo0W69lnr1knNN459evVzB09JWp7T+FpMEllxSWfPm0KpV8pLLQw+5Dvybb07O9Yy/IqtTvv9+yfsVFMDBB7tRZsYkiCWXVBcKwdSp7o9GIq1eDc89B5dd5haOMumnc2e3Umhp/S7hsKsCUcX++5vEsd+uVBcKuYq38+aVvm9FDBvmaon98Y+JvY5JnKpV912d
Mpbvv3fNn9YkZhLMkkuqS0a/y+bN8MQT0K+fWx/EpK+8PCgshPnzY78f+T2yyZMmwSy5pLqWLV3fSyKTy4gRrhPYClSmv9JWpwyH3WCNY49NXkymUgokuYjIwSIyWUQWec/1i9nvORFZLSJz4z1eRP4kIotFZKGI5CX6e0k4kb39LqUs7FYu27fDww+7a3Tu7P/5TXIdfrgrRFlcv0s47KooW+UFk2BB3bncAUzxVrOc4r2O5QWgV7zHi8jRuOWP23vHPSki6f+/KBRyHe4LFvh/7ldfhR9+gDuK+ycwaScvz30Y2bJl3+0rVsCiRdYkZpIiqOTSFxjpfT0S6BdrJ1WdCqyP8VZxx/cFRqnqNlVdBiwGOvkVdGAS1e+ye7ebNNmhw97mFJP+IqtTTp2673ab32KSKKjk0khVVwJ4zw19Or4pEL2oRaG3Lb0dcYSrNeZ3cvnvf13H7+23WxmQTBIKQY0a+zeNhcNQty7k5AQTl6lUElY6X0TeBw6L8dZfEnVNINZfyJgdFSJyNXA1QIsWLRIYkg8i/S7hsOt38SMRRMrqt2wJF1xQ8fOZ1HHAAbFXpywosP4WkzQJu3NR1TNU9ZgYj/HAKhFpDOA9ry7j6Ys7vhBoHrVfM2BFMfENV9VcVc1t0KBBGS8fgFDILTm8eLE/5/v4Y/jsM7jtNjc/wmSWvDz45pu9q1P++CMsXGhNYiZpgmoWmwAM9L4eCIz36fgJwAARqSEirYBs4IsKxpoa/O53GTrUlVy/4gp/zmdSSy9vHEzk7iXS/2LJxSRJUMnlPqCniCwCenqvEZEmIjIxspOI5AOfAW1FpFBErizpeFWdB4wBvgHeBQarahLr1SdQu3ZuvXM/ksvcufD22/CHP7gmFJN52rd3ZXwiyaWgAA48EDp2DDQsU3kE0h6iqutwSycX3b4C6B31+qKyHO+990/gn/5EmkJEXD0oP/pd7r8fateGwYP9i8+klsjqlOPGwc6d7vemWzdrAjVJYzP000ko5NrQly8v/zm++87NbbnqKlcZ12SuvDzYsAEmTnT9L9YkZpLIkks68aPf5eGH3afaW2/1JyaTus44w/1b33mne22TJ00SWXJJJ+3bu7uN8iaXdevgmWfg4otdvTKT2Q45BE48Eb7+2jWDnnBC0BGZSsSSSzqpUmVvv0t5PP44/PqrmzRpKodI5YWTToJq1YKNxVQqllzSTSgEy5btnb8Qr19+cWu2/Pa37g7IVA6RIcnW32KSzJJLuilvv8uzz7pmMSurX7l07QpPPQXXXRd0JKaSseSSbo47zi1lW5bksmMHPPQQnHyye5jKQwSuvdZGBpqks0Hv6SYry9WHKktyGT0a/vc/1+dijDFJYHcu6SgUcutyrFxZ+r6qbtJk+/bQp0/iYzPGGCy5pKey9Lu8844binr77W60mTHGJIH9tUlHxx8PderEl1zuu8/NabkoZiUdY4xJCOtzSUdVq7qO+dKSy2efwUcfwSOP2BwHY0xS2Z1LugqF3CqSq0tYCmfoUKhfH37/++TFZYwxWHJJX5F+l6LrpEfMnw/jx8MNN7hS68YYk0SWXNJVbq5bi6W4prEHHoBateDGG5MblzHGYMklfVWr5upFxbpzKSyEl1+GK6+EdFjC2RiTcQJJLiJysIhMFpFF3nP9YvZ7TkRWi8jcItsfEJEFIjJHRMaJSD1ve0sR2SIis73H08n4fgITCrlhxuvX77v9kUdg92647bZg4jLGVHpB3bncAUxR1Wxgivc6lheAXjG2TwaOUdXjgG+BP0W9t0RVc7zHtT7GnHpCITdJ8qOP9m776ScYPhwuvBBatgwsNGNM5RZUcukLjPS+Hgn0i7WTqk4F1sfY/p6q7vReTgOaJSLIlNepE9SsuW+/y5NPws8/W1l9Y0yggkoujVR1JYD33LAC57oCeCfqdSsRmSUiYRE5pbiDRORqEZkhIjPWrFlTgcsHqEYN6NJlb3LZsgUefRTOPBM6dAg2NmNM
pZaw5CIi74vI3BiPvj5e4y/ATuAVb9NKoIWqHg/cCrwqInVjHauqw1U1V1VzG6Rzp3coBLNnw8aN8MILsGaNldU3xgQuYTP0VfWM4t4TkVUi0lhVV4pIY6CEmYDFnmMgcBbQQ1XVu+Y2YJv39UwRWQIcCcwoz/eQFkIh+Pvf3d3LAw9A585utUpjjAlQUM1iE4CB3tcDgfFlOVhEegFDgLNV9deo7Q1EJMv7ujWQDSz1JeJU1aULVK/u+liWLXN3LSJBR2WMqeSCSi73AT1FZBHQ03uNiDQRkYmRnUQkH/gMaCsihSJypffW40AdYHKRIcenAnNE5CvgdeBaVd1vQEBGqVXLdewvXAht20Jf31odjTGm3AIpXKmq64AeMbavAHpHvY5ZyldVjyhm+1hgrE9hpo9QCD7+2MrqG2NShlVFzgS//z1s3w6/+13QkRhjDGDJJTO0bOlWmzTGmBRhbSjGGGN8Z8nFGGOM7yy5GGOM8Z0lF2OMMb6z5GKMMcZ3llyMMcb4zpKLMcYY31lyMcYY4zvxCgpXaiKyBviuAqc4FFjrUzjpzn4W+7Kfx172s9hXJvw8DlfVmGuWWHLxgYjMUNXcoONIBfaz2Jf9PPayn8W+Mv3nYc1ixhhjfGfJxRhjjO8sufhjeNABpBD7WezLfh572c9iXxn987A+F2OMMb6zOxdjjDG+s+RijDHGd5ZcKkBEeonIQhFZLCJ3BB1PkESkuYh8KCLzRWSeiNwUdExBE5EsEZklIv8NOpagiUg9EXldRBZ4vyNdg44pSCJyi/f/ZK6I5ItIzaBj8psll3ISkSzgCeBM4GjgIhE5OtioArUTuE1VjwK6AIMr+c8D4CZgftBBpIhHgXdVtR3QgUr8cxGRpsAfgFxVPQbIAgYEG5X/LLmUXydgsaouVdXtwCigb8AxBUZVV6rql97Xm3F/PJoGG1VwRKQZ0Ad4JuhYgiYidYFTgWcBVHW7qm4INqrAVQVqiUhV4ABgRcDx+M6SS/k1Bb6Pel1IJf5jGk1EWgLHA58HG0mg/g3cDuwOOpAU0BpYAzzvNRM+IyK1gw4qKKr6A/Ag8D9gJbBRVd8LNir/WXIpP4mxrdKP6xaRA4GxwM2quinoeIIgImcBq1V1ZtCxpIiqQEfgKVU9HvgFqLR9lCJSH9fK0QpoAtQWkd8FG5X/LLmUXyHQPOp1MzLw1rYsRKQaLrG8oqpvBB1PgE4GzhaR5bjm0tNF5OVgQwpUIVCoqpE72ddxyaayOgNYpqprVHUH8AZwUsAx+c6SS/lNB7JFpJWIVMd1yE0IOKbAiIjg2tTnq+rDQccTJFX9k6o2U9WWuN+LD1Q14z6ZxktVfwS+F5G23qYewDcBhhS0/wFdROQA7/9NDzJwgEPVoANIV6q6U0RuACbhRns8p6rzAg4rSCcDlwJfi8hsb9ufVXVigDGZ1HEj8Ir3QWwpcHnA8QRGVT8XkdeBL3GjLGeRgaVgrPyLMcYY31mzmDHGGN9ZcjHGGOM7Sy7GGGN8Z8nFGGOM7yy5GGOM8Z0lF2M8XuXe66NeN/GGjCbiWv1E5K+JOHeM6/hSQFREGojIu36cy2Q+Sy7G7FUP2JNcVHWFqp6XoGvdDjyZoHMD4BVF7Ier2l3W4/ajqmuAlSJysg/hmQxnycWYve4D2ojIbBF5QERaishcABEZJCJvishbIrJMRG4QkVu9QozTRORgb782IvKuiMwUkY9EpF3Ri4jIkcA2VV3rvW4lIp+JyHQRuVtEfva2nxa9FoyIPC4ig7yv/+rtP1dEhnszvRGRAhH5l4iEgSHA2cAD3vfUprj4ROQFEXlYRD4EhopIyDtmtvc91vHCeBO4JAE/e5NhbIa+MXvdARyjqjmwp7pztGNw1Z5rAouBIap6vIg8AlyGq4Q8HLhWVReJSGfc3cnpRc5zMm52dsSjuKKOL4rI4DhjfVxV/+HF+RJwFvCW9149VQ1572UD/1XV173XU0qI70jgDFXdJSJv
AYNV9ROvGOlWb58ZwD1xxmgqMUsuxsTvQ2+tms0ispG9f8y/Bo7z/gifBLzm3UgA1Ihxnsa4EvQRZxGW0AAAAatJREFUJwP9va9fAobGEUt3EbkdtxbIwcC8qHhGxzogjvheU9Vd3tefAA+LyCvAG6pa6G1fjavka0yJLLkYE79tUV/vjnq9G/d/qQqwIXLnU4ItwEFFtsWqw7STfZuuawJ4S+I+iVvJ8HsRuSvynueXYq5bWnx7jlPV+0TkbaA3ME1EzlDVBd51thRzvDF7WJ+LMXttBuqUulcxvPVrlonI+eAqRYtIhxi7zgeOiHr9CXuXuY3uz/gOOFpEaojIQbjqubA3kaz17kZKGnSw53sqQ3yISBtV/VpVh+KawiJ9R0cCc0u4njGAJRdj9lDVdcAnXif5A+U8zSXAlSLyFa6pKtbS11OB42Vv29RNwGARmU7UHY2qfg+MAeYAr+Cq5+ItETwC1xz3Jm75h+KMAv7odcq3iTM+gJu9n8NXuDuVd7zt3YG3S7ieMYBVRTYmECLyKPCWqr4f472fVfXAAMIqlYhMBfqq6k9Bx2JSm925GBOMf+E649OGiDQAHrbEYuJhdy7GGGN8Z3cuxhhjfGfJxRhjjO8suRhjjPGdJRdjjDG+s+RijDHGd/8fRgzIwPi37NEAAAAASUVORK5CYII=\n",
      "text/plain": [
       "<Figure size 432x288 with 1 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    }
   ],
   "source": [
    "plt.plot(r_G, label='G-learning (' + str(np.round(SR_G,3)) + ')', color='red')\n",
    "\n",
    "plt.legend()\n",
    "plt.xlabel('time (quarters)')\n",
    "plt.ylabel('Sample Mean Returns')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "aTWkeFsDPQQu"
   },
   "outputs": [],
   "source": [
    "np.save('State_act_trajs.npy', trajs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 286
    },
    "colab_type": "code",
    "id": "SNtCvuFMPQQ0",
    "outputId": "2f12bdef-6454-4811-9d4b-5ee128b9f596"
   },
   "outputs": [],
   "source": [
    "#trajs = np.load('State_act_trajs.npy')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "4Wdu1t058d7a"
   },
   "source": [
    "## GIRL\n",
    "Implement the optimizer for finding the optimal G-learning parameters by MLE."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "gH6Ho4m9yHvX"
   },
   "outputs": [],
   "source": [
    "def get_loss(trajs,\n",
    "             num_steps, \n",
    "             benchmark_portf, \n",
    "             gamma, \n",
    "             num_risky_assets, \n",
    "             riskfree_rate, \n",
    "             expected_risky_returns, \n",
    "             Sigma_r, \n",
    "             x_vals_init, \n",
    "             max_iter_RL, \n",
    "             reward_params,\n",
    "             beta, \n",
    "             num_trajs, \n",
    "             grad=False, \n",
    "             eps=1e-7):\n",
    "\n",
    "\n",
    "  data_xvals = torch.zeros(num_trajs,  num_steps, num_assets, dtype=torch.float64, requires_grad=False)\n",
    "  data_uvals = torch.zeros(num_trajs,  num_steps, num_assets, dtype=torch.float64, requires_grad=False)\n",
    "        \n",
    "  for n in range(num_trajs):\n",
    "        for t in range(num_steps):\n",
    "            data_xvals[n,t,:] = torch.tensor(trajs[n][t][0],dtype=torch.float64)\n",
    "            data_uvals[n,t,:] = torch.tensor(trajs[n][t][1],dtype=torch.float64)\n",
    "                \n",
    "                \n",
    "  # allocate memory for tensors that wil be used to compute the forward pass\n",
    "  realized_rewards = torch.zeros(num_trajs, num_steps, dtype=torch.float64, requires_grad=False)\n",
    "  realized_cum_rewards = torch.zeros(num_trajs, dtype=torch.float64, requires_grad=False)\n",
    "\n",
    "  realized_G_fun = torch.zeros(num_trajs, num_steps, dtype=torch.float64, requires_grad=False)\n",
    "  realized_F_fun  = torch.zeros(num_trajs,  num_steps, dtype=torch.float64, requires_grad=False)\n",
    "\n",
    "  realized_G_fun_cum = torch.zeros(num_trajs, dtype=torch.float64, requires_grad=False)  \n",
    "  realized_F_fun_cum = torch.zeros(num_trajs, dtype=torch.float64, requires_grad=False)  \n",
    "\n",
    "  reward_params_dict={}\n",
    "  loss_dict={}\n",
    "  loss_dict[-1]=np.array([0]*len(reward_params), dtype='float64') # perturb up\n",
    "  loss_dict[1]=np.array([0]*len(reward_params), dtype='float64') # perturb down\n",
    "  loss_grad = np.array([0]*len(reward_params), dtype='float64') \n",
    "\n",
    "  if grad: # compute gradient\n",
    "    for j in range(len(reward_params)):\n",
    "      for k in [-1,1]:\n",
    "            reward_params_dict[k]=reward_params\n",
    "            reward_params_dict[k][j]= reward_params_dict[k][j] + k*eps\n",
    "              \n",
    "            # 1. create a G-learner\n",
    "            G_learner = G_learning_portfolio_opt(num_steps,\n",
    "                                             reward_params_dict[k],\n",
    "                                             beta,\n",
    "                                             benchmark_portf,\n",
    "                                             gamma,\n",
    "                                             num_risky_assets,\n",
    "                                             riskfree_rate,\n",
    "                                             expected_risky_returns,\n",
    "                                             Sigma_r,\n",
    "                                             x_vals_init,\n",
    "                                             use_for_WM=True)\n",
    "        \n",
    "            G_learner.reset_prior_policy()\n",
    "        \n",
    "            # run the G-learning recursion to get parameters of G- and F-functions\n",
    "            G_learner.G_learning(error_tol, max_iter_RL)\n",
    "        \n",
    "            # compute the rewards and realized values of G- and F-functions from \n",
    "            # all trajectories\n",
    "            for n in range(num_trajs):\n",
    "              for t in range(num_steps):\n",
    "                \n",
    "                realized_rewards[n,t] = G_learner.compute_reward_on_traj(t,\n",
    "                                data_xvals[n,t,:], data_uvals[n,t,:])\n",
    "\n",
    "\n",
    "                realized_G_fun[n,t] = G_learner.compute_G_fun_on_traj(t,\n",
    "                                data_xvals[n,t,:], data_uvals[n,t,:])\n",
    "\n",
    "\n",
    "                realized_F_fun[n,t] = G_learner.compute_F_fun_on_traj(t,\n",
    "                                data_xvals[n,t,:])\n",
    "                \n",
    "              realized_cum_rewards[n] = realized_rewards[n,:].sum()\n",
    "              realized_G_fun_cum[n] = realized_G_fun[n,:].sum()\n",
    "              realized_F_fun_cum[n] = realized_F_fun[n,:].sum()\n",
    "          \n",
    "\n",
    "\n",
    "            \n",
    "            loss_dict[k][j] = - beta *(realized_G_fun_cum.sum() - realized_F_fun_cum.sum())\n",
    "      loss_grad[j]=(loss_dict[1][j]-loss_dict[-1][j])/(2.0*eps)\n",
    "\n",
    "  G_learner = G_learning_portfolio_opt(num_steps,\n",
    "                                      reward_params,\n",
    "                                      beta,\n",
    "                                      benchmark_portf,\n",
    "                                      gamma,\n",
    "                                      num_risky_assets,\n",
    "                                      riskfree_rate,\n",
    "                                      expected_risky_returns,\n",
    "                                      Sigma_r,\n",
    "                                      x_vals_init,\n",
    "                                      use_for_WM=True)\n",
    "        \n",
    "  G_learner.reset_prior_policy()\n",
    "        \n",
    "  G_learner.G_learning(error_tol, max_iter_RL)\n",
    "        \n",
    "  # compute the rewards and realized values of G- and F-functions from \n",
    "  # all trajectories\n",
    "  for n in range(num_trajs):\n",
    "      for t in range(num_steps):\n",
    "                \n",
    "                realized_rewards[n,t] = G_learner.compute_reward_on_traj(t,\n",
    "                                data_xvals[n,t,:], data_uvals[n,t,:])\n",
    "\n",
    "\n",
    "                realized_G_fun[n,t] = G_learner.compute_G_fun_on_traj(t,\n",
    "                                data_xvals[n,t,:], data_uvals[n,t,:])\n",
    "\n",
    "\n",
    "                realized_F_fun[n,t] = G_learner.compute_F_fun_on_traj(t,\n",
    "                                data_xvals[n,t,:])\n",
    "                \n",
    "      realized_cum_rewards[n] = realized_rewards[n,:].sum()\n",
    "      realized_G_fun_cum[n] = realized_G_fun[n,:].sum()\n",
    "      realized_F_fun_cum[n] = realized_F_fun[n,:].sum()\n",
    "          \n",
    "    \n",
    "\n",
    "  loss = - beta *(realized_G_fun_cum.sum() - realized_F_fun_cum.sum())   \n",
    "  if grad:\n",
    "    return loss, loss_grad\n",
    "  else:\n",
    "    return loss   \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "Z2cYmHjoTZKM"
   },
   "outputs": [],
   "source": [
    "def fun(x, grad_=False, rescale=1.0, constraint=False):\n",
    "    y=x.copy()\n",
    "    y[0]/=sc[0]\n",
    "    y[1]/=sc[1]\n",
    "    y[2]/=sc[2]\n",
    "    y[3]/=sc[3]\n",
    "    with torch.no_grad():\n",
    "       if grad_==False:\n",
    "           if constraint:\n",
    "            y[0] = y[0]**2\n",
    "            y[1] = 1+y[1]**2\n",
    "            y[2] = 1+y[2]**2\n",
    "            y[3] = 1.0/(1.0+np.exp(-y[3]))\n",
    "\n",
    "           ret = rescale*get_loss(trajs,\n",
    "                                 num_steps, \n",
    "                                 benchmark_portf, \n",
    "                                 gamma, \n",
    "                                 num_risky_assets, \n",
    "                                 riskfree_rate, \n",
    "                                 expected_risky_returns, \n",
    "                                 Sigma_r, \n",
    "                                 x_vals_init, \n",
    "                                 max_iter_RL, \n",
    "                                 y, \n",
    "                                 beta,\n",
    "                                 len(trajs), \n",
    "                                 grad=grad_, \n",
    "                                 eps=1e-7).detach().numpy()\n",
    "\n",
    "           print(ret)\n",
    "           return ret\n",
    "       else:\n",
    "            f, df = get_loss(trajs,\n",
    "                                 num_steps, \n",
    "                                 benchmark_portf, \n",
    "                                 gamma, \n",
    "                                 num_risky_assets, \n",
    "                                 riskfree_rate, \n",
    "                                 expected_risky_returns, \n",
    "                                 Sigma_r, \n",
    "                                 x_vals_init, \n",
    "                                 max_iter_RL, \n",
    "                                 y, \n",
    "                                 beta,\n",
    "                                 len(trajs), \n",
    "                                 grad=grad_, \n",
    "                                 eps=1e-7)\n",
    "            return f.detach().numpy()*rescale, df*rescale/sc"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "JQXvu1HqPQRc"
   },
   "source": [
    "### GIRL (G-learning IRL)\n",
    "Two different optimizers are used to demonstrate the implementation of GIRL. The first approach is gradient free and the second approach uses the gradient of the loss function. \n",
    "#### Gradient free\n",
    "Initialize the parameters"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "58IbcwyOTm9S"
   },
   "outputs": [],
   "source": [
    "lambd_0 = 0.002\n",
    "omega_0 = 1.1\n",
    "eta_0 = 1.3 \n",
    "beta_0 = beta \n",
    "rho_0 = 0.5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "qK1h_-r1UCN8"
   },
   "outputs": [],
   "source": [
    "sc=np.array([1,1,1,1]) # optional re-scaling"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "O3A81-tocRuz"
   },
   "outputs": [],
   "source": [
    "x0=np.array([lambd_0, omega_0, eta_0, rho_0])\n",
    "x0=[np.sqrt(x0[0]), np.sqrt(x0[1]-1), np.sqrt(x0[2]-1), np.log(x0[3]/(1-x0[3]))]\n",
    "x0*=sc"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 69
    },
    "colab_type": "code",
    "id": "XdgT7EKxZVDf",
    "outputId": "77a4f480-7d1b-4c78-f021-75f36e51e47a"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Doing G-learning, it may take a few seconds...\n",
      "0.15559492691209167\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "0.15559492691209167"
      ]
     },
     "execution_count": 45,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# test evaluate the loss function\n",
    "fun(x0, False, 1e-9, constraint=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 362
    },
    "colab_type": "code",
    "id": "NR9zW91ndDVR",
    "outputId": "aef06036-ee0e-4ab4-cc7f-4ea85627a0a5",
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Optimize with the Nelder-Mead method\n",
    "res = minimize(fun, x0, method='Nelder-Mead', args=(False, 1e-9, True), options={'disp': True, 'maxiter':50}, tol=0.01)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 0,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 225
    },
    "colab_type": "code",
    "id": "HRAO1V1_dtfd",
    "outputId": "2059a172-03ae-499d-e929-0e158baa8afd"
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       " final_simplex: (array([[3.88544708e-02, 3.42178403e-01, 5.52709786e-01, 2.70177583e-04],\n",
       "       [3.88086831e-02, 3.46053369e-01, 5.51339880e-01, 2.60418344e-04],\n",
       "       [3.93490486e-02, 3.44376362e-01, 5.46004202e-01, 2.58519441e-04],\n",
       "       [3.86693432e-02, 3.40562959e-01, 5.49617968e-01, 3.93678456e-04],\n",
       "       [3.92546304e-02, 3.41403971e-01, 5.56233052e-01, 2.73554265e-04]]), array([0.00096548, 0.00111332, 0.00112284, 0.00116518, 0.00130054]))\n",
       "           fun: 0.0009654780600741506\n",
       "       message: 'Optimization terminated successfully.'\n",
       "          nfev: 31\n",
       "           nit: 17\n",
       "        status: 0\n",
       "       success: True\n",
       "             x: array([3.88544708e-02, 3.42178403e-01, 5.52709786e-01, 2.70177583e-04])"
      ]
     },
     "execution_count": 105,
     "metadata": {
      "tags": []
     },
     "output_type": "execute_result"
    }
   ],
   "source": [
    "res"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 0,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "bw2lEM2Tn9tz"
   },
   "outputs": [],
   "source": [
    "# Print parameters"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 0,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 86
    },
    "colab_type": "code",
    "id": "D7zhJ8K5ePvA",
    "outputId": "c3025c91-c657-4f33-ed2a-665045ecc375"
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(0.0015096699032340407,\n",
       " 1.1170860597978067,\n",
       " 1.3054881075845823,\n",
       " 0.5000675443952752)"
      ]
     },
     "execution_count": 106,
     "metadata": {
      "tags": []
     },
     "output_type": "execute_result"
    }
   ],
   "source": [
    "res.x[0]**2/sc[0], (1+res.x[1]**2)/sc[1], (1+res.x[2]**2)/sc[2], 1.0/(1+np.exp(-res.x[3]))/sc[3]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "1TpYxXmq89QD"
   },
   "source": [
    "#### GIRL (Gradient based)\n",
    "Now separately perform GIRL using a gradient based optimizer. This has the advantage of being more accurate but can be less stable."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "VPZgTZ789R_H"
   },
   "outputs": [],
   "source": [
    "# initialize the parameters\n",
    "lambd_0 = 0.002\n",
    "omega_0 = 1.1\n",
    "eta_0 = 1.3 \n",
    "beta_0 = beta \n",
    "rho_0 = 0.5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "Z9nO_7hHBGai"
   },
   "outputs": [],
   "source": [
    "x0=np.array([lambd_0, omega_0, eta_0, rho_0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "MLHLm-uU9XSu"
   },
   "outputs": [],
   "source": [
    "# rescaling\n",
    "sc=np.array([1000,0.1,1,1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 69
    },
    "colab_type": "code",
    "id": "yEO1I-P2DpLp",
    "outputId": "17c8508d-1071-40fe-9735-2a6d37ae705c"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Doing G-learning, it may take a few seconds...\n",
      "2.9299978636205197\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "2.9299978636205197"
      ]
     },
     "execution_count": 37,
     "metadata": {
      "tags": []
     },
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# test evaluation of the loss function\n",
    "x_ask=np.array([lambd, omega, eta, rho])*sc\n",
    "fun(x_ask, False, 1e-6, constraint=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "WCZdikslULik"
   },
   "outputs": [],
   "source": [
    "# choose bounds for parameters\n",
    "bnds=((0.0001*sc[0], 0.0025*sc[0]), (0.01*sc[1], 1.5*sc[1]), (1.001*sc[2],2*sc[2]), (0.01*sc[3],1*sc[3]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 0,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 1000
    },
    "colab_type": "code",
    "id": "yScEwb3xUcSc",
    "outputId": "597e90db-1c56-4f6a-81d5-3ea9e9a26d5e"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Doing G-learning, it may take a few seconds...\n",
      "114.51214672236144\n",
      "Doing G-learning, it may take a few seconds...\n",
      "114.51204693140834\n",
      "Doing G-learning, it may take a few seconds...\n",
      "114.51209141444414\n",
      "Doing G-learning, it may take a few seconds...\n",
      "114.51206416361033\n",
      "Doing G-learning, it may take a few seconds...\n",
      "114.51207036474347\n",
      "Doing G-learning, it may take a few seconds...\n",
      "29120.70186816916\n",
      "Doing G-learning, it may take a few seconds...\n",
      "29120.70137245473\n",
      "Doing G-learning, it may take a few seconds...\n",
      "29120.701732233163\n",
      "Doing G-learning, it may take a few seconds...\n",
      "29120.701009316206\n",
      "Doing G-learning, it may take a few seconds...\n",
      "29120.699302427798\n",
      "Doing G-learning, it may take a few seconds...\n",
      "90.57371626668423\n",
      "Doing G-learning, it may take a few seconds...\n",
      "90.5737276693508\n",
      "Doing G-learning, it may take a few seconds...\n",
      "90.57366790079325\n",
      "Doing G-learning, it may take a few seconds...\n",
      "90.5737428843826\n",
      "Doing G-learning, it may take a few seconds...\n",
      "90.573748633489\n",
      "Doing G-learning, it may take a few seconds...\n",
      "72.9283061145395\n",
      "Doing G-learning, it may take a few seconds...\n",
      "72.9283157967627\n",
      "Doing G-learning, it may take a few seconds...\n",
      "72.9282648113966\n",
      "Doing G-learning, it may take a few seconds...\n",
      "72.92832845304162\n",
      "Doing G-learning, it may take a few seconds...\n",
      "72.9282520609051\n",
      "Doing G-learning, it may take a few seconds...\n",
      "37.71620820410549\n",
      "Doing G-learning, it may take a few seconds...\n",
      "37.71621405009925\n",
      "Doing G-learning, it may take a few seconds...\n",
      "37.71618248212337\n",
      "Doing G-learning, it may take a few seconds...\n",
      "37.71622130455822\n",
      "Doing G-learning, it may take a few seconds...\n",
      "37.71617523895204\n",
      "Doing G-learning, it may take a few seconds...\n",
      "12.188549883708358\n",
      "Doing G-learning, it may take a few seconds...\n",
      "12.188550437569617\n",
      "Doing G-learning, it may take a few seconds...\n",
      "12.188544672019779\n",
      "Doing G-learning, it may take a few seconds...\n",
      "12.18855044157058\n",
      "Doing G-learning, it may take a few seconds...\n",
      "12.188545932970941\n",
      "Doing G-learning, it may take a few seconds...\n",
      "10.915274291366337\n",
      "Doing G-learning, it may take a few seconds...\n",
      "10.915284751296042\n",
      "Doing G-learning, it may take a few seconds...\n",
      "10.9152731224671\n",
      "Doing G-learning, it may take a few seconds...\n",
      "10.91528331350535\n",
      "Doing G-learning, it may take a few seconds...\n",
      "10.915278079748154\n",
      "Doing G-learning, it may take a few seconds...\n",
      "11.27782982391864\n",
      "Doing G-learning, it may take a few seconds...\n",
      "11.277828880958259\n",
      "Doing G-learning, it may take a few seconds...\n",
      "11.277830183051526\n",
      "Doing G-learning, it may take a few seconds...\n",
      "11.27782687536627\n",
      "Doing G-learning, it may take a few seconds...\n",
      "11.277836134195327\n",
      "Doing G-learning, it may take a few seconds...\n",
      "11.30417232234776\n",
      "Doing G-learning, it may take a few seconds...\n",
      "11.304171463668347\n",
      "Doing G-learning, it may take a few seconds...\n",
      "11.304172374643384\n",
      "Doing G-learning, it may take a few seconds...\n",
      "11.304169572636484\n",
      "Doing G-learning, it may take a few seconds...\n",
      "11.304164115749298\n",
      "Doing G-learning, it may take a few seconds...\n",
      "11.283843143671751\n",
      "Doing G-learning, it may take a few seconds...\n",
      "11.283842222958803\n",
      "Doing G-learning, it may take a few seconds...\n",
      "11.283843421794474\n",
      "Doing G-learning, it may take a few seconds...\n",
      "11.28384024761617\n",
      "Doing G-learning, it may take a few seconds...\n",
      "11.283834749862551\n",
      "Doing G-learning, it may take a few seconds...\n",
      "11.279360758543014\n",
      "Doing G-learning, it may take a few seconds...\n",
      "11.279359821498394\n",
      "Doing G-learning, it may take a few seconds...\n",
      "11.279361096225678\n",
      "Doing G-learning, it may take a few seconds...\n",
      "11.279357823900877\n",
      "Doing G-learning, it may take a few seconds...\n",
      "11.279352315418421\n",
      "Doing G-learning, it may take a few seconds...\n",
      "11.278233894459902\n",
      "Doing G-learning, it may take a few seconds...\n",
      "11.278232953064142\n",
      "Doing G-learning, it may take a few seconds...\n",
      "11.278234247900546\n",
      "Doing G-learning, it may take a few seconds...\n",
      "11.27823094960302\n",
      "Doing G-learning, it may take a few seconds...\n",
      "11.278225438296795\n",
      "Doing G-learning, it may take a few seconds...\n"
     ]
    }
   ],
   "source": [
    "# Run the L-BFGS-B solver: a gradient-based optimizer that supports box bounds on the parameters\n",
    "res = minimize(fun, x0, method='L-BFGS-B', bounds=bnds, args=(False, 1e-6, False), options={'disp': True, 'maxiter': 50}, tol=0.0000001)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 165
    },
    "colab_type": "code",
    "id": "_6APDqOx-Tqx",
    "outputId": "41710a5d-050d-46fc-b135-659b64d74da2"
   },
   "outputs": [],
   "source": [
    "res"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 89,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 34
    },
    "colab_type": "code",
    "id": "9TcGgfihDQG1",
    "outputId": "16c1069e-f90d-443f-f897-87c7d9ea3892"
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([0.002     , 1.10000001, 1.30000003, 0.50000006])"
      ]
     },
     "execution_count": 89,
     "metadata": {
      "tags": []
     },
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Print the recovered (rescaled) parameters. Note that these may differ from the actual\n",
    "# G-learner parameters: the optimizer finds the closest set of parameters it can, depending on the solver.\n",
    "res.x/sc"
   ]
  }
 ],
 "metadata": {
  "colab": {
   "collapsed_sections": [],
   "include_colab_link": true,
   "machine_shape": "hm",
   "name": "Wealth_Management_GIRL.ipynb",
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
