{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Policy iteration is guaranteed to converge and at convergence, the current policy and its utility function are the optimal policy and the optimal utility function. First of all, we define a policy π which assigns an action to each state. We can assign random actions to this policy, it does not matter.\n",
     "Once we evaluate the policy we can improve it. The policy improvement is the second and last step of the algorithm. Our environment has a finite number of states and hence a finite number of policies. Each iteration yields a better policy."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Implementing the policy iteration algorithm:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "\n",
    "def return_policy_evaluation(p, u, r, T, gamma):\n",
    "\n",
    "    #v is the state vector\n",
    "    #T is the transition matrix\n",
    "    #u is the utility vector\n",
    "    #reward consists of the rewards earned for moving to a particular state\n",
    "    #gamma is the discount factor by which rewards are discounted over the time\n",
    "    for s in range(12):\n",
    "        if not np.isnan(p[s]):\n",
    "            v = np.zeros((1,12))\n",
    "            v[0,s] = 1.0\n",
    "            action = int(p[s])\n",
    "            u[s] = r[s] + gamma * np.sum(np.multiply(u, np.dot(v, T[:,:,action])))\n",
    "    return u"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def return_expected_action(u, T, v):\n",
    "    \n",
    "#    It returns an action based on the\n",
    "#    expected utility of doing a in state s, \n",
    "#    according to T and u. This action is\n",
    "#    the one that maximize the expected\n",
    "#    utility.\n",
    "    \n",
    "    actions_array = np.zeros(4)\n",
    "    for action in range(4):\n",
    "       #Expected utility of doing a in state s, according to T and u.\n",
    "       actions_array[action] = np.sum(np.multiply(u, np.dot(v, T[:,:,action])))\n",
    "    return np.argmax(actions_array)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def print_policy(p, shape):\n",
    "    \"\"\"Printing utility.\n",
    "\n",
    "    Print the policy actions using symbols:\n",
    "    ^, v, <, > up, down, left, right\n",
    "    * terminal states\n",
    "    # obstacles\n",
    "    \"\"\"\n",
    "    counter = 0\n",
    "    policy_string = \"\"\n",
    "    for row in range(shape[0]):\n",
    "        for col in range(shape[1]):\n",
    "            if(p[counter] == -1): policy_string += \" *  \"            \n",
    "            elif(p[counter] == 0): policy_string += \" ^  \"\n",
    "            elif(p[counter] == 1): policy_string += \" <  \"\n",
    "            elif(p[counter] == 2): policy_string += \" v  \"           \n",
    "            elif(p[counter] == 3): policy_string += \" >  \"\n",
    "            elif(np.isnan(p[counter])): policy_string += \" #  \"\n",
    "            counter += 1\n",
    "        policy_string += '\\n'\n",
    "    print(policy_string)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " v   <   >   *  \n",
      " ^   #   <   *  \n",
      " <   <   ^   v  \n",
      "\n",
      " ^   >   >   *  \n",
      " ^   #   ^   *  \n",
      " <   >   ^   v  \n",
      "\n",
      " >   >   >   *  \n",
      " ^   #   ^   *  \n",
      " >   >   ^   <  \n",
      "\n",
      " >   >   >   *  \n",
      " ^   #   ^   *  \n",
      " >   >   ^   <  \n",
      "\n",
      " >   >   >   *  \n",
      " ^   #   ^   *  \n",
      " ^   >   ^   <  \n",
      "\n",
      " >   >   >   *  \n",
      " ^   #   ^   *  \n",
      " ^   >   ^   <  \n",
      "\n",
      " >   >   >   *  \n",
      " ^   #   ^   *  \n",
      " ^   <   ^   <  \n",
      "\n",
      " >   >   >   *  \n",
      " ^   #   ^   *  \n",
      " ^   <   ^   <  \n",
      "\n",
      " >   >   >   *  \n",
      " ^   #   ^   *  \n",
      " ^   <   ^   <  \n",
      "\n",
      " >   >   >   *  \n",
      " ^   #   ^   *  \n",
      " ^   <   <   <  \n",
      "\n",
      " >   >   >   *  \n",
      " ^   #   ^   *  \n",
      " ^   <   <   <  \n",
      "\n",
      " >   >   >   *  \n",
      " ^   #   ^   *  \n",
      " ^   <   <   <  \n",
      "\n",
      " >   >   >   *  \n",
      " ^   #   ^   *  \n",
      " ^   <   <   <  \n",
      "\n",
      " >   >   >   *  \n",
      " ^   #   ^   *  \n",
      " ^   <   <   <  \n",
      "\n",
      " >   >   >   *  \n",
      " ^   #   ^   *  \n",
      " ^   <   <   <  \n",
      "\n",
      " >   >   >   *  \n",
      " ^   #   ^   *  \n",
      " ^   <   <   <  \n",
      "\n",
      " >   >   >   *  \n",
      " ^   #   ^   *  \n",
      " ^   <   <   <  \n",
      "\n",
      " >   >   >   *  \n",
      " ^   #   ^   *  \n",
      " ^   <   <   <  \n",
      "\n",
      " >   >   >   *  \n",
      " ^   #   ^   *  \n",
      " ^   <   <   <  \n",
      "\n",
      " >   >   >   *  \n",
      " ^   #   ^   *  \n",
      " ^   <   <   <  \n",
      "\n",
      " >   >   >   *  \n",
      " ^   #   ^   *  \n",
      " ^   <   <   <  \n",
      "\n",
      "=================== FINAL RESULT ==================\n",
      "Iterations: 22\n",
      "Delta: 9.043213450299348e-08\n",
      "Gamma: 0.999\n",
      "Epsilon: 0.0001\n",
      "===================================================\n",
      "[0.80796344 0.86539911 0.91653199 1.        ]\n",
      "[ 0.75696624  0.          0.65836281 -1.        ]\n",
      "[0.69968295 0.64882105 0.60471972 0.38150427]\n",
      "===================================================\n",
      " >   >   >   *  \n",
      " ^   #   ^   *  \n",
      " ^   <   <   <  \n",
      "\n",
      "===================================================\n"
     ]
    }
   ],
   "source": [
    "def main():\n",
    "    gamma = 0.999\n",
    "    epsilon = 0.0001\n",
    "    iteration = 0\n",
    "    T = np.load(\"T.npy\")\n",
    "    #Generate the first policy randomly\n",
    "    # NaN=Nothing, -1=Terminal, 0=Up, 1=Left, 2=Down, 3=Right\n",
    "    p = np.random.randint(0, 4, size=(12)).astype(np.float32)\n",
    "    p[5] = np.NaN\n",
    "    p[3] = p[7] = -1\n",
    "    #Utility vectors\n",
    "    u = np.array([0.0, 0.0, 0.0,  0.0,\n",
    "                  0.0, 0.0, 0.0,  0.0,\n",
    "                  0.0, 0.0, 0.0,  0.0])\n",
    "    #Reward vector\n",
    "    r = np.array([-0.04, -0.04, -0.04,  +1.0,\n",
    "                  -0.04,   0.0, -0.04,  -1.0,\n",
    "                  -0.04, -0.04, -0.04, -0.04])\n",
    "\n",
    "    while True:\n",
    "        iteration += 1\n",
    "        #1- Policy evaluation\n",
    "        u_0 = u.copy()\n",
    "        u = return_policy_evaluation(p, u, r, T, gamma)\n",
    "        #Stopping criteria\n",
    "        delta = np.absolute(u - u_0).max()\n",
    "        if delta < epsilon * (1 - gamma) / gamma: break\n",
    "        for s in range(12):\n",
    "            if not np.isnan(p[s]) and not p[s]==-1:\n",
    "                v = np.zeros((1,12))\n",
    "                v[0,s] = 1.0\n",
    "                #2- Policy improvement\n",
    "                a = return_expected_action(u, T, v)         \n",
    "                if a != p[s]: p[s] = a\n",
    "        print_policy(p, shape=(3,4))\n",
    "\n",
    "    print(\"=================== FINAL RESULT ==================\")\n",
    "    print(\"Iterations: \" + str(iteration))\n",
    "    print(\"Delta: \" + str(delta))\n",
    "    print(\"Gamma: \" + str(gamma))\n",
    "    print(\"Epsilon: \" + str(epsilon))\n",
    "    print(\"===================================================\")\n",
    "    print(u[0:4])\n",
    "    print(u[4:8])\n",
    "    print(u[8:12])\n",
    "    print(\"===================================================\")\n",
    "    print_policy(p, shape=(3,4))\n",
    "    print(\"===================================================\")\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    main()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
