\documentclass[a4paper,11pt]{article}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{lmodern}
\usepackage{amsmath}
\usepackage{subfig}
\usepackage{graphicx}
\DeclareMathOperator{\argmax}{\arg\max}
\title{Intelligent Agents Assignment 2} 
\author{Behrooz Mahasseni}

\begin{document}
\maketitle
%\tableofcontents
\begin{abstract}
  The goal of this assignment is to implement a general planning algorithm for finding the optimum stationary policy for a general MDP. We test our algorithm on a simple MDP with different configurations to show the correctness of our implementation. In addition we also test our planning algorithm and the resulting stationary policy on a sample parking lot problem with two different configurations and explain the behavior of the policy for each scenario.
\end{abstract}
\section{Implementation}
Our planner uses value iteration. We extend the base implementation from assignment 1. We add a new StationaryPolicy class which extends the super class Policy. It has the same valueIteration function and takes as input the acceptable error $\delta$ with respect to the optimal policy. Based on this $\delta$ we compute $\epsilon = \delta \times \frac{1-\beta}{2 \times \beta}$. Our value iteration function loops until the max norm satisfies $\|V_k - V_{k-1}\|_\infty \leq \epsilon$. Finally, we use greedy policy selection after the convergence of the value iteration to find the greedy policy.
In our implementation we have the following classes:
\begin{enumerate}
  \item State: This class stores the state id and the reward associated with the state. In addition it also stores the values of the $V^k_{\pi^*}$. In other words each state object will store the corresponding $V_k$ and $V_{k-1}$ values.
  \item Action: This class stores the id, name and transition probabilities associated with the activity.
  \item Policy: We have a policy interface which is parent class for Stationary and Non-Stationary policy classes. 
  \item StationaryPolicy: Is the class that implements "Policy" interface. It has a function which gives you the optimum action for a give state. The bellman backup algorithm (value iteration) has been implemented in this class as valueIteration() function. This function creates and assigns the optimum policy to the MDP as discussed above.
  \item MDP: This class stores the MDP structure. It has the set of states and actions. 
  \item ParkingSpot: This class is a data class for parking lot problem. There is a name tag which can be "A" or "B" and an index number which index the spot in that row. It also has the probability of being empty for the parking spot. 
\end{enumerate}
\section{Simple MDPs}
To test our planning algorithm we use a simple agent in a $3 \times 3$ grid. We have two exit states with rewards "+1" and "-1" respectively. Figure~\ref{fig:001} shows the environment. We use 5 different actions: moving up, down, left, right, and exit. The result of performing an action is stochastic. We test our planning algorithm with two simple scenarios. Both scenarios work on the same MDP; the only difference is the effect of choosing different $\beta$ values. The agent reaches the terminal state when it enters the cell (1,3). 
Note: The complete MDP for both scenarios is defined in file "simpleMDP.txt".  The $\delta = 0.000001$ has been used in both scenarios. 
  \begin{figure}[ht]
    \caption{Simple MDP Environment.}
    \centering
    \includegraphics[totalheight=0.25\textwidth]{hw2_fig1.jpg}
    \label{fig:001}
  \end{figure}
\subsection{Scenario 1}
In this scenario we once set $\beta = 0.1$ and once $\beta = 0.9$. As shown in Tables~\ref{tab:000} and~\ref{tab:001}, in both cases the policy chooses not to go through the negative "state 5" when we are at "state 8". The reason for that when $\beta = 0.1$ is that by passing "state 5" the agent is not able to compensate for the negative reward it receives: when it reaches "state 1" the discount factor is $0.01$, and this reduces the actual reward of "state 0" to "1". On the other hand, when $\beta = 0.9$ the negative reward we get at "state 5" is equal to $-9$, but the positive reward we get when we reach "state 0" is $0.81$, which again is not good. As a result the policy chooses to avoid "state 5".
Note: The MDP for this scenario is defined in "simpleMDP.txt". 
\begin{table}
  \centering
  \tiny
  \begin{tabular} {|c|c|c|}
  \hline
  state&policy&value\\\hline  
index:0(State:0 o=false p=false)&right&17.414087\\\hline
index:1(State:1 o=false p=false)&right&91.17414\\\hline
index:2(State:2 o=false p=false)&exit&0.0\\\hline
index:3(State:3 o=false p=false)&right&10.841409\\\hline
index:4(State:4 o=false p=false)&up&17.414087\\\hline
index:5(State:5 o=false p=false)&up&91.17414\\\hline
index:6(State:6 o=false p=false)&up&2.0826073\\\hline
index:7(State:7 o=false p=false)&up&10.688093\\\hline
index:8(State:8 o=false p=false)&left&1.7736696\\\hline
index:9(State:9 o=false p=false)&exit&0.0\\\hline
  \end{tabular}
  \caption{Greedy policy selection result for first scenario on simple MDP example when $\beta=0.1$}
  \label{tab:000}
\end{table}
\begin{table}
  \centering
  \tiny
  \begin{tabular} {|c|c|c|}
  \hline
  state&policy&value\\\hline  
index:0(State:0 o=false p=false)&right&98.84057\\\hline
index:1(State:1 o=false p=false)&right&99.89565\\\hline
index:2(State:2 o=false p=false)&exit&0.0\\\hline
index:3(State:3 o=false p=false)&right&98.05652\\\hline
index:4(State:4 o=false p=false)&up&98.84057\\\hline
index:5(State:5 o=false p=false)&up&99.89565\\\hline
index:6(State:6 o=false p=false)&up&89.17257\\\hline
index:7(State:7 o=false p=false)&up&97.1864\\\hline
index:8(State:8 o=false p=false)&left&87.6116\\\hline
index:9(State:9 o=false p=false)&exit&0.0\\\hline
  \end{tabular}
  \caption{Greedy policy selection result for first scenario on simple MDP example when $\beta=0.9$}
  \label{tab:001}
\end{table}
\subsection{Scenario 2}
Setting $0.1 < \beta < 0.9$ results in passing through "state 5" when we are at "state 8", because the discounted reward by the time you get to "state 0" is positive. Table~\ref{tab:002} shows the policy when $\beta = 0.4$.
\begin{table}
  \centering
  \tiny
  \begin{tabular} {|c|c|c|}
  \hline
  state&policy&value\\\hline  
index:0(State:0 o=false p=false)&right&43.547855\\\hline
index:1(State:1 o=false p=false)&right&92.74191\\\hline
index:2(State:2 o=false p=false)&exit&0.0\\\hline
index:3(State:3 o=false p=false)&right&26.519142\\\hline
index:4(State:4 o=false p=false)&up&43.547855\\\hline
index:5(State:5 o=false p=false)&up&92.74191\\\hline
index:6(State:6 o=false p=false)&up&11.556471\\\hline
index:7(State:7 o=false p=false)&up&25.239487\\\hline
index:8(State:8 o=false p=false)&up&25.49667\\\hline
index:9(State:9 o=false p=false)&exit&0.0\\\hline
  \end{tabular}
  \caption{Greedy policy selection result for first scenario on simple MDP example when $\beta=0.4$}
  \label{tab:002}
\end{table}
\section{Parking lot MDP}
In our code we generate an MDP from a set of ParkingSpot objects read from the input file. The input file consists of the probabilities of spots being empty. The first half holds the "A" row's probabilities and the second half the "B" row's. Each half starts from index [1], so A[1] is the first element of the first half of the list and B[1] is the first element of the second half.
We use the strategy proposed in the assignment for generating states. For each parking spot we generate the following 4 states: 1-(o=false, p=false), 2-(o=false, p=true), 3-(o=true, p=false), 4-(o=true, p=true), where o = occupied and p = parked. We also have a terminal state which the agent enters after moving to a state where (p = true). So the transition probability to the terminal state is "1" for all states where (p = true). The transition probability of the terminal state to itself is also "1". We test our planning algorithm in the following two scenarios:
\subsection{Scenario 1 }
In this scenario we set the discount factor $\beta = 0.9$, which is close to one. Table~\ref{tab:003} shows the resulting policy and corresponding values for $\delta = 0.000001$. In this scenario we have a quadratic reward with respect to the parking spot position: as we go further away from the shopping center the reward decreases quadratically. As we can see in the table, for A[1] and B[1] the policy never parks. This is reasonable given the negative reward we have for parking in these spots. For A[2]-A[4] and B[2]-B[4] the policy parks when the spot is empty. This is also a reasonable policy because these spots are close to the shopping center. For A[5]-A[10] and B[7]-B[10] the policy never parks and it drives toward the closest spots to get a higher reward. For all states where we have parked, although the action is correct, the value is always zero. The reason for this is that the terminal state has zero reward and we are using greedy policy selection. If we do not use the greedy policy selection and just work with the values from the value iteration function, we get the results in Table~\ref{tab:004}. We can see that the actions are the same but the values are different for the parked states. Alternatively we can set the reward of the terminal state to a very small number instead of zero.
Note: This scenario can be found in "parkinglot1.txt". Generation of the quadratic rewards is in code. 
\begin{table}
  \centering
  \tiny
  \begin{tabular} {|c|c|c|}
  \hline
  state&policy&value\\\hline   
  index:0(State:A[1] o=false p=false)&Drive&213.69432\\\hline
index:1(State:A[1] o=false p=true)&Exit&0.0\\\hline
index:2(State:A[1] o=true p=false)&Drive&213.69432\\\hline
index:3(State:A[1] o=true p=true)&Exit&0.0\\\hline
index:4(State:A[2] o=false p=false)&Park&729.0\\\hline
index:5(State:A[2] o=false p=true)&Exit&0.0\\\hline
index:6(State:A[2] o=true p=false)&Drive&192.07489\\\hline
index:7(State:A[2] o=true p=true)&Exit&0.0\\\hline
index:8(State:A[3] o=false p=false)&Park&512.0\\\hline
index:9(State:A[3] o=false p=true)&Exit&0.0\\\hline
index:10(State:A[3] o=true p=false)&Drive&220.94064\\\hline
index:11(State:A[3] o=true p=true)&Exit&0.0\\\hline
index:12(State:A[4] o=false p=false)&Park&343.0\\\hline
index:13(State:A[4] o=false p=true)&Exit&0.0\\\hline
index:14(State:A[4] o=true p=false)&Drive&250.98726\\\hline
index:15(State:A[4] o=true p=true)&Exit&0.0\\\hline
index:16(State:A[5] o=false p=false)&Drive&250.48196\\\hline
index:17(State:A[5] o=false p=true)&Exit&0.0\\\hline
index:18(State:A[5] o=true p=false)&Drive&250.48196\\\hline
index:19(State:A[5] o=true p=true)&Exit&0.0\\\hline
index:20(State:A[6] o=false p=false)&Drive&225.18376\\\hline
index:21(State:A[6] o=false p=true)&Exit&0.0\\\hline
index:22(State:A[6] o=true p=false)&Drive&225.18376\\\hline
index:23(State:A[6] o=true p=true)&Exit&0.0\\\hline
index:24(State:A[7] o=false p=false)&Drive&202.41537\\\hline
index:25(State:A[7] o=false p=true)&Exit&0.0\\\hline
index:26(State:A[7] o=true p=false)&Drive&202.41537\\\hline
index:27(State:A[7] o=true p=true)&Exit&0.0\\\hline
index:28(State:A[8] o=false p=false)&Drive&181.92383\\\hline
index:29(State:A[8] o=false p=true)&Exit&0.0\\\hline
index:30(State:A[8] o=true p=false)&Drive&181.92383\\\hline
index:31(State:A[8] o=true p=true)&Exit&0.0\\\hline
index:32(State:A[9] o=false p=false)&Drive&163.48145\\\hline
index:33(State:A[9] o=false p=true)&Exit&0.0\\\hline
index:34(State:A[9] o=true p=false)&Drive&163.48145\\\hline
index:35(State:A[9] o=true p=true)&Exit&0.0\\\hline
index:36(State:A[10] o=false p=false)&Drive&146.8833\\\hline
index:37(State:A[10] o=false p=true)&Exit&0.0\\\hline
index:38(State:A[10] o=true p=false)&Drive&146.8833\\\hline
index:39(State:A[10] o=true p=true)&Exit&0.0\\\hline
index:40(State:B[1] o=false p=false)&Drive&237.71594\\\hline
index:41(State:B[1] o=false p=true)&Exit&0.0\\\hline
index:42(State:B[1] o=true p=false)&Drive&237.71594\\\hline
index:43(State:B[1] o=true p=true)&Exit&0.0\\\hline
index:44(State:B[2] o=false p=false)&Park&729.0\\\hline
index:45(State:B[2] o=false p=true)&Exit&0.0\\\hline
index:46(State:B[2] o=true p=false)&Drive&212.78513\\\hline
index:47(State:B[2] o=true p=true)&Exit&0.0\\\hline
index:48(State:B[3] o=false p=false)&Park&512.0\\\hline
index:49(State:B[3] o=false p=true)&Exit&0.0\\\hline
index:50(State:B[3] o=true p=false)&Drive&167.88211\\\hline
index:51(State:B[3] o=true p=true)&Exit&0.0\\\hline
index:52(State:B[4] o=false p=false)&Park&343.0\\\hline
index:53(State:B[4] o=false p=true)&Exit&0.0\\\hline
index:54(State:B[4] o=true p=false)&Drive&119.87637\\\hline
index:55(State:B[4] o=true p=true)&Exit&0.0\\\hline
index:56(State:B[5] o=false p=false)&Park&216.0\\\hline
index:57(State:B[5] o=false p=true)&Exit&0.0\\\hline
index:58(State:B[5] o=true p=false)&Drive&98.10536\\\hline
index:59(State:B[5] o=true p=true)&Exit&0.0\\\hline
index:60(State:B[6] o=false p=false)&Park&125.0\\\hline
index:61(State:B[6] o=false p=true)&Exit&0.0\\\hline
index:62(State:B[6] o=true p=false)&Drive&85.709335\\\hline
index:63(State:B[6] o=true p=true)&Exit&0.0\\\hline
index:64(State:B[7] o=false p=false)&Drive&95.510376\\\hline
index:65(State:B[7] o=false p=true)&Exit&0.0\\\hline
index:66(State:B[7] o=true p=false)&Drive&95.510376\\\hline
index:67(State:B[7] o=true p=true)&Exit&0.0\\\hline
index:68(State:B[8] o=false p=false)&Drive&106.40042\\\hline
index:69(State:B[8] o=false p=true)&Exit&0.0\\\hline
index:70(State:B[8] o=true p=false)&Drive&106.40042\\\hline
index:71(State:B[8] o=true p=true)&Exit&0.0\\\hline
index:72(State:B[9] o=false p=false)&Drive&118.50047\\\hline
index:73(State:B[9] o=false p=true)&Exit&0.0\\\hline
index:74(State:B[9] o=true p=false)&Drive&118.50047\\\hline
index:75(State:B[9] o=true p=true)&Exit&0.0\\\hline
index:76(State:B[10] o=false p=false)&Drive&131.94496\\\hline
index:77(State:B[10] o=false p=true)&Exit&0.0\\\hline
index:78(State:B[10] o=true p=false)&Drive&131.94496\\\hline
index:79(State:B[10] o=true p=true)&Exit&0.0\\\hline
index:80(Terminate o=false p=false)&Exit&0.0\\\hline
  \end{tabular}
  \caption{Greedy policy selection result for first scenario}
  \label{tab:003}
\end{table}

\begin{table}
  \centering
  \tiny
  \begin{tabular} {|c|c|c|}
  \hline
  state&policy&value\\\hline   
index:0(State:A[1] o=false p=false)&Drive&192.07489\\\hline
index:1(State:A[1] o=false p=true)&Exit&-1.0\\\hline
index:2(State:A[1] o=true p=false)&Drive&192.07489\\\hline
index:3(State:A[1] o=true p=true)&Exit&-2.0\\\hline
index:4(State:A[2] o=false p=false)&Park&655.85\\\hline
index:5(State:A[2] o=false p=true)&Exit&729.0\\\hline
index:6(State:A[2] o=true p=false)&Drive&172.6174\\\hline
index:7(State:A[2] o=true p=true)&Exit&-2.0\\\hline
index:8(State:A[3] o=false p=false)&Park&460.55\\\hline
index:9(State:A[3] o=false p=true)&Exit&512.0\\\hline
index:10(State:A[3] o=true p=false)&Drive&198.59657\\\hline
index:11(State:A[3] o=true p=true)&Exit&-2.0\\\hline
index:12(State:A[4] o=false p=false)&Park&308.44998\\\hline
index:13(State:A[4] o=false p=true)&Exit&343.0\\\hline
index:14(State:A[4] o=true p=false)&Drive&225.63853\\\hline
index:15(State:A[4] o=true p=true)&Exit&-2.0\\\hline
index:16(State:A[5] o=false p=false)&Drive&225.18376\\\hline
index:17(State:A[5] o=false p=true)&Exit&216.0\\\hline
index:18(State:A[5] o=true p=false)&Drive&225.18376\\\hline
index:19(State:A[5] o=true p=true)&Exit&-2.0\\\hline
index:20(State:A[6] o=false p=false)&Drive&202.41537\\\hline
index:21(State:A[6] o=false p=true)&Exit&125.0\\\hline
index:22(State:A[6] o=true p=false)&Drive&202.41537\\\hline
index:23(State:A[6] o=true p=true)&Exit&-2.0\\\hline
index:24(State:A[7] o=false p=false)&Drive&181.92383\\\hline
index:25(State:A[7] o=false p=true)&Exit&64.0\\\hline
index:26(State:A[7] o=true p=false)&Drive&181.92383\\\hline
index:27(State:A[7] o=true p=true)&Exit&-2.0\\\hline
index:28(State:A[8] o=false p=false)&Drive&163.48145\\\hline
index:29(State:A[8] o=false p=true)&Exit&27.0\\\hline
index:30(State:A[8] o=true p=false)&Drive&163.48145\\\hline
index:31(State:A[8] o=true p=true)&Exit&-2.0\\\hline
index:32(State:A[9] o=false p=false)&Drive&146.8833\\\hline
index:33(State:A[9] o=false p=true)&Exit&8.0\\\hline
index:34(State:A[9] o=true p=false)&Drive&146.8833\\\hline
index:35(State:A[9] o=true p=true)&Exit&-2.0\\\hline
index:36(State:A[10] o=false p=false)&Drive&131.94496\\\hline
index:37(State:A[10] o=false p=true)&Exit&1.0\\\hline
index:38(State:A[10] o=true p=false)&Drive&131.94496\\\hline
index:39(State:A[10] o=true p=true)&Exit&-2.0\\\hline
index:40(State:B[1] o=false p=false)&Drive&213.69434\\\hline
index:41(State:B[1] o=false p=true)&Exit&-1.0\\\hline
index:42(State:B[1] o=true p=false)&Drive&213.69434\\\hline
index:43(State:B[1] o=true p=true)&Exit&-2.0\\\hline
index:44(State:B[2] o=false p=false)&Park&655.85\\\hline
index:45(State:B[2] o=false p=true)&Exit&729.0\\\hline
index:46(State:B[2] o=true p=false)&Drive&191.2566\\\hline
index:47(State:B[2] o=true p=true)&Exit&-2.0\\\hline
index:48(State:B[3] o=false p=false)&Park&460.55\\\hline
index:49(State:B[3] o=false p=true)&Exit&512.0\\\hline
index:50(State:B[3] o=true p=false)&Drive&150.8439\\\hline
index:51(State:B[3] o=true p=true)&Exit&-2.0\\\hline
index:52(State:B[4] o=false p=false)&Park&308.44998\\\hline
index:53(State:B[4] o=false p=true)&Exit&343.0\\\hline
index:54(State:B[4] o=true p=false)&Drive&107.63873\\\hline
index:55(State:B[4] o=true p=true)&Exit&-2.0\\\hline
index:56(State:B[5] o=false p=false)&Park&194.15\\\hline
index:57(State:B[5] o=false p=true)&Exit&216.0\\\hline
index:58(State:B[5] o=true p=false)&Drive&88.04482\\\hline
index:59(State:B[5] o=true p=true)&Exit&-2.0\\\hline
index:60(State:B[6] o=false p=false)&Park&112.25\\\hline
index:61(State:B[6] o=false p=true)&Exit&125.0\\\hline
index:62(State:B[6] o=true p=false)&Drive&76.8884\\\hline
index:63(State:B[6] o=true p=true)&Exit&-2.0\\\hline
index:64(State:B[7] o=false p=false)&Drive&85.709335\\\hline
index:65(State:B[7] o=false p=true)&Exit&64.0\\\hline
index:66(State:B[7] o=true p=false)&Drive&85.709335\\\hline
index:67(State:B[7] o=true p=true)&Exit&-2.0\\\hline
index:68(State:B[8] o=false p=false)&Drive&95.510376\\\hline
index:69(State:B[8] o=false p=true)&Exit&27.0\\\hline
index:70(State:B[8] o=true p=false)&Drive&95.510376\\\hline
index:71(State:B[8] o=true p=true)&Exit&-2.0\\\hline
index:72(State:B[9] o=false p=false)&Drive&106.40042\\\hline
index:73(State:B[9] o=false p=true)&Exit&8.0\\\hline
index:74(State:B[9] o=true p=false)&Drive&106.40042\\\hline
index:75(State:B[9] o=true p=true)&Exit&-2.0\\\hline
index:76(State:B[10] o=false p=false)&Drive&118.500465\\\hline
index:77(State:B[10] o=false p=true)&Exit&1.0\\\hline
index:78(State:B[10] o=true p=false)&Drive&118.500465\\\hline
index:79(State:B[10] o=true p=true)&Exit&-2.0\\\hline
index:80(Terminate o=false p=false)&Exit&0.0\\\hline  \end{tabular}
  \caption{Using the values of the value iteration without using the greedy value selection}
  \label{tab:004}
\end{table}
\subsection{Scenario 2} 

In this scenario we use different probabilities for the parking spots and give the closer spots a higher probability of being free. In addition, instead of a quadratic reward function with respect to the distance we use a linear one, so the differences between the rewards are not as significant anymore. As a result we can see in Table~\ref{tab:005} that for A[1] and B[1] we have similar results as before because the reward is negative, but for A[2]-A[6] the policy parks. From A[7]-A[10] the policy tries to get closer to the shopping center, because in this scenario the closer spots have a higher probability of being free. For the B spots it is different: from B[2]-B[9] the policy tries to park because the negative cost of driving cancels the discounted reward we could obtain. Only for B[10] does it choose to still check for another empty spot. This is reasonable because at B[10] we are only a few spots away from the high-reward spots. 
Note: This scenario can be found in "parkinglot2.txt". Generation of the linear rewards is commented in code.
\begin{table}
  \centering
  \tiny
  \begin{tabular} {|c|c|c|}
  \hline
  state&policy&value\\\hline 
index:0(State:A[1] o=false p=false)&Drive&3.8585143\\\hline
index:1(State:A[1] o=false p=true)&Exit&0.0\\\hline
index:2(State:A[1] o=true p=false)&Drive&3.8585143\\\hline
index:3(State:A[1] o=true p=true)&Exit&0.0\\\hline
index:4(State:A[2] o=false p=false)&Park&9.0\\\hline
index:5(State:A[2] o=false p=true)&Exit&0.0\\\hline
index:6(State:A[2] o=true p=false)&Drive&3.222663\\\hline
index:7(State:A[2] o=true p=true)&Exit&0.0\\\hline
index:8(State:A[3] o=false p=false)&Park&8.0\\\hline
index:9(State:A[3] o=false p=true)&Exit&0.0\\\hline
index:10(State:A[3] o=true p=false)&Drive&3.1703568\\\hline
index:11(State:A[3] o=true p=true)&Exit&0.0\\\hline
index:12(State:A[4] o=false p=false)&Park&7.0\\\hline
index:13(State:A[4] o=false p=true)&Exit&0.0\\\hline
index:14(State:A[4] o=true p=false)&Drive&4.341993\\\hline
index:15(State:A[4] o=true p=true)&Exit&0.0\\\hline
index:16(State:A[5] o=false p=false)&Park&6.0\\\hline
index:17(State:A[5] o=false p=true)&Exit&0.0\\\hline
index:18(State:A[5] o=true p=false)&Drive&4.614676\\\hline
index:19(State:A[5] o=true p=true)&Exit&0.0\\\hline
index:20(State:A[6] o=false p=false)&Park&5.0\\\hline
index:21(State:A[6] o=false p=true)&Exit&0.0\\\hline
index:22(State:A[6] o=true p=false)&Drive&4.401925\\\hline
index:23(State:A[6] o=true p=true)&Exit&0.0\\\hline
index:24(State:A[7] o=false p=false)&Drive&4.0346932\\\hline
index:25(State:A[7] o=false p=true)&Exit&0.0\\\hline
index:26(State:A[7] o=true p=false)&Drive&4.0346932\\\hline
index:27(State:A[7] o=true p=true)&Exit&0.0\\\hline
index:28(State:A[8] o=false p=false)&Drive&3.3812242\\\hline
index:29(State:A[8] o=false p=true)&Exit&0.0\\\hline
index:30(State:A[8] o=true p=false)&Drive&3.3812242\\\hline
index:31(State:A[8] o=true p=true)&Exit&0.0\\\hline
index:32(State:A[9] o=false p=false)&Drive&2.7931018\\\hline
index:33(State:A[9] o=false p=true)&Exit&0.0\\\hline
index:34(State:A[9] o=true p=false)&Drive&2.7931018\\\hline
index:35(State:A[9] o=true p=true)&Exit&0.0\\\hline
index:36(State:A[10] o=false p=false)&Drive&2.2637916\\\hline
index:37(State:A[10] o=false p=true)&Exit&0.0\\\hline
index:38(State:A[10] o=true p=false)&Drive&2.2637916\\\hline
index:39(State:A[10] o=true p=true)&Exit&0.0\\\hline
index:40(State:B[1] o=false p=false)&Drive&4.565016\\\hline
index:41(State:B[1] o=false p=true)&Exit&0.0\\\hline
index:42(State:B[1] o=true p=false)&Drive&4.565016\\\hline
index:43(State:B[1] o=true p=true)&Exit&0.0\\\hline
index:44(State:B[2] o=false p=false)&Park&9.0\\\hline
index:45(State:B[2] o=false p=true)&Exit&0.0\\\hline
index:46(State:B[2] o=true p=false)&Drive&4.9444637\\\hline
index:47(State:B[2] o=true p=true)&Exit&0.0\\\hline
index:48(State:B[3] o=false p=false)&Park&8.0\\\hline
index:49(State:B[3] o=false p=true)&Exit&0.0\\\hline
index:50(State:B[3] o=true p=false)&Drive&4.2860446\\\hline
index:51(State:B[3] o=true p=true)&Exit&0.0\\\hline
index:52(State:B[4] o=false p=false)&Park&7.0\\\hline
index:53(State:B[4] o=false p=true)&Exit&0.0\\\hline
index:54(State:B[4] o=true p=false)&Drive&3.7334163\\\hline
index:55(State:B[4] o=true p=true)&Exit&0.0\\\hline
index:56(State:B[5] o=false p=false)&Park&6.0\\\hline
index:57(State:B[5] o=false p=true)&Exit&0.0\\\hline
index:58(State:B[5] o=true p=false)&Drive&3.376697\\\hline
index:59(State:B[5] o=true p=true)&Exit&0.0\\\hline
index:60(State:B[6] o=false p=false)&Park&5.0\\\hline
index:61(State:B[6] o=false p=true)&Exit&0.0\\\hline
index:62(State:B[6] o=true p=false)&Drive&2.5741582\\\hline
index:63(State:B[6] o=true p=true)&Exit&0.0\\\hline
index:64(State:B[7] o=false p=false)&Park&4.0\\\hline
index:65(State:B[7] o=false p=true)&Exit&0.0\\\hline
index:66(State:B[7] o=true p=false)&Drive&1.8448837\\\hline
index:67(State:B[7] o=true p=true)&Exit&0.0\\\hline
index:68(State:B[8] o=false p=false)&Park&3.0\\\hline
index:69(State:B[8] o=false p=true)&Exit&0.0\\\hline
index:70(State:B[8] o=true p=false)&Drive&1.3191216\\\hline
index:71(State:B[8] o=true p=true)&Exit&0.0\\\hline
index:72(State:B[9] o=false p=false)&Park&2.0\\\hline
index:73(State:B[9] o=false p=true)&Exit&0.0\\\hline
index:74(State:B[9] o=true p=false)&Drive&1.3586712\\\hline
index:75(State:B[9] o=true p=true)&Exit&0.0\\\hline
index:76(State:B[10] o=false p=false)&Drive&1.7874125\\\hline
index:77(State:B[10] o=false p=true)&Exit&0.0\\\hline
index:78(State:B[10] o=true p=false)&Drive&1.7874125\\\hline
index:79(State:B[10] o=true p=true)&Exit&0.0\\\hline
index:80(Terminate o=false p=false)&Exit&0.0\\\hline
\end{tabular}
  \caption{Greedy policy selection result for second scenario}
  \label{tab:005}
\end{table}
\end{document}
