<html>
  <head>
  <title>mdp.py</title>
  </head>
  <body>
  <h3>mdp.py (<a href="../mdp.py">original</a>)</h3>
  <hr>
  <pre>
<span style="color: green; font-style: italic"># mdp.py
# ------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html

</span><span style="color: blue; font-weight: bold">import </span>random

<span style="color: blue; font-weight: bold">class </span>MarkovDecisionProcess<span style="font-weight: bold">:
    
  </span><span style="color: blue; font-weight: bold">def </span>getStates<span style="font-weight: bold">(</span><span style="color: blue">self</span><span style="font-weight: bold">):
    </span><span style="color: darkred">"""
    Return a list of all states in the MDP.
    Not generally possible for large MDPs.
    """
    </span>abstract
        
  <span style="color: blue; font-weight: bold">def </span>getStartState<span style="font-weight: bold">(</span><span style="color: blue">self</span><span style="font-weight: bold">):
    </span><span style="color: darkred">"""
    Return the start state of the MDP.
    """
    </span>abstract
    
  <span style="color: blue; font-weight: bold">def </span>getPossibleActions<span style="font-weight: bold">(</span><span style="color: blue">self</span><span style="font-weight: bold">, </span>state<span style="font-weight: bold">):
    </span><span style="color: darkred">"""
    Return list of possible actions from 'state'.
    """
    </span>abstract
        
  <span style="color: blue; font-weight: bold">def </span>getTransitionStatesAndProbs<span style="font-weight: bold">(</span><span style="color: blue">self</span><span style="font-weight: bold">, </span>state<span style="font-weight: bold">, </span>action<span style="font-weight: bold">):
    </span><span style="color: darkred">"""
    Returns list of (nextState, prob) pairs
    representing the states reachable
    from 'state' by taking 'action' along
    with their transition probabilities.  
    
    Note that in Q-Learning and reinforcement
    learning in general, we do not know these
    probabilities nor do we directly model them.
    """
    </span>abstract
        
  <span style="color: blue; font-weight: bold">def </span>getReward<span style="font-weight: bold">(</span><span style="color: blue">self</span><span style="font-weight: bold">, </span>state<span style="font-weight: bold">, </span>action<span style="font-weight: bold">, </span>nextState<span style="font-weight: bold">):
    </span><span style="color: darkred">"""
    Get the reward for the state, action, nextState transition.
    
    Not available in reinforcement learning.
    """
    </span>abstract

  <span style="color: blue; font-weight: bold">def </span>isTerminal<span style="font-weight: bold">(</span><span style="color: blue">self</span><span style="font-weight: bold">, </span>state<span style="font-weight: bold">):
    </span><span style="color: darkred">"""
    Returns true if the current state is a terminal state.  By convention,
    a terminal state has zero future rewards.  Sometimes the terminal state(s)
    may have no possible actions.  It is also common to think of the terminal
    state as having a self-loop action 'pass' with zero reward; the formulations
    are equivalent.
    """
    </span>abstract

    

  </pre>
  </body>
  </html>
  