"""

Computational Economics
2.5: Application: Finite State Optimal Growth
http://johnstachurski.net/lectures/finite_growth.html

REFERENCES
http://en.wikipedia.org/wiki/Dynamic_programming
http://en.wikipedia.org/wiki/Greek_letters_used_in_mathematics,_science,_and_engineering
http://en.wikipedia.org/wiki/Expected_value
http://en.wikipedia.org/wiki/Gamma#Upper_case
http://en.wikipedia.org/wiki/Rho#Math_and_science
http://en.wikipedia.org/wiki/Goodness_of_fit
http://en.wikipedia.org/wiki/State_space_%28dynamical_system%29

DEFINITIONS
    Dynamic Programming
    "In mathematics and computer science, dynamic programming is a method for
    solving complex problems by breaking them down into simpler subproblems."

    iid
    "iid means independent and identically distributed, which means the random
    variables not only are independent, but also have the same distribution"
    (http://www.sosmath.com/CBB/viewtopic.php?p=180637)

    State Space
    "In the theory of discrete dynamical systems, a state space is a directed
    graph where each possible state of a dynamical system is represented"

    Standardized (Beta) Coefficient
    "In statistics, standardized coefficients or beta coefficients are the
    estimates resulting from an analysis carried out on variables that have
    been standardized so that their variances are 1. This means that they refer
    to the expected change in the dependent variable, per standard deviation
    increase in the predictor variable."

OUTLINE OF PROBLEM

    VARIABLES
    W:  random quantity Colonel bags -> random variable on 0,...,B
        with distribution q
    B:  upper bound of what Colonel can catch
    M:  amount of catfish Colonel's freezer can hold
    Xt: stock of fish at noon on day t [state variable]
    Ct: quantity of fish the Colonel consumes
    Rt: quantity the Colonel freezes -> Rt = Xt - Ct
    g:  policy function g(X) = R
    S:  state space (possible values for X)
    G:  set of all feasible policies
    U:  Utility function
    E:  Expected value (the expectation operator)


    VALUE ITERATION ALGORITHM
    Iterate with T on some initial v in bS, producing the sequence (T^n v)

    Continue iteration until
        d_infinity(T^n v, T^(n+1) v) <= some small, positive tolerance, where
        d_infinity = lambda v, w: max(abs(v[x] - w[x]) for x in S)


    Compute a w-greedy policy g, where w is the final iterate

    If the tolerance is small, then g is almost optimal


    PROBLEM
    Formulate a simple, finite state optimal growth problem. Model behavior of
    Colonel Kurtz, who can be found on small island in the Nung river living on
    catfish.

    Catfish bite at dawn, colonel bags a random quantity W. Catfish last only if
    refrigerated, and the Colonel's freezer holds up to M fish. Let Xt be the
    state variable: stock of fish at noon on day t. In the evening, the
    Colonel consumes quantity Ct, and freezes Rt = Xt - Ct

    Solve for the optimal policy:

    * Implement the Bellman operator T as a Python function
          o Elements of bS can be represented as lists or tuples
                + v[0] corresponds to v(0), etc.
          o Takes such a sequence v as an argument and
          o returns another sequence representing Tv
    * Write a function greedy() to compute greedy policies
    * Implement the loop as a function main()
          o Use 0.001 for the tolerance

    For the utility function set
        U(c) = c^BETA, BETA,c >= 0

    For the parameters, use beta, rho, B, M = 0.5, 0.9, 10, 5

    Let q be uniform on 0,...,B

    Plot the sequence of functions created in the value iteration algorithm
        http://johnstachurski.net/lectures/images/value_functions.png

    Print the greedy policy (as a list) calculated from the last iterate

    The solution is

        >>> g
        [0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 5, 5, 5, 5]
"""
import pylab

beta = 0.5      # utility exponent: U(c) = c**beta
rho  = 0.9      # discount factor applied to expected future value in T
B    = 10       # max Colonel can catch
M    = 5        # max fish Colonel's freezer can hold

S = range(B + M + 1)  # State space: possible stock levels 0..B+M
Z = range(B + 1)      # Shock space: possible catch sizes 0..B

# Known optimal policy for these parameters; used by main() as a sanity check.
expected_solution = [0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 5, 5, 5, 5]

def d_infty(v, w):
    """Return the sup-norm (d-infinity) distance between v and w on S."""
    gaps = [abs(v[x] - w[x]) for x in S]
    return max(gaps)

def U(c):
    """Utility of consuming quantity c: U(c) = c**beta."""
    return pow(c, beta)

def q(z):
    """Probability mass function: uniform on {0, ..., B}, zero elsewhere."""
    if 0 <= z <= B:
        return 1.0 / len(Z)
    return 0

def Gamma(x):
    """The correspondence of feasible actions at state x: 0 .. min(x, M)."""
    upper = min(x, M)
    return range(upper + 1)

def T(v):
    """An implementation of the Bellman operator.

    Parameters: v is a sequence representing a function defined on S
    (v[0] corresponds to v(0), etc.).
    Returns: Tv as a list, where Tv[x] is the best value achievable at
    state x over the feasible actions Gamma(x):
        max_a  U(x - a) + rho * sum(v[a + z] * q(z) for z in Z)
    """
    def action_value(x, a):
        # Immediate utility of consuming x - a, plus the discounted
        # expectation of v over tomorrow's stock a + z under the pmf q.
        expected = sum(v[a + z] * q(z) for z in Z)
        return U(x - a) + rho * expected

    return [max(action_value(x, a) for a in Gamma(x)) for x in S]

def greedy(v):
    """Compute a v-greedy policy.

    Parameters: v is a sequence representing a value function on S.
    Returns: a list g where g[x] is the action in Gamma(x) that maximizes
        U(x - a) + rho * sum(v[a + z] * q(z) for z in Z)
    with ties broken in favor of the smallest action (first maximizer).

    Fix: the running maximum previously started at the sentinel -1 and
    `maximizer` was unbound until the first improvement; start from
    -inf / None so the function stays correct even if a value function
    with rewards <= -1 is ever supplied.
    """
    g = []
    for x in S:
        best_value = float('-inf')
        best_action = None
        for a in Gamma(x):
            y = U(x - a) + rho * sum(v[a + z] * q(z) for z in Z)
            if y > best_value:
                best_value = y
                best_action = a
        g.append(best_action)
    return g

def stachurski():
    """Value iteration for the terse solution.

    Starts from v0(x) = U(x), applies the Bellman operator T until the
    sup-norm change between successive iterates drops below the
    tolerance, then prints the greedy policy for the final iterate and
    shows the plotted sequence of value functions.

    Fix: the initial value function called `utility_function`, a name
    not defined anywhere in this module (NameError at runtime); the
    intended function is U.
    """
    current = [U(x) for x in S]  # v0 = U, a sequence representing a function on S
    tolerance = 0.001

    # Iterate v -> Tv, plotting each iterate, until d_infty-convergence.
    while 1:
        pylab.plot(current, 'bo-')

        # new value sequence via the Bellman operator
        new = T(current)

        if d_infty(new, current) < tolerance:
            break

        current = new
    g = greedy(new)
    print('The optimal policy is:')
    print(g)
    pylab.show()


#
# Deconstructed Solution
#
beta_coefficient = 0.5          # utility exponent (beta): curvature of the
                                # reward function,
                                # reward = state**beta_coefficient
rho_coefficient = 0.9           # discount factor (rho) on expected future reward
max_shock = 10                  # max the Colonel can catch (B)
max_utility = 5                 # how much fish Colonel's freezer can hold (M)

state_space = range(max_shock + max_utility + 1)    # S: stock levels 0..B+M
shock_space = range(max_shock + 1)                  # Z: catch sizes 0..B

def measure_discrepancy(new_utility_space, utility_space):
    """Sup-norm distance between two value sequences over state_space."""
    gaps = [abs(new_utility_space[s] - utility_space[s]) for s in state_space]
    return max(gaps)

def calculate_reward(state):
    """U(x): utility of consuming `state` fish, state**beta_coefficient."""
    return pow(state, beta_coefficient)

def get_feasible_action_state(state):
    """Gamma: feasible freeze amounts at `state`, i.e. 0 .. min(state, max_utility)."""
    cap = state if state < max_utility else max_utility
    return range(cap + 1)

def get_shock_probability(shock_state):
    """q(z): uniform pmf over shock_space, zero outside [0, max_shock]."""
    if shock_state < 0 or shock_state > max_shock:
        return 0
    return 1.0 / len(shock_space)

def calculate_reward_for_proposed_action(action, state, rho, reward_space, shock_space):
    """y = U(x - a) + rho * sum(v[a + z] * q(z) for z in Z)

    Immediate utility of consuming (state - action), plus rho times the
    expectation of reward_space over tomorrow's stock (action + shock).
    """
    immediate_reward = calculate_reward(state - action)
    shock_effects = [reward_space[action + shock] * get_shock_probability(shock)
                     for shock in shock_space]
    return immediate_reward + rho * sum(shock_effects)

def bellman_operator(reward_space):
    """An implementation of the Bellman operator (T) for the
    deconstructed solution.

    Parameters: reward_space is a sequence representing a value function
    defined on state_space (reward_space[0] corresponds to v(0), etc.).
    Returns: a new list -- the image of reward_space under T, holding the
    best achievable reward estimate for each state.

    Fix: previously passed `rho` -- the constant belonging to the terse
    section above -- into the reward calculation, silently coupling the
    two otherwise independent implementations. This section's own
    constant rho_coefficient has the same value (0.9), so behavior is
    unchanged.
    """
    new_reward_space = []

    # determine the best reward over each feasible action for each state
    for state in state_space:
        reward_estimates = []  # rewards for each action in Gamma(state)

        for action in get_feasible_action_state(state):

            # y = U(x - a) + rho * sum(v[a + z] * q(z) for z in Z)
            reward_estimate = calculate_reward_for_proposed_action(
                action, state, rho_coefficient, reward_space, shock_space)
            reward_estimates.append(reward_estimate)

        new_reward_space.append(max(reward_estimates))
    return new_reward_space

def maximize_reward_model(reward_space):
    """greedy(v): compute the reward_space-greedy policy.

    Returns a list whose entry for each state is the feasible action
    with the highest estimated reward (first maximizer wins ties).

    Fixes: (1) previously passed `rho` from the terse section instead of
    this section's rho_coefficient (same value 0.9, so behavior is
    unchanged); (2) the running maximum started at the sentinel -1 and
    most_useful_action was unbound until the first improvement -- start
    from -inf / None so the function stays correct even if rewards could
    ever be <= -1.
    """
    max_reward_space = []
    for state in state_space:
        running_max = float('-inf')
        most_useful_action = None
        for action in get_feasible_action_state(state):
            reward_estimate = calculate_reward_for_proposed_action(
                action, state, rho_coefficient, reward_space, shock_space)
            if reward_estimate > running_max:
                running_max = reward_estimate
                most_useful_action = action
        max_reward_space.append(most_useful_action)
    return max_reward_space

def main():
    """Run value iteration for the deconstructed solution, then print,
    check, and plot the resulting optimal policy."""
    # v0: reward of consuming the whole stock at each state
    current_reward_state = [calculate_reward(state) for state in state_space]
    tolerance = 0.001

    # Iterate v -> Tv, plotting each iterate, until the sup-norm change
    # between successive iterates falls below the tolerance.
    converged = False
    while not converged:
        pylab.plot(current_reward_state, 'bo-')
        new_reward_space = bellman_operator(current_reward_state)
        converged = measure_discrepancy(
            new_reward_space, current_reward_state) < tolerance
        if not converged:
            current_reward_state = new_reward_space

    print(new_reward_space)
    optimal_policy_model = maximize_reward_model(new_reward_space)
    assert optimal_policy_model == expected_solution
    print('The optimal policy is:')
    print(optimal_policy_model)
    pylab.title('Optimal Catfish Policy')
    pylab.xlabel('state')
    pylab.ylabel('utility')

    pylab.show()



#
# MAIN
#
# Run the deconstructed solution when executed as a script.
if __name__ == "__main__":
    main()
    # Python 2 print statement: reports the script path on success.
    print '%s: ok' % (__file__)
