"""
    Computational Economics
    3.3: Application: Finite Markov Chains
    http://johnstachurski.net/lectures/markovdynam.html

    DEFINITIONS
    Finite Markov Chain
    If the state space (of a Markov chain) is finite, the transition
    probability distribution can be represented by a matrix, called the
    transition matrix...
    http://en.wikipedia.org/wiki/Markov_chain#Finite_state_space

    Stationary Distribution
    A stationary distribution is a specific entity which is unchanged by the
    effect of some matrix or operator: it need not be unique. Thus stationary
    distributions are related to eigenvectors for which the eigenvalue is unity.
    http://en.wikipedia.org/wiki/Stationary_distribution

    Law of Large Numbers (LLN)
    In probability theory, the law of large numbers (LLN) is a theorem that
    describes the result of performing the same experiment a large number of
    times. According to the law, the average of the results obtained from a
    large number of trials should be close to the expected value, and will tend
    to become closer as more trials are performed.
    http://en.wikipedia.org/wiki/Law_of_large_numbers


    REFERENCES


    PROBLEM



"""
# Module-wide switch for the d() debug printer; timeit() and
# exercise_5() temporarily set it to False.
DEBUG = True

from random import randint
from pprint import pformat

def d(m, n=1):
    global DEBUG
    if DEBUG and randint(1,n) == 1:
        print "\n%s" % (m),
    else:
        print '.',

def output(r, m=''):
    if m: m = "%s\n" % (m)
    print """
%s
result: %s

""" % (m, pformat(r))


#
# EXERCISES
#


"""
PROBLEM 1

Write a function which takes

    * a Markov kernel (matrix) p,
    * an initial condition init which represents X[0], and
    * a positive integer sample_size

and returns a sample path of length sample_size

Hint: Use the DiscreteRV class from this lecture

To test your solution you can use the small matrix

p = np.array([[.4, .6], [.2, .8]])

For a long series, the fraction of the sample that takes value 0 will be about 0.25
"""
import numpy as np
from numpy import cumsum
from numpy.random import uniform


class DiscreteRV:
    """
    A discrete random variable over the states {0, ..., len(q)-1}.

    Built from an array of probabilities q; draw() returns index x
    with probability q[x].
    """

    def __init__(self, q):
        self.set_q(q)

    def set_q(self, q):
        # Store the CDF; draw() inverts it by binary search.
        self.Q = np.cumsum(q)

    def draw(self, n=1):
        """
        Return an array of n independent draws from q.
        """
        u = uniform(0, 1, size=n)
        return self.Q.searchsorted(u)

# A RaffleDrum is just a DiscreteRV under a domain-specific name:
# draw() picks a winner index with the drum's configured probabilities.
class RaffleDrum(DiscreteRV): pass



def sample_path(p, init=0, sample_size=1000):
    """
    A function that generates sample paths of a finite Markov chain with
    kernel p on state space S = [0,...,N-1], starting from state init.

    Parameters:

        * p is a 2D NumPy array, nonnegative, rows sum to 1
        * init is an integer in S
        * sample_size is an integer

    Returns: A flat NumPy array containing the sample
    """
    N = len(p)

    # Let P[x] be the distribution corresponding to p[x,:]
    # P is thus a list of weighted random variable selectors
    P = []
    for x in range(N):
        rv = DiscreteRV(p[x,:])
        d("p[x,:] => p[%s,:%s] %s" % (x, len(p), p[x,:]))
       # d("DiscreteRV(p[x,:]) = %s" % (rv), 3)
        P.append(rv)
    print 'P: %s' % (P)

    X = np.empty(sample_size, dtype=int)
    X[0] = init
    for t in range(sample_size - 1):
        # X[t+1] = P[X[t]].draw() => 0 or 1
        X[t+1] = P[X[t]].draw()
        d("X[t+1] = P[X[t]].draw() => %s" % (X[t+1]), 10)
    return X

def raffle_path(raffle_probability_dists, init=0, sample_size=1000):
    """
    Generate a sample path of the Markov chain whose kernel rows are
    raffle_probability_dists, starting from state init.

    Each state's row is loaded into a RaffleDrum; the winner of each
    raffle decides which drum draws the next winner.

    Parameters:

        * raffle_probability_dists is a 2D NumPy array, rows sum to 1
        * init is an integer state index
        * sample_size is a positive integer

    Returns: A flat NumPy array of winner indices (the sample path)
    """
    num_raffle_probability_dists = len(raffle_probability_dists)

    raffle_drums = []
    for n in range(num_raffle_probability_dists):
        raffle_drum = RaffleDrum(raffle_probability_dists[n,:])
        raffle_drums.append(raffle_drum)

    raffle_winners = np.empty(sample_size, dtype=int)
    raffle_winners[0] = init
    for sample in range(sample_size - 1):
        last_raffle_winner = raffle_winners[sample]
        # draw() returns a size-1 array; take its scalar element so the
        # int assignment stays valid under modern NumPy.
        raffle_winners[sample+1] = raffle_drums[last_raffle_winner].draw()[0]

    return raffle_winners


def exercise_1():
    choice = ['fight', 'fuck', 'flee']
    init_action = choice.index('flee')
    action_probabilities = np.array([[.4, .3, .3], [.1, .3, .6], [.2, .2, .6]])

    action_path = raffle_path(action_probabilities, init_action, 100)

    print [choice[n] for n in action_path]



"""
PROBLEM 2

Write a function which takes p as a parameter and returns the stationary
distribution, assuming that it is unique

You can test it using the matrix

p = np.array([[.4, .6], [.2, .8]])

The (unique) stationary distribution is (0.25, 0.75)
"""
def stationary1(p):
    """
    Compute the (assumed unique) stationary distribution of kernel p
    by linear algebra: solve transpose(I - p + B) x = b, where B is a
    matrix of ones and b a column vector of ones.

    Parameters:

        * p is a 2D NumPy array whose rows are probability distributions

    Returns: A flat array giving the stationary distribution
    """
    N = len(p)                               # p is N x N
    d('input array (p):\n %s' % (p))
    d('len(p) (N):\n %s' % (N))

    I = np.identity(N)
    d('Identity Matrix (I = np.identity(N)):\n %s' % (I))

    B = np.ones((N, N))
    b = np.ones((N, 1))
    d('Matrix (B = np.ones((N, N))):\n %s' % (B))
    d('Vector of Ones (b = np.ones((N, 1))):\n %s' % (b))

    A = (I - p + B).T
    d('Transpositon (np.transpose(I - p + B)):\n %s' % (A))

    solution = np.linalg.solve(A, b)
    d('solution: (solution = np.linalg.solve(A, b)):\n %s' % (solution))

    return solution.flatten()                # Return a flat array

from collections import defaultdict
def my_stationary_dist(pmd_matrix):
    """
    Estimate the stationary distribution of pmd_matrix by brute-force
    simulation: run the chain for 10000 steps and return the relative
    visit frequency of each state.

    Parameters:

        * pmd_matrix is an n x n array; each row is a probability
          mass distribution (nonnegative, sums to 1)

    Returns: A dict mapping state index -> estimated probability
    Raises: Exception if the matrix is not square or a row does not
        sum to 1 (within floating-point tolerance)
    """
    n_states = len(pmd_matrix)
    for pmd in pmd_matrix:
        if n_states != len(pmd):
            raise Exception('pmd_matrix must be n * n: %s' % (pmd_matrix))
        # Compare with a tolerance: valid rows such as [.4, .3, .3]
        # need not sum to exactly 1.0 in floating point.
        if abs(sum(pmd) - 1) > 1e-9:
            raise Exception('each pmd in matrix must sum to 1: %s' % (pmd))

    # Build one DiscreteRV per row up front instead of re-creating one
    # on every step of the chain.
    rvs = [DiscreteRV(pmd_matrix[i]) for i in range(n_states)]

    last_outcome = 0
    results = defaultdict(int)

    total = 10000
    for n in range(total):
        result = rvs[last_outcome].draw()
        last_outcome = result[0]
        results[last_outcome] += 1

    # Normalize counts to relative frequencies.
    normed_results = {}
    for i in results:
        normed_results[i] = float(results[i]) / total

    return normed_results

import time
def timeit(f, *args, **kw):
    global DEBUG
    d0 = DEBUG
    DEBUG = False
    n = kw.get('n', 10)
    print 'timing over %s iterations' % (n)
    t0 = time.time()
    for i in range(n):
        f(*args)
    DEBUG = d0
    return '%.2f' % (time.time() - t0)

def exercise_2():
    """Assuming each row represents a pmd, the results (s1) represents the
    overall probability you get result 0 or 1 (by index)"""
    p = np.array([[.4, .6], [.2, .8]])
    s1 = stationary1(p)
    s2 = my_stationary_dist(p)

    # s1 == s2 -> True
    print s1, s2.values()

    # timeit
    r1 = timeit(stationary1, p)
    r2 = timeit(my_stationary_dist, p)
    print r1, r2


"""
PROBLEM 3

Write a function to compute q this way, assuming stability of p

Use sample_path() (see above) to generate the sample path

Again, you can test it using the matrix

p = np.array([[.4, .6], [.2, .8]])

Results should agree with previous solution
"""
import numpy as np


def stationary2(p, n=1000):
    """
    Estimate the stationary distribution of a stable Markov kernel p
    via the LLN: the fraction of time a long sample path spends in
    state y converges to the stationary probability of y.

    Parameters:

        * p is a 2D NumPy array
        * n is a positive integer giving the sample size

    Returns: An array containing the estimated stationary distribution
    """
    X = raffle_path(p, sample_size=n)    # Sample path starting from X = 0

    # The time-average of the indicator of {X == y} estimates the
    # stationary probability of state y.
    q = np.array([np.mean(X == y) for y in range(len(p))])
    d('raffle_path (X): %s' % (X))
    d('np.mean(X == y) for y = 0: %s' % (np.mean(X == 0)))

    return q

def exercise_3():
    p = np.array([[.4, .6], [.2, .8]])
    r1 = stationary2(p, 1000)
    r2 = stationary2(p, 1000)
    r3 = stationary1(p)
    output({
        'stationary2 (1)' : r1,
        'stationary2 (2)' : r2,
        'stationary1' : r3,
    })

    # compare stationary 1 and 2
    s1 = timeit(stationary1, p)
    s2 = timeit(stationary2, p, 1000)
    print s1, s2

"""
PROBLEM 4

Modify the function stationary2() above

    * Providing a keyword argument lae such that
          o lae=True returns the look-ahead estimator instead
          o lae=False returns the standard Monte Carlo estimator
          o The default is False

Again, you can test it using the matrix

p = np.array([[.4, .6], [.2, .8]])

Results should agree with previous solutions
"""
import numpy as np


def stationary3(p, n=1000, lae=False):
    """
    Estimate the stationary distribution of a stable Markov kernel p
    from a single sample path.

    Parameters:

        * p is a 2D NumPy array
        * n is a positive integer giving the sample size
        * lae: if True use the look-ahead estimator, which averages the
          transition probabilities p[X[t], y] along the path instead of
          indicator functions; if False use standard Monte Carlo

    Returns: An array containing the estimated stationary distribution
    """
    states = range(len(p))
    X = raffle_path(p, sample_size=n)    # Sample path starting from X = 0

    if not lae:
        estimates = [np.mean(X == y) for y in states]
    else:
        d("p[X,y] => p[%s, 0]: %s" % (X, p[X,0]))
        d("np.mean(p[X,y]) for y = 0: %s" % (np.mean(p[X,0])))
        estimates = [np.mean(p[X,y]) for y in states]
    return np.array(estimates)

def exercise_4():
    """Problem 4: compare all three estimators (results and timings).

    Fixes a bug where the results dict used the key
    'stationary3 (lookahead)' twice, so the standard stationary3
    result was silently overwritten before being printed.
    """
    p = np.array([[.4, .6], [.2, .8]])

    # results
    r1 = stationary1(p)
    r2 = stationary2(p, 1000)
    r3 = stationary3(p, 1000, lae=False)
    r3l = stationary3(p, 1000, lae=True)

    # timeit
    t1 = timeit(stationary1, p)
    t2 = timeit(stationary2, p, 1000)
    t3 = timeit(stationary3, p, 1000, False)
    t3l = timeit(stationary3, p, 1000, True)

    # output
    output({
        '-h' : 'results show probability of A or B result (B more likely) given: %s' % (
            p),
        'stationary1' : (r1, t1),
        'stationary2' : (r2, t2),
        'stationary3 (standard)' : (r3, t3),
        'stationary3 (lookahead)' : (r3l, t3l),
    })


"""
PROBLEM 5

* How accurate is the look-ahead estimator compared to standard Monte Carlo?
* Let's generate some observations and compare average accuracy
* The measure of accuracy will be the d1 distance


The matrix we will use is 2000 x 2000, stored in this file

Problem:

    * Download the data file
    * Load the matrix using loadtxt()
    * Solve for the exact stationary distribution q using the first (i.e. linear algebra) technique
    * Generate 100 observations of the Monte Carlo estimator (n = 1000) and
      calculate average d1 distance from q
    * Generate 100 observations of the look-ahead estimator (n = 1000) and
      calculate average d1 distance from q
"""
import numpy as np

def test_estimator(q, f, replications=100):
    """
    The estimator f returns an estimate of q.  Draw n=replications
    observations and return average d1 distance

    Parameters:

        * q is a NumPy array representing the exact distribution
        * f is a function that, when called, returns an estimate of q
            as a NumPy array
        * replications is a positive integer giving the sample size

    Returns: A float
    """
    results = np.empty(replications)
    for i in range(replications):
        results[i] = np.sum(np.abs(f() - q))
    return results.mean()


def exercise_5():
    """Problem 5: compare the average accuracy (d1 distance) of the
    standard Monte Carlo and look-ahead estimators against the exact
    stationary distribution of a large kernel.

    Requires 'matrix_dat.txt' (stated in the problem to be a
    2000 x 2000 transition matrix) in the working directory.
    """
    global DEBUG
    DEBUG = False  # large matrix: suppress per-step debug output
    p = np.loadtxt('matrix_dat.txt')
    q = stationary1(p)  # Exact stationary distribution

    print "Standard MC, average distance:"
    print test_estimator(q, lambda: stationary3(p))

    print "Look-ahead MC, average distance:"
    print test_estimator(q, lambda: stationary3(p, lae=True))




#
# MAIN
#
import sys

# Exercise dispatch table, indexed by (number - 1) in main().
EXERCISES = [exercise_1, exercise_2, exercise_3, exercise_4, exercise_5]

def main():
    num = len(sys.argv) > 1 and int(sys.argv[1]) or len(EXERCISES)

    if num >= 1 and num <= len(EXERCISES):
        print "\n** EXERCISE %s **\n" % (num)
        EXERCISES[num - 1]()
    else:
        print 'specify a number between 1, %s' % (len(EXERCISES))

# Script entry point: run the selected exercise, then confirm completion.
if __name__ == "__main__":
    main()
    print '%s: ok' % (__file__)
