#!/usr/bin/env python 

from util import * 
from numpy import *
from math import log
import copy
import sys

# If PRODUCTION is false, don't do smoothing or renormalization
# of alphas and betas (useful for testing/confirming with class notes)
PRODUCTION = True

# Arrays with any dimension larger than this are summarized instead of
# printed in full (see format_array / HMM.__repr__).
MAX_PRINTING_SIZE = 30

def format_array(arr):
    """Render a 1-D or 2-D numpy array as fixed-point text.

    Arrays with any dimension larger than MAX_PRINTING_SIZE are
    summarized by shape instead of being printed element by element.
    """
    dims = shape(arr)
    too_wide = len(dims) == 2 and dims[1] > MAX_PRINTING_SIZE
    if dims[0] > MAX_PRINTING_SIZE or too_wide:
        return "[  too many values (%s)   ]" % str(dims)

    if len(dims) == 1:
        cells = " ".join(["%.6f" % float(v) for v in arr])
        return "[  " + cells + "  ]"

    # 2-D case: one bracketed line per row.
    rows = []
    for row in arr:
        cells = "  ".join(["%.6f" % float(v) for v in row])
        rows.append("[  " + cells + "  ]")
    return "\n".join(rows)


def format_array_print(arr):
    """Write the pretty-printed rendering of arr to stdout."""
    sys.stdout.write(format_array(arr) + "\n")

def init_random_model(N, max_obs, seed=None):
    """Create a randomly-initialized HMM parameter tuple.

    N        -- number of hidden states
    max_obs  -- number of possible observation symbols
    seed     -- optional RNG seed for reproducibility; None reseeds
                from system entropy

    Returns (initial, tran_model, obs_model) where initial is a
    length-N uniform vector, tran_model is an N x N row-stochastic
    transition matrix, and obs_model is an N x max_obs row-stochastic
    observation matrix.
    """
    if seed is None:
        random.seed()
    else:
        random.seed(seed)

    # Random (unnormalized) transition and observation parameters.
    tran_model = random.random([N,N])
    obs_model  = random.random([N,max_obs])

    # The initial distribution is deliberately uniform.  (A previous
    # random draw for it here was dead code: it was unconditionally
    # overwritten by ones([N]), so it has been removed.)
    initial    = ones([N])

    # Normalize so each distribution sums to one.
    initial    = initial/sum(initial)
    for i in range(N):
        tran_model[i,:] = tran_model[i,:]/sum(tran_model[i,:])
        obs_model[i,:]  = obs_model[i,:]/sum(obs_model[i,:])

    return (initial, tran_model, obs_model)


def string_of_model(model, label):
    """Return a printable multi-line description of a model tuple.

    model -- (initial, tran_model, obs_model)
    label -- short name shown in the header line
    """
    (initial, tran_model, obs_model) = model
    parts = (label,
             format_array(initial),
             format_array(tran_model),
             format_array(obs_model))
    return """
Model: %s 
initial: 
%s

transition: 
%s

observation: 
%s

""" % parts

    
def check_model(model):
    """Check that the model's distributions each sum to one.

    model -- (initial, tran_model, obs_model) tuple.
    Raises AssertionError if any distribution is off by more than 0.01.
    """
    (initial, tran_model, obs_model) = model
    # The initial distribution is a single vector: check it once,
    # instead of redundantly re-checking it inside the per-state loop.
    assert((abs(sum(initial) - 1)) <= 0.01)
    for state in range(len(initial)):
        assert((abs(sum(tran_model[state,:]) - 1)) <= 0.01)
        assert((abs(sum(obs_model[state,:]) - 1)) <= 0.01)

def print_model(model, label):
    """Validate model's distributions, then print it under label."""
    check_model(model)
    sys.stdout.write(string_of_model(model, label) + "\n")

def max_delta(model, new_model):
    """Largest absolute elementwise difference between corresponding
    arrays of two model tuples (used as a convergence measure)."""
    deltas = [abs(old - new).max() for old, new in zip(model, new_model)]
    return max(deltas)


def get_alpha(obs, model):
    """Forward pass of the forward-backward algorithm.

    obs   -- sequence of 0-indexed observation symbols
    model -- (initial, tran_model, obs_model) tuple

    Returns (alpha, loglikelyhood), where alpha[t, state] is the forward
    probability (normalized per time step when PRODUCTION is set) and
    loglikelyhood is the log probability of the whole sequence.

    Note: doing normalization as described in Ghahramani '01--just normalizing
    both alpha and beta to sum to 1 at each time step."""

    normalize = PRODUCTION
    (initial, tran_model, obs_model) = model
    N = shape(tran_model)[0]
    n = len(obs)
    loglikelyhood = 0

    # indexes: alpha[time,state]
    alpha = zeros((n,N))
    # Base case: alpha[0,s] = P(start in s) * P(obs[0] | s).
    alpha[0,:] = initial * obs_model[:,obs[0]]
    if normalize:
        normalization = sum(alpha[0,:])
        alpha[0,:] /= normalization
        # adding the logs of the normalization gives the 
        # log likelyhood of the sequence.  Why?
        loglikelyhood += log(normalization)

    # Induction: alpha[t,j] = (sum_i alpha[t-1,i] * P(i->j)) * P(obs[t] | j).
    for t in range(1,n):
        for j in range(N):
            s = sum(tran_model[:,j]*alpha[t-1,:])
            alpha[t,j] = s * obs_model[j,obs[t]]
        if normalize: 
            normalization = sum(alpha[t,:])
            loglikelyhood += log(normalization)
            alpha[t,:] /= normalization
    
    if not normalize:
        # Without per-step normalization the sequence likelihood is
        # simply the sum of the final-time alphas.
        loglikelyhood = log (sum(alpha[n-1,:]))
        
    return alpha, loglikelyhood

def get_beta(obs,model):
    """Backward pass of the forward-backward algorithm.

    obs   -- sequence of 0-indexed observation symbols
    model -- (initial, tran_model, obs_model) tuple

    Returns beta, where beta[t, state] is the backward probability of the
    observations after time t (normalized per time step when PRODUCTION
    is set).

    Note: doing normalization as described in Ghahramani '01--just normalizing
    both alpha and beta to sum to 1 at each time step."""
    normalize = PRODUCTION

    (initial, tran_model, obs_model) = model
    N = shape(tran_model)[0]
    n = len(obs)
    # beta[time,state]
    beta = zeros((n,N))

    # Base case: beta at the final time step is uniform.
    beta[n-1,:] = ones(N)
    if normalize:
        beta[n-1,:] /= N
    # Induction, working backwards from the end of the sequence:
    # beta[t,i] = sum_j P(i->j) * P(obs[t+1] | j) * beta[t+1,j].
    for t in range(n-2,-1,-1):
        for i in range(N):
            beta[t,i] = sum(tran_model[i,:]*obs_model[:,obs[t+1]]*beta[t+1,:])
        if normalize:
            normalization = sum(beta[t,:])
            beta[t,:] /= normalization
    return beta


def get_gamma(alpha, beta):
    """Posterior state probabilities: gamma[t,i] is proportional to
    alpha[t,i] * beta[t,i], normalized to sum to one at each time step."""
    n, N = shape(alpha)
    gamma = zeros((n, N))
    for t in range(n):
        product = alpha[t, :] * beta[t, :]
        gamma[t, :] = product / sum(product)
    return gamma


def get_xi(obs, alpha, beta, model):
    """Expected transition posteriors: xi[t,i,j] is the probability of
    being in state i at time t and state j at time t+1, given the
    observations.  The final time slice xi[n-1] is left as zeros."""
    (initial, tran_model, obs_model) = model
    N = shape(tran_model)[0]
    n = len(obs)
    xi = zeros((n, N, N))
    for t in range(n - 1):
        for i in range(N):
            xi[t, i, :] = (alpha[t, i] * tran_model[i, :]
                           * obs_model[:, obs[t + 1]] * beta[t + 1, :])
        # Normalize this time slice so it sums to one (accumulating the
        # row sums in the same order as the fill loop above).
        total = 0
        for i in range(N):
            total += sum(xi[t, i, :])
        xi[t, :, :] = xi[t, :, :] / total
    return xi



# Note: This implementation is as presented in the Rabiner '89 HMM tutorial.
# Variable definitions
# obs    = list of numpy arrays representing multiple observation sequences
# K = the number of observation sequences
# N = num hidden states 
# M = number of possible observations (assuming 0-indexed)
# For each observation sequence:
#   n = number of observations in the sequence.  (indexed 0..n-1)
def baumwelch(obs,N,M,debug=False,init_model=None):    
    """Train an HMM on observation sequences with Baum-Welch (EM).

    obs        -- list of observation sequences, each a sequence of
                  0-indexed observation symbols
    N          -- number of hidden states
    M          -- number of possible observation symbols
    debug      -- if True, print models and E-step tables each iteration
    init_model -- optional (initial, tran_model, obs_model) starting
                  model; when None, a seeded random model is generated

    Returns (tran_model, obs_model, initial) after convergence.  The
    loop stops when either the parameters stop changing or the relative
    log-likelihood improvement becomes negligible (see the two
    termination tests at the bottom).
    """
    K = len(obs)

    if debug:
        print "K=%d N=%d  M=%d" % (K, N, M)

    smoothing = PRODUCTION

    if init_model == None:
        if debug:
            seed = 42
        else:
            # Just making things deterministic for now.
            # Change to "seed = None" if you want to experiment with
            # random restart, for example.
            seed = 42   
        model = init_random_model(N,M, seed)
    else:
        model = init_model

    if debug:
        print_model(model, "Initial random")
    
    # Loop variables
    iters = 1
    # Keep track of the likelihood of the observation sequences
    loglikelihoods = []    
    while True:
        if debug:
            print "\n\n======= Starting iteration %d ========" % iters
        # Pull out latest parameters
        (initial, tran_model, obs_model) = model

        # Expected-count accumulators for the E-step.  With smoothing
        # on, they start from uniform pseudocounts so no probability
        # can be re-estimated to exactly zero.
        if smoothing:
            # Using prior that we've been in every state once, and seen
            # uniform everything.
            N_ho = ones((N,M)) / M
            N_h1h2 = ones((N,N)) / N
            # Number of times in each initial state (for init model)
            N_h1 = ones(N) / N
        
            # Number of times in each state at all (for obs model)
            N_h = ones(N)
        else:
            N_ho = zeros((N,M))
            N_h1h2 = zeros((N,N))
            # Number of times in each initial state (for init model)
            N_h1 = zeros(N)
        
            # Number of times in each state at all (for obs model)
            N_h = zeros(N)


        # Deep copy is required: the M-step below mutates the parameter
        # arrays in place, and the old values are needed for the
        # convergence test.
        old_model = copy.deepcopy(model)
        
        #### Expectation step ####
        loglikelihoods.append(0.0)
        for seq in obs:
            if debug:
                print "Processing sequence: ", seq

            n = len(seq)
            # Forward/backward passes and posteriors for this sequence.
            alpha, loglikelihood = get_alpha(seq, model)
            beta = get_beta(seq, model)
            gamma = get_gamma(alpha, beta)
            xi = get_xi(seq, alpha, beta, model)

            loglikelihoods[-1] += loglikelihood

            if debug:
                print "alpha: "
                format_array_print (alpha)
                print "beta: "
                format_array_print (beta)
                print "gamma: "
                format_array_print (gamma)
                print "xi: "
                print xi

            # Assertions: gamma[t] must equal xi[t] summed over the
            # next state (both are posteriors over the state at t).
            for t in range(n-1):
                for i in range(N):
                    delta = abs(gamma[t,i] - sum(xi[t,i,:]))
                    epsilon = 0.0001
                    if delta > epsilon:
                        print (
                            "ERROR: abs(gamma[t,i] - sum(xi[t,i,:])) = %f > %f" % 
                            (delta, epsilon))
                        assert(delta < epsilon)

            # initial counts
            N_h1 += gamma[0,:]

            # transition counts
            for i in range(N):
                for j in range(N):
                    N_h1h2[i,j] += sum(xi[:n-1,i,j])
            
            # observation counts
            for t in range(n):
                k = seq[t]
                for j in range(N):
                    N_ho[j,k] += gamma[t,j]

            # counts for how often we're ever in each state 
            for j in range(N):
                N_h[j] += sum(gamma[:,j]) 


        ### Maximization step ###
        # Re-estimate each distribution as its normalized expected counts.
        initial = N_h1 / sum(N_h1)

        # transition model
        for i in range(N):
            s = sum(N_h1h2[i,:])
            for j in range(N):
                tran_model[i,j] = N_h1h2[i,j] / s

        # observation model
        for i in range(N):
            for k in range(M):
                obs_model[i,k] = N_ho[i,k] / N_h[i]


        model = (initial, tran_model, obs_model)

        # Termination
        if debug:
            print_model(model, "After %d iterations" % iters)
        delta = max_delta(model, old_model)
        print "Iters = %d, delta = %f, Log prob of sequences: %f" % (
            iters, delta, loglikelihoods[-1])
        sys.stdout.flush()

        iters += 1

        # Relative improvement in total log-likelihood since the last
        # iteration; defaults to 1 so we never stop on the first pass.
        improvement = 1
        if len(loglikelihoods) > 1:
            cur = loglikelihoods[-1]
            prev = loglikelihoods[-2]
            
            improvement = (cur-prev) / abs(prev)

        # Two ways to stop: 
        # (1) the probs stop changing
        epsilon = 0.001
        if delta < epsilon:
            print "Converged to within %f!\n\n" % epsilon
            break
        
        # (2) the improvement in log likelyhood is too small to bother
        smaller = 0.0002
        if improvement < smaller:
            print "Converged. Log likelyhood improvement was less that %f.\n\n" % smaller
            break

    return tran_model, obs_model, initial




class HMM:
    """ HMM Class that defines the parameters for HMM.

    Holds the state/output alphabets and the three parameter arrays
    (initial, transition, observation), plus their element-wise logs
    once compute_logs() has run.  NOTE: this file is Python 2 (print
    statements; map() returning lists).
    """
    def __init__(self, states, outputs):
        """If the hmm is going to be trained from data with labeled states,
        states should be a list of the state names.  If the HMM is
        going to trained using EM, states can just be range(num_states)."""
        self.states = states
        self.outputs = outputs
        n_s = len(states)
        n_o = len(outputs)
        self.num_states = n_s
        self.num_outputs = n_o
        # Parameters start at zero until learned or set explicitly:
        # initial[s], transition[s1, s2], observation[s, o].
        self.initial = zeros(n_s)
        self.transition = zeros([n_s,n_s])
        self.observation = zeros([n_s, n_o])

    def check(self):
        """Assert that all of the model's distributions sum to one."""
        check_model( (self.initial, self.transition, self.observation))

    def set_hidden_model(self, init, trans, observ):
        """ Debugging function: set the model parameters explicitly,
        validate them, and cache their logs. """
        self.num_states = len(init)
        self.num_outputs = len(observ[0])
        self.initial = array(init)
        self.transition = array(trans)
        self.observation = array(observ)
        self.check()
        self.compute_logs()

    def compute_logs(self):
        """Compute and store the logs of the model"""
        # NOTE: in Python 2, map() returns lists, so these are (nested)
        # plain lists of floats rather than numpy arrays.
        f = lambda xs: map(log, xs)
        self.log_initial = f(self.initial)
        self.log_transition = map(f, self.transition)
        self.log_observation = map(f, self.observation)

    def __repr__(self):
        # Summarize alphabets that are too long to print usefully.
        if len(self.states) > MAX_PRINTING_SIZE:
            statestr = " <too many states to print (%d)>" % len(self.states)
        else:
            statestr = " ".join(array_to_string(self.states))
        if len(self.outputs) > MAX_PRINTING_SIZE:
            outputstr = " <too many outputs to print (%d)>" % len(self.outputs)
        else:
            outputstr = " ".join(array_to_string(self.outputs))

        return """states = %s
observations = %s
%s
""" % (statestr,
       outputstr,
       string_of_model((self.initial, self.transition, self.observation), ""))

     
    def learn_from_labeled_data(self, state_seqs, obs_seqs):
        """
        Learn the parameters given state and observations sequences. 
        states[i][j] must correspond with observations[i][j].
        Uses Laplacian smoothing to avoid zero probabilities.
        """
        # Fill this in...  (intentionally unimplemented exercise stub)
        raise Exception("Not implemented")

#         self.initial = normalize(...)
#         self.transition = ...
#         self.observation = ...
#         self.compute_logs()
        

                     
    # declare the @ decorator just before the function, invokes print_timing()
    @print_timing
    def learn_from_observations(self, instances, debug=False):
        """
        Learn hmm parameters based on the specified instances.
        This would find the maximum likelyhood transition model,
        observation model, and initial probabilities.
        """
        # Delegates to the module-level Baum-Welch EM trainer.
        (self.transition, 
         self.observation,
         self.initial) = baumwelch(instances,
                                   len(self.states), 
                                   len(self.outputs), 
                                   debug)
        
        self.compute_logs()
        

    # Return the log probability that this hmm assigns to a particular output
    # sequence
    def log_prob_of_sequence(self, sequence):
        """Log probability of an output sequence under this model,
        computed via the (normalized) forward pass."""
        model = (self.initial, self.transition, self.observation) 
        alpha, loglikelyhood = get_alpha(sequence, model)

        return loglikelyhood

    def most_likely_states(self, sequence, debug=False):
        """Return the most like sequence of states given an output sequence.
        Uses Viterbi algorithm to compute this.
        """
        # Code modified from wikipedia
        # Modifications: use logs, don't compute total prob of sequence

        # Change this to use logs
        # NOTE(review): despite the comment above, raw probabilities are
        # multiplied below (v_prob *= p), so very long sequences can
        # underflow to 0.0 -- the "use logs" TODO is still outstanding.

        cnt = 0
        states = range(0, self.num_states)
        # T maps state -> (best path ending in state, its probability).
        T = {}
        for state in states:
            ##          V.path   V. prob.
            output = sequence[0]
            p = self.initial[state] * self.observation[state][output]
            T[state] = ([state], p)
        for output in sequence[1:]:
            cnt += 1
            if debug:
                if cnt % 500 == 0:
                    print "processing sequence element %d" % cnt
                    sys.stdout.flush()
            U = {}
            for next_state in states:
                # Find the best predecessor state for next_state.
                argmax = None
                valmax = None
                for source_state in states:
                    (v_path, v_prob) = T[source_state]
                    p = (self.transition[source_state][next_state] *
                         self.observation[next_state][output])
                    v_prob *= p

                    if valmax is None or v_prob > valmax:
                        argmax = v_path
                        valmax = v_prob
                # Using a nested (reversed) list for performance
                # reasons: the wikipedia code does a list copy, which
                # causes problems with long lists.  The reverse is
                # needed to make the flatten easy.  (This is
                # essentially using a lisp-like Cons cell representation)
                argmax = [next_state, argmax]
                U[next_state] = (argmax, valmax)
            T = U
        ## apply sum/max to the final states:
        argmax = None
        valmax = None
        for state in states:
            (v_path, v_prob) = T[state]
#            print "%s  %s" % T[state]
            if valmax is None or v_prob > valmax:
                argmax = v_path
                valmax = v_prob

        # Kept the list as in reverse order, and nested to make things fast.
        ans = custom_flatten(argmax)
        ans.reverse()
        return ans

    def gen_random_sequence(self, n):
        """
        Use the underlying model to generate a sequence of 
        n (state, observation) pairs
        """
        # pick a starting point (random_from_dist comes from util)
        state = random_from_dist(self.initial);
        obs = random_from_dist(self.observation[state])
        seq = [(state,obs)]
        for i in range(1,n):
            state = random_from_dist(self.transition[state])
            obs = random_from_dist(self.observation[state])
            seq.append( (state, obs) )
        return seq

def get_wikipedia_model():
    """Build the two-state rainy/sunny HMM from the Wikipedia Viterbi
    article (states Rainy/Sunny; outputs walk/shop/clean)."""
    model = HMM(['Rainy','Sunny'], ['walk','shop','clean'])
    model.set_hidden_model(
        [0.6, 0.4],
        [[0.7,0.3], [0.4,0.6]],
        [[0.1,0.4,0.5], [0.6,0.3,0.1]])
    return model

def test():
    """Sanity-check the HMM code against the wikipedia rainy/sunny
    example: sequence likelihood and Viterbi decoding."""
    hmm = get_wikipedia_model()
    print "HMM is:"
    print hmm
    
    # Observation indices 0,1,2 correspond to walk, shop, clean.
    seq = [0,1,2]
    logp = hmm.log_prob_of_sequence(seq)
    p = exp(logp)
    print "prob ([walk, shop, clean]): logp= %f  p= %f" % (logp, p)
    print "most likely states (walk, shop, clean) = %s" % hmm.most_likely_states(seq)

# Script entry point: run the wikipedia sanity-check demo.
if __name__ == "__main__":
    test()
