import numpy as np
import warnings

warnings.filterwarnings('ignore')
from scipy.special import logsumexp


def forward(log_initial, log_transition, log_emission, v):
    """Run the HMM forward pass in log space.

    Args:
        log_initial: log initial state distribution, shape (N,)
        log_transition: log transition matrix, shape (N, N); entry [i, j] is
            the log probability of moving from state i to state j
        log_emission: log emission matrix, shape (V, N); entry [w, j] is the
            log probability of state j emitting symbol w
        v: observed sequence of symbol ids, length T

    Returns:
        log_alpha: array of shape (T, N); log_alpha[t, n] is the log of the
            total probability of all state paths that emit v[:t + 1] and
            end in state n at step t
        log_Z: float, log-likelihood of the whole observation sequence
    """
    T = len(v)
    N = log_emission.shape[1]

    log_alpha = np.zeros((T, N))
    # A product of probabilities becomes a sum of log-probabilities.
    log_alpha[0] = log_initial + log_emission[v[0]]

    for step in range(1, T):
        # scores[i, j] = log_alpha[step-1, i] + log P(j | i) + log P(v[step] | j).
        # The (N, 1) and (1, N) views broadcast against the (N, N) transition
        # matrix to build the full grid of (previous, current) state pairs.
        scores = (log_alpha[step - 1][:, None] +
                  log_transition +
                  log_emission[v[step]][None, :])
        # logsumexp marginalises over the previous state i (axis 0) while
        # staying numerically stable in log space.
        log_alpha[step] = logsumexp(scores, axis=0)

    # Summing the final alphas over all states gives the data likelihood.
    log_Z = logsumexp(log_alpha[-1], axis=0)
    return log_alpha, log_Z


def backward(log_initial, log_transition, log_emission, v):
    """Run the HMM backward pass in log space.

    Args:
        log_initial: log initial state distribution, shape (N,)
        log_transition: log transition matrix, shape (N, N)
        log_emission: log emission matrix, shape (V, N)
        v: observed sequence of symbol ids, length T

    Returns:
        log_beta: array of shape (T, N); log_beta[t, n] is the log
            probability of emitting the suffix v[t + 1:] given that the
            chain is in state n at step t
        log_Z: float, log-likelihood of the whole observation sequence
            (matches the forward pass)
    """
    T = len(v)
    N = log_emission.shape[1]

    # log(1) == 0: at the last step there is nothing left to emit, so every
    # state produces the empty suffix with probability 1.
    log_beta = np.zeros((T, N))

    for step in reversed(range(T - 1)):
        # scores[i, j] = log P(j | i) + log P(v[step+1] | j) + log_beta[step+1, j]
        scores = (log_transition +
                  log_emission[v[step + 1]][None, :] +
                  log_beta[step + 1][None, :])
        # Marginalise over the next state j (axis 1).
        log_beta[step] = logsumexp(scores, axis=1)

    # Folding the initial distribution and the first emission into beta[0]
    # sums the joint probability over all paths: exactly the log-likelihood.
    log_Z = logsumexp(log_initial + log_emission[v[0]] + log_beta[0])

    return log_beta, log_Z


def viterbi(log_initial, log_transition, log_emission, v):
    """Compute the log probability of the single best latent state path.

    Structurally identical to the forward pass, except every `logsumexp`
    (sum over all paths) is replaced by a `max` (keep only the best path).

    Args:
        log_initial: log initial state distribution, shape (N,)
        log_transition: log transition matrix, shape (N, N)
        log_emission: log emission matrix, shape (V, N)
        v: observed sequence of symbol ids, length T

    Returns:
        max_log_p: float, log probability of the most likely latent sequence
    """
    T = len(v)
    N = log_emission.shape[1]

    best = np.zeros((T, N))
    best[0] = log_initial + log_emission[v[0]]

    for step in range(1, T):
        scores = (best[step - 1][:, None] +
                  log_transition +
                  log_emission[v[step]][None, :])
        # max over the previous state (axis 0) instead of logsumexp:
        # only the single best path into each state survives.
        best[step] = scores.max(axis=0)

    # Best score over all possible final states.
    return best[-1].max()


def viterbi_backtracking(log_initial, log_transition, log_emission, v):
    """Find the most likely latent state sequence and its log probability.

    Same recursion as `viterbi`, but additionally records, for every state
    at every step, which previous state achieved the maximum, so the best
    path can be reconstructed by walking the pointers backwards.

    Args:
        log_initial: log initial state distribution, shape (N,)
        log_transition: log transition matrix, shape (N, N)
        log_emission: log emission matrix, shape (V, N)
        v: observed sequence of symbol ids, length T

    Returns:
        max_log_p: float, log probability of the best latent sequence
        max_h: int array of length T, the best latent state sequence
    """
    T = len(v)
    N = log_emission.shape[1]

    best = np.zeros((T, N))
    back_ptr = np.zeros((T, N), dtype=int)  # best predecessor of each (t, state)
    best[0] = log_initial + log_emission[v[0]]

    for step in range(1, T):
        # scores[i, j] = best score of any path ending (i at step-1, j at step).
        scores = (best[step - 1][:, None] +
                  log_transition +
                  log_emission[v[step]][None, :])
        best[step] = scores.max(axis=0)
        back_ptr[step] = scores.argmax(axis=0)  # remember the winning predecessor

    max_log_p_h = best[-1].max()

    # Backtracking: start from the best final state and follow the pointers.
    max_h = np.zeros(T, dtype=int)
    max_h[-1] = int(best[-1].argmax())
    for step in range(T - 2, -1, -1):
        max_h[step] = back_ptr[step + 1, max_h[step + 1]]
    return max_log_p_h, max_h


# Latent states are part-of-speech style tags; build both directions of the
# id <-> name mapping (the inverse maps are derived, so they cannot drift).
id2state = {0: 'Subject', 1: 'Adjective', 2: 'Adverb', 3: 'Verb', 4: 'Object', 5: '<EOS>'}
state2id = {name: idx for idx, name in id2state.items()}

# Observed symbols are words of a tiny 15-word vocabulary.
word2id = {'I': 0, 'He': 1, 'Jack': 2, 'Mary': 3, 'likes': 4, 'loves': 5, 'hates': 6, 'really': 7, 'extremely': 8, 'pretty': 9, 'cute': 10, 'adorable': 11, 'cats': 12, 'dogs': 13, '.': 14}
id2word = {idx: word for word, idx in word2id.items()}


# Hand-specified HMM parameters for the toy tagging model, stored in log
# space. np.log(0.) evaluates to -inf (its RuntimeWarning is suppressed at
# the top of the file), i.e. a transition/emission that can never happen.

# Sentences start with a Subject (p=0.7) or an Adjective (p=0.3).
log_initial_lang = np.log([0.7, 0.3, 0., 0., 0., 0.])

# transition[i, j] = P(next state j | current state i); each row sums to 1.
_transition_probs = np.array([
    [0., 0., 0.3, 0.7, 0., 0.],   # Subject   -> Adverb / Verb
    [0.4, 0.1, 0., 0., 0.5, 0.],  # Adjective -> Subject / Adjective / Object
    [0., 0.3, 0., 0.7, 0., 0.],   # Adverb    -> Adjective / Verb
    [0., 0.3, 0.2, 0., 0.5, 0.],  # Verb      -> Adjective / Adverb / Object
    [0., 0., 0., 0., 0., 1.],     # Object    -> <EOS>
    [0., 0., 0., 0., 0., 1.],     # <EOS>     -> <EOS> (absorbing)
])
log_transition_lang = np.log(_transition_probs)

# Each row below is one state's distribution over the 15 words; the
# transpose yields the (V, N) layout the inference functions expect.
_emission_probs = np.array([
    [0.2, 0.2, 0.2, 0.2, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0.1, 0.],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0.5, 0.25, 0.25, 0, 0, 0.],
    [0, 0, 0, 0, 0, 0, 0, 0.25, 0.25, 0.5, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0.3, 0.4, 0.3, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0.2, 0.2, 0, 0, 0, 0, 0, 0, 0, 0, 0.3, 0.3, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.]
])
log_emission_lang = np.log(_emission_probs).T
# Demo: decode the most likely tag sequence for a sample sentence.
# (The original assigned `v = [0, 1, 0, 1]` here only to overwrite it on the
# next line — dead code removed. Try other word sequences yourself, e.g.
# ["He", "really", "loves", "cats", "."].)
v = ["Jack", "likes", "pretty", "cute", "dogs"]
v_id = [word2id[word] for word in v]  # map words to their integer symbol ids

_, max_h = viterbi_backtracking(log_initial_lang, log_transition_lang, log_emission_lang, v=v_id)
# Print the decoded states as human-readable tag names.
print(' '.join([id2state[h] for h in max_h]))