import numpy as np

def EM(training_data, DIM , num_of_model , num_of_state):
    '''
    Baum-Welch (EM) training of left-to-right HMMs with diagonal-Gaussian states.

    EM(training_data, DIM, num_of_model, num_of_state)

    Parameters
    ----------
    training_data : iterable of dict
        Each item holds "modelID" (digit label; "Z" maps to model 0, "O" to
        model 10, other labels via int()) and "features", a (DIM, T) array of
        frame features for one utterance.
    DIM : int
        Feature dimensionality per frame (e.g. 39 MFCCs).
    num_of_model : int
        Number of HMMs (word classes).
    num_of_state : int
        Number of emitting states per HMM.

    Returns
    -------
    mean : ndarray, shape (DIM, num_of_state, num_of_model)
    var : ndarray, shape (DIM, num_of_state, num_of_model)
    Aij : ndarray, shape (num_of_state+2, num_of_state+2, num_of_model)
        Transition matrices including the non-emitting entry (0) and
        exit (num_of_state+1) states.
    '''
    # Per-state Gaussian means and (diagonal) variances for every model.
    mean = np.zeros((DIM, num_of_state, num_of_model))
    var = np.zeros((DIM, num_of_state, num_of_model))

    # Transition probabilities; +2 rows/cols for the entry and exit states.
    Aij = np.zeros((num_of_state + 2, num_of_state + 2, num_of_model))

    # Global frame statistics for a "flat start": every state of every model
    # is initialised with the same global Gaussian.
    sum_of_features = np.zeros((DIM, 1))
    sum_of_features_square = np.zeros((DIM, 1))
    num_of_feature = 0

    for item in training_data:
        features = item["features"]
        # Accumulate sum, sum of squares, and total frame count.
        sum_of_features = sum_of_features + np.sum(features, axis=1).reshape(-1, 1)
        sum_of_features_square = sum_of_features_square + np.sum(features * features, axis=1).reshape(-1, 1)
        num_of_feature = num_of_feature + features.shape[1]

    # Hoisted loop invariants: global mean/variance are the same for all states.
    global_mean = (sum_of_features / num_of_feature).reshape(-1)
    global_var = (sum_of_features_square / num_of_feature).reshape(-1) - global_mean ** 2

    for k in range(num_of_model):
        for m in range(num_of_state):
            mean[:, m, k] = global_mean
            var[:, m, k] = global_var

        # Left-to-right topology: stay with 0.6, advance with 0.4.
        for i in range(1, num_of_state + 1):
            Aij[i, i + 1, k] = 0.4
            Aij[i, i, k] = 1 - Aij[i, i + 1, k]

        # The entry state always moves into the first emitting state.
        Aij[0, 1, k] = 1

    num_of_iteration = 20
    log_likelihood_iter = np.zeros((1, num_of_iteration))
    likelihood_iter = np.zeros((1, num_of_iteration))

    # Renamed from "iter" to avoid shadowing the builtin.
    for iteration in range(num_of_iteration):

        # E-step accumulators over all utterances, per model.
        sum_mean_numerator = np.zeros((DIM, num_of_state, num_of_model))
        sum_var_numerator = np.zeros((DIM, num_of_state, num_of_model))
        sum_aij_numerator = np.zeros((num_of_state, num_of_state, num_of_model))
        sum_denominator = np.zeros((num_of_state, num_of_model))
        log_likelihood = 0
        likelihood = 0

        for item in training_data:
            # Map the label to a model index: "Z"(zero) -> 0, "O"(oh) -> 10,
            # plain digits via int().
            k = item["modelID"]
            if k == "Z":
                k = 0
            elif k == "O":
                k = 10
            else:
                k = int(k)

            obs = item["features"]
            (mean_numerator, var_numerator, aij_numerator,
             denominator, log_likelihood_i, likelihood_i) = EM_HMM_FR(
                mean[:, :, k], var[:, :, k], Aij[:, :, k], obs)

            # Strip the non-emitting entry/exit rows/cols before accumulating.
            sum_mean_numerator[:, :, k] = sum_mean_numerator[:, :, k] + mean_numerator[:, 1:-1]
            sum_var_numerator[:, :, k] = sum_var_numerator[:, :, k] + var_numerator[:, 1:-1]
            sum_aij_numerator[:, :, k] = sum_aij_numerator[:, :, k] + aij_numerator[1:-1, 1:-1]
            sum_denominator[:, k] = sum_denominator[:, k] + denominator[1:-1].reshape(-1)

            log_likelihood = log_likelihood + log_likelihood_i
            likelihood = likelihood + likelihood_i

        # M-step: re-estimate the Gaussians from the accumulated statistics.
        for k in range(num_of_model):
            for n in range(num_of_state):
                mean[:, n, k] = sum_mean_numerator[:, n, k] / sum_denominator[n, k]
                var[:, n, k] = sum_var_numerator[:, n, k] / sum_denominator[n, k] - mean[:, n, k] ** 2

        # M-step: re-estimate the transition probabilities.
        for k in range(num_of_model):
            for i in range(1, num_of_state + 1):
                for j in range(1, num_of_state + 1):
                    Aij[i, j, k] = sum_aij_numerator[i - 1, j - 1, k] / sum_denominator[i - 1, k]
            # Probability of leaving the last emitting state for the exit state.
            Aij[num_of_state, num_of_state + 1, k] = 1 - Aij[num_of_state, num_of_state, k]
            # BUG FIX: this assignment used to sit OUTSIDE the k-loop, so only
            # the last model's exit-state self-loop was set here (the code only
            # worked because EM_HMM_FR also sets it on the passed-in view).
            Aij[num_of_state + 1, num_of_state + 1, k] = 1

        log_likelihood_iter[0][iteration] = log_likelihood
        likelihood_iter[0][iteration] = likelihood
        print("%d TRAINING DONE" % iteration)
    print("HMM TRAINING DONE")
    print("START TO TEST")
    return mean, var, Aij


def EM_HMM_FR(mean, var, Aij, features):
    """
    One forward/backward (E-step) pass of Baum-Welch over a single utterance.

    EM_HMM_FR(mean[:,:, k], var[:,:, k], Aij[:,:, k], features)

    Parameters
    ----------
    mean, var : ndarray, shape (dim, num_of_state)
        Diagonal-Gaussian parameters of the emitting states only; they are
        padded below with NaN columns for the entry/exit states.
    Aij : ndarray, shape (num_of_state + 2, num_of_state + 2)
        Transition matrix including entry (row/col 0) and exit (-1) states.
        NOTE(review): mutated in place below (Aij[-1][-1] = 1); a caller that
        passes a NumPy view (as EM does with Aij[:,:,k]) will see the change.
    features : ndarray, shape (dim, T)
        Observation frames of one utterance.

    Returns
    -------
    mean_numerator, var_numerator : ndarray, shape (dim, N)
    aij_numerator : ndarray, shape (N, N)
    denominator : ndarray, shape (N, 1)
    log_likelihood, likelihood : float
        N = num_of_state + 2; entry/exit rows are left zero and stripped by
        the caller. likelihood = exp(log_likelihood) and may underflow to 0.
    """

    dim, T = features.shape

    # Pad mean/var with NaN columns so state indices align with the padded Aij
    # (indices 0 and N-1 are the non-emitting entry/exit states).
    mean = np.concatenate((np.full([dim, 1], np.nan), mean, np.full([dim, 1], np.nan)), axis=1)

    var = np.concatenate(([np.full([dim, 1], np.nan), var, np.full([dim, 1], np.nan)]), axis=1)

    # Exit state absorbs; note this writes through to the caller's array.
    Aij[-1][-1] = 1
    N = mean.shape[1]
    # log-domain forward/backward trellises, initialised to log(0) = -inf.
    log_alpha = np.full([N, T + 1], -np.inf)
    log_beta = np.full([N, T + 1], -np.inf)

    # Forward initialisation: entry state 0 transitions into state i at t=0.
    for i in range(N):
        log_alpha[i][0] = np.log(Aij[0][i]) + logGaussian(mean[:, i], var[:, i], features[:, 0])

    # Forward recursion over emitting states 1..N-2.
    for t in range(1, T):
        for j in range(1, N-1):
            log_alpha[j, t] = log_sum_alpha(log_alpha[1:N-1, t-1], Aij[1:N-1, j]) + \
                              logGaussian(mean[:, j], var[:, j], features[:, t])

    # Termination: transition of every emitting state into the exit state.
    log_alpha[N-1,T] = log_sum_alpha(log_alpha[1:N-1,T-1],Aij[1:N-1,N-1])

    # Backward initialisation from the exit-state transition probabilities.
    log_beta[:,T-1] = np.log(Aij[:, N-1])
    # Backward recursion; range(start, stop, step) runs t = T-1 ... 1.
    for t in range((T-1), 0, -1):
        for i in range(1, N-1):
            log_beta[i, t-1] = log_sum_beta(Aij[i, 1:N-1], mean[:, 1:N-1], var[:, 1:N-1], features[:, t], log_beta[1:N-1, t])

    log_beta[N-1, 0] = log_sum_beta(Aij[0, 1:N-1], mean[:, 1:N-1], var[:, 1:N-1], features[:, 0], log_beta[1:N-1, 0])

    # log Xi(i, j, t): posterior of the transition i->j at time t, normalised
    # by the total utterance log-likelihood log_alpha[N-1, T].
    log_Xi = -np.full([N, N, T], np.inf)
    for t in range(0, T-1):
        for j in range(1, N-1):
            for i in range(1, N-1):
                log_Xi[i, j, t] = log_alpha[i, t] + np.log(Aij[i, j]) + \
                                  logGaussian(mean[:, j], var[:, j], features[:, t+1]) + \
                                  log_beta[j, t+1] - log_alpha[N-1, T]

    # Final-frame transitions into the exit state.
    for i in range(0,N):
        log_Xi[i,N-1,T-1] = log_alpha[i,T-1] + np.log(Aij[i,N-1]) - log_alpha[N-1, T]

    # log gamma(i, t): posterior state-occupancy probabilities.
    log_gamma = -np.full([N, T], np.inf)

    for t in range(0, T):
        for i in range(1, N-1):
            log_gamma[i, t] = log_alpha[i, t] + log_beta[i, t] - log_alpha[N-1, T]

    gamma = np.exp(log_gamma)

    # Baum-Welch sufficient statistics for the caller's M-step.
    mean_numerator = np.zeros((dim,N))
    var_numerator = np.zeros((dim,N))
    denominator = np.zeros((N,1))
    aij_numerator = np.zeros((N,N))

    for j in range(1,N-1):
        for t in range(0,T):
            mean_numerator[:,j] = mean_numerator[:,j] + np.dot(gamma[j,t], features[:,t])
            var_numerator[:,j] = var_numerator[:,j] + np.dot(gamma[j,t],features[:,t]) * features[:,t]
            # denominator: total expected occupancy of state j
            denominator[j] = denominator[j] + gamma[j,t]

    # Expected transition counts i -> j, summed over time.
    for i in range(1,N-1):
        for j in range(1,N-1):
            for t in range(0,T):
                aij_numerator[i,j] = aij_numerator[i,j] + np.exp(log_Xi[i,j,t])

    log_likelihood = log_alpha[N-1,T]
    likelihood = np.exp(log_alpha[N-1,T])

    return mean_numerator, var_numerator, aij_numerator, denominator, log_likelihood, likelihood


def logGaussian(mean_i, var_i, o_i):
    """
    Log-density of observation o_i under a diagonal Gaussian N(mean_i, diag(var_i)).

    All three arguments are 1-D arrays of the same length; returns a scalar.
    """
    n_dims = len(var_i)
    const_term = n_dims * np.log(2 * np.pi)
    log_det = np.sum(np.log(var_i))
    mahalanobis = np.sum((o_i - mean_i) ** 2 / var_i)
    return -0.5 * (const_term + log_det + mahalanobis)


def log_sum_alpha(log_alpha_t, aij_j):
    """
    Numerically stable log-sum-exp of log_alpha_t[i] + log(aij_j[i]) over i.

    Computes log(sum_i alpha_t[i] * a_ij[i]) without leaving log space, by
    factoring out the maximum term to avoid exp underflow.

    Parameters
    ----------
    log_alpha_t : ndarray, 1-D
        Forward log-probabilities at the previous time step.
    aij_j : ndarray, 1-D
        Transition probabilities into the target state (same length).

    Returns
    -------
    float
        The log of the summed product, or -inf if every term is zero.
    """
    len_x = log_alpha_t.shape[0]
    y = np.full(len_x, -np.inf)

    ymax = -np.inf
    for i in range(len_x):
        y[i] = log_alpha_t[i] + np.log(aij_j[i])
        if y[i] > ymax:
            ymax = y[i]

    # All terms have zero probability: the sum is zero, i.e. -inf in log space.
    # BUG FIX: the old inner-loop guard read "ymax == --np.inf" (double unary
    # minus, i.e. +inf) and could never match; with this early return the
    # -inf - -inf = NaN hazard is avoided altogether.
    if ymax == -np.inf:
        return -np.inf

    sum_exp = 0.0
    for i in range(len_x):
        sum_exp = sum_exp + np.exp(y[i] - ymax)

    return ymax + np.log(sum_exp)


def log_sum_beta(aij_i, mean, var, obs, beta_t1):
    """
    Numerically stable log-sum-exp for one backward-recursion step.

    Computes log(sum_j a_ij[j] * N(obs; mean[:, j], var[:, j]) * exp(beta_t1[j]))
    in log space, factoring out the maximum term to avoid exp underflow.

    Parameters
    ----------
    aij_i : ndarray, 1-D
        Transition probabilities out of the current state into each state j.
    mean, var : ndarray, shape (dim, num_states)
        Diagonal-Gaussian parameters of the candidate next states.
    obs : ndarray, shape (dim,)
        The observation at the next time step.
    beta_t1 : ndarray, 1-D
        Backward log-probabilities at the next time step.

    Returns
    -------
    float
        The log of the summed product, or -inf if every term is zero.
    """
    num_states = mean.shape[1]
    y = np.full(num_states, -np.inf)

    ymax = -np.inf
    for j in range(num_states):
        y[j] = np.log(aij_i[j]) + logGaussian(mean[:, j], var[:, j], obs) + beta_t1[j]
        if y[j] > ymax:
            ymax = y[j]

    # All terms have zero probability: return log(0) = -inf directly.
    # BUG FIX: the guard used to compare against +inf ("ymax == np.inf"), so
    # this case was never short-circuited; compare to -inf as the intended
    # mirror of log_sum_alpha. (The old fall-through happened to yield the
    # same -inf value via -inf + log(n), so results are unchanged.)
    if ymax == -np.inf:
        return -np.inf

    sum_exp = 0.0
    for j in range(num_states):
        sum_exp = sum_exp + np.exp(y[j] - ymax)

    return ymax + np.log(sum_exp)

