import numpy as np
import pandas as pd
import json

def Normalization(sig):
    """Compute grey-relational-analysis (GRA) indicator weights.

    Each column of ``sig`` is one indicator, each row one observation.
    Columns are mean-normalized, then for every reference column ``k`` the
    grey relational coefficients of all columns against column ``k`` are
    averaged; the accumulated relational sums are normalized to weights.

    Parameters
    ----------
    sig : np.ndarray, shape (rows, cols)
        Raw indicator matrix; column means are assumed non-zero.

    Returns
    -------
    np.ndarray, shape (1, cols)
        Weights summing to 1.
    """
    numcols = sig.shape[1]
    r = np.zeros((numcols, numcols))
    np.set_printoptions(suppress=True, precision=4)
    # Mean-normalize every column (broadcasts over rows).
    ss = sig / np.mean(sig, axis=0)
    for k in range(numcols):
        # Absolute deviation of every column from reference column k.
        data = np.fabs(ss - ss[:, [k]])
        # BUGFIX: the original tracked min/max in one if/else chain, so any
        # value that lowered the running minimum was never considered as a
        # maximum candidate (and the seeds min=1 / max=0 assumed the data
        # range).  Use the true global extrema of the deviation matrix.
        min_min = data.min()
        max_max = data.max()
        # Grey relational coefficient with resolution coefficient 0.5.
        coefficient = (min_min + 0.5 * max_max) / (data + 0.5 * max_max)
        # Average the coefficients over observations.
        r[k, :] = coefficient.mean(axis=0)
    w0 = np.sum(r, axis=1)
    # Normalize to a probability-like weight vector; keep the (1, cols)
    # shape callers index as w[0][j].
    return (w0 / w0.sum()).reshape(1, -1)


def differvalue(sig):
    """Deviation of each sample from the national standard index.

    Column conventions (9 indicators expected):
      * columns 0-2: signed difference ``sig - standard``;
      * column 3:    negated absolute deviation;
      * columns 4-8: absolute deviation.

    Returns an array with the same shape as ``sig``.
    """
    standard = np.array([2.9, 3.1, 8.1, 15, 0.01, 0.05, 0.1, 0.3, 0.5])
    deviations = np.zeros((sig.shape[0], sig.shape[1]))
    deviations[:, :3] = sig[:, :3] - standard[:3]
    deviations[:, 3] = -np.abs(sig[:, 3] - standard[3])
    deviations[:, 4:9] = np.abs(sig[:, 4:9] - standard[4:9])
    return deviations


def data_quality(dvalue_matrix, w):
    """Weighted quality score per row, discretized into risk grades 1-5.

    Parameters
    ----------
    dvalue_matrix : np.ndarray, shape (rows, cols)
        Deviation matrix (e.g. output of ``differvalue``).
    w : np.ndarray, shape (1, cols)
        Indicator weights (e.g. output of ``Normalization``).

    Returns
    -------
    np.ndarray, shape (rows, 1)
        Grade 1..5 when the weighted score falls inside the calibrated
        bands; a row whose score lies outside [-0.0776, 0.6152] keeps its
        raw score (original behavior, preserved).
    """
    row = dvalue_matrix.shape[0]
    quality_data = np.zeros((row, 1))
    # Weighted sum of deviations per row.
    quality_data[:, 0] = dvalue_matrix @ w[0]

    # BUGFIX (latent): the original used a chain of independent `if`
    # statements that re-tested the already-overwritten score; it only
    # worked because grades 1..5 happen to miss every band.  Explicit
    # elif bands make the classification safe.
    for i in range(row):
        score = quality_data[i, 0]
        if -0.0776 <= score <= 0.06096:
            quality_data[i, 0] = 1
        elif 0.06096 < score <= 0.19952:
            quality_data[i, 0] = 2
        elif 0.19952 < score <= 0.33808:
            quality_data[i, 0] = 3
        elif 0.33808 < score <= 0.47664:
            quality_data[i, 0] = 4
        elif 0.47664 < score <= 0.6152:
            quality_data[i, 0] = 5
        # else: out-of-range score is left as-is.
    return quality_data


def ForwardBackwardAlgo(A, B, Pi, O):
    """HMM forward and backward passes for an observation sequence.

    Parameters: A (N, N) transition matrix, B (N, Y) emission matrix,
    Pi (N, 1) initial distribution, O (K, 1) observations coded 1..Y.
    A square transition matrix is assumed, as in ``Main``.

    Returns (P1, P2, p1): the forward (alpha) matrix, the backward (beta)
    matrix — both (N, K) — and the sequence likelihood p1.
    """
    num_states = A.shape[0]
    horizon = O.shape[0]
    P1 = np.zeros((num_states, horizon))
    P2 = np.zeros((num_states, horizon))

    # Forward initialization: alpha_0(i) = pi_i * b_i(o_0).
    first_sym = int(O[0, 0]) - 1
    for state in range(num_states):
        P1[state, 0] = Pi[state] * B[state, first_sym]

    # Forward recursion: alpha_{t+1}(j) = (sum_i alpha_t(i) a_ij) * b_j(o_{t+1}).
    for t in range(horizon - 1):
        sym = int(O[t + 1, 0]) - 1
        for j in range(A.shape[1]):
            P1[j, t + 1] = np.sum(P1[:, t] * A[:, j]) * B[j, sym]

    # Likelihood of the whole sequence from the final forward column.
    p1 = np.sum(P1[:, horizon - 1])

    # Backward initialization: beta_{K-1}(i) = 1.
    P2[:, horizon - 1] = 1
    # Backward recursion: beta_t(i) = sum_j a_ij * b_j(o_{t+1}) * beta_{t+1}(j).
    for t in range(horizon - 2, -1, -1):
        sym = int(O[t + 1, 0]) - 1
        for state in range(A.shape[1]):
            P2[state, t] = np.sum(A[state, :] * B[:, sym] * P2[:, t + 1])
    return (P1, P2, p1)


def GammaXiAlgo(A, B, Pi, O):
    """Posterior state marginals (Gamma) and pair marginals (Xi).

    Gamma[i, t] is the probability of being in state i at time t given the
    full observation sequence; Xi[i, j, t] the probability of the i -> j
    transition at time t.  A square transition matrix is assumed, as in
    ``Main``.
    """
    horizon = O.shape[0]
    (P1, P2, p1) = ForwardBackwardAlgo(A, B, Pi, O)
    # gamma_t(i) = alpha_t(i) * beta_t(i) / P(O)
    Gamma = P1 * P2 / p1
    Xi = np.zeros((A.shape[1], A.shape[0], horizon - 1))
    for t in range(horizon - 1):
        sym = int(O[t + 1, 0]) - 1
        for i in range(A.shape[1]):
            for j in range(A.shape[0]):
                # xi_t(i, j) = alpha_t(i) a_ij b_j(o_{t+1}) beta_{t+1}(j) / P(O)
                Xi[i, j, t] = P1[i, t] * A[i, j] * B[j, sym] * P2[j, t + 1] / p1
    return (Gamma, Xi)


def BaumWelchAlgo_1(A, B, Pi, O):
    """One Baum-Welch re-estimation step; returns updated (A, B, Pi).

    Parameters: A (N, N) transitions, B (N, Y) emissions, Pi (N, 1)
    initial distribution, O (K, 1) observations coded 1..Y.
    """
    N = A.shape[0]
    M = A.shape[1]
    Y = B.shape[1]
    K = O.shape[0]
    (Gamma, Xi) = GammaXiAlgo(A, B, Pi, O)
    # Expected number of visits to each state over the whole sequence.
    f = np.sum(Gamma, axis=1)
    # Transition update: expected i->j transitions divided by expected
    # departures from i (visits to i excluding the final step).
    # Cleanup: dropped the unused `sum_xi` local and the redundant nested
    # np.sum over an already-1D slice.
    A_1 = np.zeros((M, N))
    for i in range(M):
        for j in range(N):
            A_1[i, j] = np.sum(Xi[i, j, :]) / (f[i] - Gamma[i, K - 1])
    # Emission update: expected emissions of symbol y from state j divided
    # by expected visits to j.
    B_1 = np.zeros((N, Y))
    for y in range(1, Y + 1):
        for j in range(N):
            csum = 0.0
            for t in range(K):
                if O[t, 0] == y:
                    csum = csum + Gamma[j, t]
            B_1[j, y - 1] = csum / f[j]
    # Initial-distribution update: posterior at t = 0 (fresh copy, not a
    # view into Gamma).
    Pi_1 = Gamma[:M, [0]].copy()
    return (A_1, B_1, Pi_1)


def BaumWelchAlgo_n(A, B, Pi, O, n):
    """Run ``n`` Baum-Welch iterations, feeding each estimate into the next.

    BUGFIX: the original left its loop variables unbound when n < 1 and
    raised NameError at the return; n <= 0 now simply returns the inputs
    unchanged.

    Returns the final (A, B, Pi) estimates.
    """
    for _ in range(n):
        (A, B, Pi) = BaumWelchAlgo_1(A, B, Pi, O)
    return (A, B, Pi)


def viterbi(A, B, Pi, O):
    """Most-likely hidden state path (1-based) for observation sequence O.

    Parameters: A (n, n) transitions, B (n, Y) emissions, Pi (n, 1)
    initial distribution, O (k, 1) observations coded 1..Y.

    Returns an int32 column vector of k states in 1..n.
    """
    num_prev = A.shape[0]
    num_states = A.shape[1]
    horizon = O.shape[0]
    Delta = np.zeros((num_states, horizon))
    Psi = np.zeros((num_states, horizon))

    # Initialization: delta_0(i) = pi_i * b_i(o_0); no predecessor at t=0.
    sym0 = int(O[0, 0]) - 1
    for state in range(num_states):
        Delta[state, 0] = Pi[state] * B[state, sym0]

    # Recursion: for each state keep the best score and the 1-based index
    # of the predecessor that produced it.
    scores = np.zeros((num_states, 1))
    for t in range(1, horizon):
        sym = int(O[t, 0]) - 1
        for j in range(num_prev):
            for i in range(num_states):
                scores[i, 0] = Delta[i, t - 1] * A[i, j] * B[j, sym]
            Psi[j, t] = np.argmax(scores, axis=0) + 1
            Delta[j, t] = scores.max(0)

    # Termination: best final state, then backtrack through Psi.
    best_last = np.argmax(Delta[:, horizon - 1], axis=0) + 1
    Psi = Psi.astype(np.int32)
    path = np.zeros((horizon, 1))
    path[horizon - 1, 0] = best_last
    for t in range(horizon - 2, -1, -1):
        path[t, 0] = Psi[int(path[t + 1, 0]) - 1, t + 1]
    return (path.astype(np.int32))


def Main():
    """Run the full assessment pipeline.

    Steps: compute GRA indicator weights, convert deviations from the
    national standard into risk grades (the HMM observation sequence),
    train an HMM with Baum-Welch on the training split, decode the full
    sequence with Viterbi, and write the grades plus actual/predicted
    histograms to disk.

    Reads ./GRA_HMM_test.csv, ./test_1.csv and ./train.csv from the working
    directory; writes ./输入.csv, ./评估等级.csv and ./output/res.txt
    (NOTE(review): the ./output directory must already exist).
    """
    fpath = './GRA_HMM_test.csv'  # test data
    data = pd.read_csv(fpath, sep=',')  # 970 test rows
    initial_path = './test_1.csv'  # full data set
    indata = pd.read_csv(initial_path, sep=',')  # 970 test rows
    inarray = np.array(indata)  # test
    array = np.array(data)  # test
    HMMpath = './train.csv'  # training data
    HMMdata = pd.read_csv(HMMpath, sep=',')  # 375 training rows
    HMMarray = np.array(HMMdata)  # training
    # Indicator weights via grey relational analysis.
    w = Normalization(inarray)
    # Deviation of the source data from the national standard.
    differvalue_matrix = differvalue(array)  # full set
    HMM_differvalue_matrix = differvalue(HMMarray)  # training set

    # Build the HMM observation sequences.
    print(w)
    # Uniform initial guesses for the 5-state, 5-symbol HMM.
    A = np.array([[0.2000, 0.2000, 0.2000, 0.2000, 0.2000], [0.2000, 0.2000, 0.2000, 0.2000, 0.2000],
                  [0.2000, 0.2000, 0.2000, 0.2000, 0.2000], [0.2000, 0.2000, 0.2000, 0.2000, 0.2000],
                  [0.2000, 0.2000, 0.2000, 0.2000, 0.2000]])
    B = np.array([[0.1000, 0.1500, 0.2000, 0.2500, 0.3000], [0.1500, 0.1500, 0.2000, 0.2500, 0.2500],
                  [0.2000, 0.2000, 0.2000, 0.2000, 0.2000], [0.2500, 0.2500, 0.2000, 0.1500, 0.1500],
                  [0.3000, 0.2500, 0.2000, 0.1500, 0.1000]])
    Pi = np.array([[0.2000], [0.2000], [0.2000], [0.2000], [0.2000]])

    # Observation sequences: risk grades derived from data quality.
    HMM_O_array = data_quality(HMM_differvalue_matrix, w)  # training input
    O_array = data_quality(differvalue_matrix, w)  # risk grade of every sample
    matrix = pd.DataFrame(O_array)
    matrix.to_csv('./输入.csv')
    # Train the HMM for 50 Baum-Welch iterations on the training grades.
    (HA, HB, HPi) = BaumWelchAlgo_n(A, B, Pi, HMM_O_array, 50)

    #np.savetxt('./out1.csv', A, delimiter=',')
    # Decode the most likely hidden state path for the full data set.
    I = viterbi(HA, HB, HPi, O_array)
    #print(HA, HB, HPi)
    print("输出序列")  # "output sequence"
    #print(I)
    #print(I.shape[0],I.shape[1])
    data_matrix = pd.DataFrame(I)
    data_matrix.to_csv('./评估等级.csv')
    # Pair each sample's graded score with the (reversed-scale) decoded state.
    output=np.zeros((I.shape[0],2))
    output[:,0]=O_array[:,0]
    output[:,1]=6-I[:,0]
    # Histogram the actual vs. predicted grade counts.
    actual = {}
    predicted = {}
    for i in range(len(output)):
        act = str(int(output[i][0]))
        pre = str(int(output[i][1]))
        if act in actual:
            actual[act] += 1
        else:
            actual[act] = 1
        if pre in predicted:
            predicted[pre] += 1
        else:
            predicted[pre] = 1
    result = {}
    result['actual'] = actual
    result['predicted'] = predicted
    with open('./output/res.txt', 'w') as f:
        f.write(json.dumps(result))

if __name__ == '__main__': Main()



