import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import  ConnectionPatch
import random
from tqdm import tqdm
from sklearn.decomposition import PCA
from sklearn.decomposition import KernelPCA
from scipy import stats
from scipy.stats import norm, chi2
import pandas as pd
from scipy import stats
from scipy.stats import norm, chi2
from sklearn.preprocessing import StandardScaler
# plt.rcParams['font.sans-serif']=['SimHei'] #用来正常显示中文标签
# plt.rcParams['axes.unicode_minus'] = False #用来正常显示负号

from scipy.spatial.distance import pdist, squareform
from scipy import exp
from scipy.linalg import eigh
# kpca
from sklearn.metrics import mean_squared_error


def t2_online(x, p, v):
    '''
    Hotelling T^2 statistic for a single online sample.

    p: loading matrix (columns are the retained eigenvectors)
    x: online sample, shape m*1 (or a length-m vector)
    v: diagonal matrix of the eigenvalues, largest first

    Returns T^2 = x' P V^{-1} P' x  (cf. 2010_Ge, Eq. (17)).
    '''
    # Project the sample onto the principal components.
    score = np.dot(x.T, p)
    # Scale each score by the inverse of its eigenvalue.
    scaled = np.dot(score, np.linalg.inv(v))
    # Map back and contract with the sample to get the scalar statistic.
    return np.dot(np.dot(scaled, p.T), x)



def spe_online(x, p):
    '''
    SPE (Q) statistic for a single online sample.

    p: loading matrix (columns are the retained eigenvectors)
    x: online sample, shape m*1 (or a length-m vector)

    Returns SPE = x' (I - P P') x, the squared residual not explained
    by the retained principal components.
    '''
    residual_projector = np.eye(len(x)) - np.dot(p, p.T)
    return np.dot(np.dot(x.T, residual_projector), x)




def pca_TSCs(X, p, v, t):
    '''
    Per-component T^2 contributions (TSCs) for every sample.

    X: samples, shape (N, m) — one sample per row
    p: per-component loading vectors, indexed as p[j]
    v: diagonal eigenvalue matrix, shape (k, k)
    t: unused; kept for interface compatibility with callers

    Returns a list of k lists: TSCs[j][i] is the T^2 contribution of
    principal component j for sample i, i.e. (x_i . p_j)^2 / lambda_j.
    '''
    v_inv = np.linalg.inv(v)
    n_samples = np.array(X).shape[0]
    n_components = np.array(v).shape[0]

    TSCs = []
    for jdx in range(n_components):
        per_sample = []
        for idx in range(n_samples):
            # Score on component jdx, scaled by 1/lambda_j.
            T_2 = np.dot(np.dot(np.dot(np.dot(X[idx].T, p[jdx]), v_inv[jdx][jdx]), p[jdx].T), X[idx])
            per_sample.append(T_2)
        TSCs.append(per_sample)
    return TSCs


def pca_loading_matrix(Xtrain, ratio = 0.99, confidence = 0.99):
    '''
    Fit a PCA model and return its loading matrix, eigenvalues and scores.

    Xtrain:     training data, samples by rows
    ratio:      variance ratio passed to PCA's n_components
    confidence: unused; kept for interface symmetry with the other helpers

    Returns:
        p: loading matrix, shape (m, k) — retained eigenvectors as columns
        v: diagonal matrix of the retained eigenvalues
        t: score matrix of the training data
    '''
    pca = PCA(n_components = ratio)
    # Fit once and get the scores in the same call (the original fitted twice).
    t = pca.fit_transform(Xtrain)
    ev = pca.explained_variance_  # eigenvalues of the covariance for the retained PCs
    p = (pca.components_).T       # loading matrix
    v = np.diag(ev)               # eigenvalues as a diagonal matrix
    return p, v, t


# KPCA (RBF-kernel) variant of pca_loading_matrix.
def pca_loading_matrix_kpca(Xtrain, ratio = 0.99, confidence = 0.99):
    '''
    KPCA (RBF kernel) counterpart of pca_loading_matrix.

    Xtrain:     training data, samples by rows
    ratio:      passed straight to KernelPCA's n_components.
                NOTE(review): KernelPCA requires an int here (a variance
                ratio like 0.99 is not supported, unlike PCA) — confirm
                callers pass an integer.
    confidence: unused; kept for interface symmetry

    Returns:
        p: kernel eigenvectors (alphas), first PCnum columns
        v: diagonal matrix of the matching kernel eigenvalues
        t: reconstruction of Xtrain from its kernel-space projection
    '''
    pca = KernelPCA(n_components=ratio, kernel="rbf", gamma=0.0433, fit_inverse_transform=True)
    # Fit once; the original fitted twice and discarded an MSE computation.
    X_pca = pca.fit_transform(Xtrain)
    t = pca.inverse_transform(X_pca)

    ev = np.var(X_pca, axis=0)
    PCnum = len(ev)

    # sklearn >= 1.0 renamed alphas_/lambdas_ to eigenvectors_/eigenvalues_;
    # fall back to the old attributes for older sklearn versions.
    alphas = getattr(pca, "eigenvectors_", None)
    if alphas is None:
        alphas = pca.alphas_
    lambdas = getattr(pca, "eigenvalues_", None)
    if lambdas is None:
        lambdas = pca.lambdas_

    p = alphas[:, 0 : PCnum]
    v = np.diag(lambdas[0 : PCnum])
    return p, v, t


## 2022-04-14: added a minimal PCA projection helper.
def pca_easy(Xtrain, ratio = 0.99, confidence = 0.99):
    pca = PCA(n_components = ratio)
    pca.fit(Xtrain)
    t = pca.fit_transform(Xtrain)           # after PCA, data
    # print("t.shape=",np.array(t).shape)
    return t

# 2023-04-15: added a minimal KPCA reconstruction helper.
def kpca_easy(Xtrain, ratio = 0.99, confidence = 0.99):
    '''
    Fit an RBF-kernel KPCA on Xtrain and return the reconstruction of
    Xtrain from its kernel-space projection.

    ratio:      passed to KernelPCA's n_components (must be an int)
    confidence: unused; kept for interface symmetry
    '''
    pca = KernelPCA(n_components=ratio, kernel="rbf", gamma=0.0433, fit_inverse_transform=True)
    # Single fit_transform (the original fitted the model twice).
    X_pca = pca.fit_transform(Xtrain)
    return pca.inverse_transform(X_pca)



def pca_control_limit(Xtrain, ratio = 0.99, confidence = 0.99):
    '''
    Compute the T^2 and SPE (Q) control limits from training data.

    Xtrain:     healthy training data, samples by rows
    ratio:      variance ratio deciding how many PCs are retained
    confidence: confidence level of both limits

    Returns (t_limit, spe_limit).
    '''
    pca = PCA(n_components = ratio)
    pca.fit(Xtrain)
    k = len(pca.explained_variance_ratio_)  # number of retained PCs
    n_sample = pca.n_samples_

    # Full-rank eigenvalues are needed for the SPE residual terms;
    # fit the full model once (the original fitted it twice).
    v_all = PCA(n_components = Xtrain.shape[1]).fit(Xtrain).explained_variance_

    ## T^2 control limit via the F distribution
    coe = k * (n_sample - 1) * (n_sample + 1) / ((n_sample - k) * n_sample)
    t_limit = coe * stats.f.ppf(confidence, k, (n_sample - k))

    ## SPE control limit (Jackson & Mudholkar) over the residual eigenvalues
    theta1 = np.sum(v_all[k:])
    theta2 = np.sum(v_all[k:] ** 2)
    theta3 = np.sum(v_all[k:] ** 3)

    h0 = 1 - (2 * theta1 * theta3) / (3 * (theta2 ** 2))
    c_alpha = norm.ppf(confidence)
    spe_limit = theta1 * ((h0 * c_alpha * ((2 * theta2) ** 0.5) / theta1 + 1
                           + theta2 * h0 * (h0 - 1) / (theta1 ** 2)) ** (1 / h0))

    return t_limit, spe_limit



def kpca_control_limit(Xtrain, ratio = 0.99, confidence = 0.99):
    '''
    Control limits for an RBF-kernel KPCA monitoring model.

    A linear PCA on the same data is used only to decide how many
    components cover `ratio` of the variance; the kernel model itself
    comes from rbf_kernel_pca.

    Returns (t_limit, spe_limit, p, v):
        p: kernel eigenvectors (first PCnum columns)
        v: diagonal matrix of the matching kernel eigenvalues
    '''
    N, M = np.array(Xtrain).shape
    pca = PCA(n_components = ratio)
    pca.fit(Xtrain)
    PCnum = len(pca.explained_variance_ratio_)

    Xtrain_ = np.array(Xtrain).T

    Xtrain_K, alphas, lambdas = rbf_kernel_pca(Xtrain_, gamma = 15, components = PCnum)
    Xtrain_K_all, alphas_all, lambdas_all = rbf_kernel_pca(Xtrain_, gamma = 15, components = M)

    ## data needed for online T2 / SPE monitoring
    p = alphas[:, 0 : PCnum]
    v = np.diag(lambdas[0 : PCnum])

    ## T2 statistic threshold calculation
    k = PCnum
    F = stats.f.ppf(confidence, k, N - 1)
    t_limit = k * (N - 1) * F / (N - k)

    ## SPE statistic threshold over the residual eigenvalues lambda_{k+1}..lambda_M.
    # The original summed from index k-1 and so double-counted the k-th
    # (retained) eigenvalue; now consistent with pca_control_limit's v_all[k:].
    v_all = np.array(lambdas_all)
    theta1 = np.sum(v_all[k:])
    theta2 = np.sum(v_all[k:] ** 2)
    theta3 = np.sum(v_all[k:] ** 3)

    h0 = 1 - ((2 * theta1 * theta3) / (3 * pow(theta2, 2)))
    c_alpha = norm.ppf(confidence)
    spe_limit = theta1 * pow(c_alpha * pow(2 * theta2 * pow(h0, 2), 0.5) / theta1 + 1 + theta2 * h0 * (h0 - 1) / pow(theta1, 2), 1 / h0)

    return t_limit, spe_limit, p, v



def rbf_kernel_pca(X, gamma, components):
    '''
    Kernel PCA with an RBF kernel.

    X:          data, samples by rows
    gamma:      RBF kernel width parameter
    components: number of components to keep

    Returns:
        K:       the centered kernel matrix
        alphas:  top `components` eigenvectors (projection coordinates)
        lambdas: the matching eigenvalues, largest first
    '''
    # Pairwise squared Euclidean distances -> RBF kernel matrix.
    dists = squareform(pdist(X, 'sqeuclidean'))
    K = np.exp(-gamma * dists)

    # Center the kernel matrix in feature space.
    n = K.shape[0]
    one_n = np.ones((n, n)) / n
    K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)

    # eigh suits the symmetric kernel matrix; flip to descending order.
    eigvals, eigvecs = np.linalg.eigh(K)
    eigvals, eigvecs = eigvals[::-1], eigvecs[:, ::-1]

    # Keep the leading eigenpairs.
    alphas = np.column_stack([eigvecs[:, j] for j in range(components)])
    lambdas = [eigvals[j] for j in range(components)]
    return K, alphas, lambdas





def pca_model_online(X, p, v):
    '''
    Compute the T^2 and SPE statistics for every row of X.

    X: samples by rows
    p: loading matrix
    v: diagonal eigenvalue matrix

    Returns (t_total, q_total): lists of per-sample T^2 and SPE values.
    '''
    t_total = []
    q_total = []
    for sample in X:
        t_total.append(t2_online(sample, p, v))
        q_total.append(spe_online(sample, p))
    return t_total, q_total




def figure_control_limit_kld(e_limit, KLDs):
    '''Plot the BIC/KLD statistic series together with its control limit.'''
    peak = 0
    for value in KLDs:
        peak = max(peak, value)

    plt.figure(1, figsize=(15, 5))
    ax1 = plt.subplot(1, 1, 1)
    plt.plot(KLDs)
    plt.plot(np.ones(len(KLDs)) * e_limit, 'r', label='BIC control limit')
    # Leave headroom above the tallest sample, but never less than 10x the limit.
    ax1.set_ylim(0, max(peak + peak / 20, 10 * e_limit))
    ax1.set_xlabel(u'Samples')
    ax1.set_ylabel(u'BIC_KLD statistic')
    plt.legend()
    plt.show()

def zone_and_linked(ax,axins,zone_left,zone_right,x,y,linked='bottom',
                    x_ratio=0.05,y_ratio=0.05):
    """Zoom an inset axes onto a region of the host axes and connect them.

    ax:         host axes, e.g. fig, ax = plt.subplots(1, 1)
    axins:      inset axes, e.g. axins = ax.inset_axes((0.4, 0.1, 0.4, 0.3))
    zone_left:  left index of the region to magnify
    zone_right: right index of the region to magnify
    x:          x-axis values
    y:          list of all y series
    linked:     where the connector lines attach, {'bottom','top','left','right'}
    x_ratio:    padding ratio along x
    y_ratio:    padding ratio along y
    """
    x_span = x[zone_right] - x[zone_left]
    xlo = x[zone_left] - x_span * x_ratio
    xhi = x[zone_right] + x_span * x_ratio

    zoomed = np.hstack([yi[zone_left:zone_right] for yi in y])
    y_pad = (np.max(zoomed) - np.min(zoomed)) * y_ratio
    ylo = np.min(zoomed) - y_pad
    yhi = np.max(zoomed) + y_pad

    axins.set_xlim(xlo, xhi)
    axins.set_ylim(ylo, yhi)

    # Rectangle around the magnified region on the host axes.
    ax.plot([xlo, xhi, xhi, xlo, xlo],
            [ylo, ylo, yhi, yhi, ylo], "black")

    # Pick the inset-side / host-side anchor points for the two connectors.
    if linked == 'bottom':
        xyA_1, xyB_1 = (xlo, yhi), (xlo, ylo)
        xyA_2, xyB_2 = (xhi, yhi), (xhi, ylo)
    elif linked == 'top':
        xyA_1, xyB_1 = (xlo, ylo), (xlo, yhi)
        xyA_2, xyB_2 = (xhi, ylo), (xhi, yhi)
    elif linked == 'left':
        xyA_1, xyB_1 = (xhi, yhi), (xlo, yhi)
        xyA_2, xyB_2 = (xhi, ylo), (xlo, ylo)
    elif linked == 'right':
        xyA_1, xyB_1 = (xlo, yhi), (xhi, yhi)
        xyA_2, xyB_2 = (xlo, ylo), (xhi, ylo)

    for xyA, xyB in ((xyA_1, xyB_1), (xyA_2, xyB_2)):
        con = ConnectionPatch(xyA=xyA, xyB=xyB, coordsA="data",
                              coordsB="data", axesA=axins, axesB=ax)
        axins.add_artist(con)


def figure_control_limit_t2_spe(t_limit, spe_limit, t2s, spes, savefilename="./a.png"):
    '''
    Plot the T^2 (top) and SPE/Q (bottom) series with dashed control-limit
    lines, save the figure to `savefilename`, then show it.

    The original computed per-panel y limits (maxx / myylim) and a dummy
    x_idx that were never used (their consumers were commented out);
    that dead code has been removed.
    '''
    plt.figure(2, figsize=(12, 7))

    ax1 = plt.subplot(2, 1, 1)
    plt.plot(t2s, color='r')
    plt.xlim([0, len(t2s)])
    ax1.axhline(y=t_limit, ls="--", color="b", label='$T_{2}$ control limit')
    ax1.set_xlabel(u'Samples')
    # Tick positions tailored to the 960-sample test layout.
    ax1.set_xticks([0, 160, 200, 400, 600, 800, 960], ['0', '160', '200', '400', '600', '800', '960'])
    ax1.set_ylabel(u'$T^{2}$')
    plt.legend()

    ax2 = plt.subplot(2, 1, 2)
    plt.plot(spes, color='r')
    plt.xlim([0, len(spes)])
    ax2.axhline(y=spe_limit, ls="--", color="b", label='$Q$ control limit')
    ax2.set_xlabel(u'Samples')
    ax2.set_xticks([0, 160, 200, 400, 600, 800, 960], ['0', '160', '200', '400', '600', '800', '960'])
    ax2.set_ylabel(u'$Q$')
    plt.legend()

    # Save before showing (show() clears the current figure in some backends).
    plt.savefig(savefilename, dpi=500, bbox_inches="tight")
    plt.show()

def figure_control_limit_t2_spe_log(t_limit, spe_limit, t2s, spes):
    '''Plot T^2 and SPE with their control limits on log-scaled y axes.'''
    plt.figure(2, figsize=(12, 7))

    ax1 = plt.subplot(2, 1, 1)
    plt.plot(t2s)
    plt.plot(np.ones((len(t2s))) * t_limit, 'r',label='$T^{2}$ limit')
    plt.yscale('log')  # log-scaled y axis
    peak = 0
    for value in t2s:
        peak = max(peak, value)
    upper = max(peak + peak / 20, 10 * t_limit)
    if peak <= 1.1:
        upper = 1.05
    # NOTE(review): a lower bound of 0 is not representable on a log axis —
    # matplotlib clips it; kept to match the original behaviour.
    ax1.set_ylim(0, upper)
    ax1.set_xlabel(u'Samples')
    ax1.set_ylabel(u'$T^2$')
    plt.legend()

    ax2 = plt.subplot(2, 1, 2)
    plt.plot(spes)
    plt.plot(np.ones((len(spes))) * spe_limit, 'r',label='SPE limit')
    plt.yscale('log')  # log-scaled y axis
    peak = 0
    for value in spes:
        peak = max(peak, value)
    upper = max(peak + peak / 20, 10 * spe_limit)
    if peak <= 1.1:
        upper = 1.05
    ax2.set_ylim(0, upper)
    ax2.set_xlabel(u'Samples')
    ax2.set_ylabel(u'SPE')
    plt.legend()
    plt.show()


def figure_control_limit(X, t_limit, spe_limit, t_total, q_total):
    '''Plot the T^2 and SPE series against their 95% control limits.'''
    plt.figure(2, figsize=(12, 7))

    panels = [
        (t_total, t_limit, u'Hotelling $T^2$ statistic', '95% $T^2$ control limit'),
        (q_total, spe_limit, u'SPE statistic', '95% spe control limit'),
    ]
    for pos, (series, limit, ylab, lab) in enumerate(panels, start=1):
        axis = plt.subplot(2, 1, pos)
        plt.plot(series)
        # Constant red line at the control limit, one point per sample of X.
        plt.plot(np.ones((len(X))) * limit, 'r', label=lab)
        axis.set_xlabel(u'Samples')
        axis.set_ylabel(ylab)
        plt.legend()
    plt.show()




# test_data     test data                      e.g. Yblock
# trian_data    training data                  e.g. sonBlock
# Xtrain_nor    standardised training data     e.g. XsonBlock
def Contribution_graph(test_data, trian_data, index, p, p_all, v_all, k, t_limit, Xtrain_nor):
    # 贡献图
    index = 160
    # index = 4
    #1.确定造成失控状态的得分a
    # test_data = fault02_test
    # data_mean = data_mean = np.mean(Xtrain_nor, 0)
    data_mean = np.mean(Xtrain_nor, 0)
    data_std = np.std(Xtrain_nor, 0)
    test_data_submean = np.array(test_data - data_mean)
    test_data_norm = np.array((test_data - data_mean) / data_std)
    t = test_data_norm[index,:].reshape(1,test_data.shape[1])
    S = np.dot(t,p[:,:])
    r = []
    for i in range(k):
        if S[0,i]**2/v_all[i] > t_limit/k:
            r.append(i)
    # print(r)
    #2.计算每个变量相对于上述失控得分的贡献
    cont = np.zeros([len(r),test_data.shape[1]])
    for i in range(len(r)):
        for j in range(test_data.shape[1]):
            cont[i,j] = S[0,i]/v_all[r[i]]*p_all[r[i],j]*test_data_submean[index,j]
            if cont[i,j] < 0:
                cont[i,j] = 0
    #3.计算每个变量对T的总贡献
    a = cont.sum(axis = 0)
    #4.计算每个变量对Q的贡献 # 下面两步实现计算(25) CONQ_{b}^{j}
    I = np.eye(test_data.shape[1])  # 生成对角阵外，转化成one-hot数组(只有0,1)。
    e = (np.dot(test_data_norm[index,:],(I - np.dot(p, p.T))))**2

    # print("e=",e)

    ##画图
    plt.rcParams['font.sans-serif']=['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    font1 = {'family' : 'SimHei','weight' : 'normal','size'   : 23,}
    plt.figure(2,figsize=(16,9))
    ax1=plt.subplot(2,1,1)
    plt.bar(range(test_data.shape[1]),a)
    plt.xlabel(u'变量号',font1)
    plt.ylabel(u'T^2贡献率 %',font1)
    plt.legend()
    plt.show
    ax1=plt.subplot(2,1,2)
    plt.bar(range(test_data.shape[1]),e)
    plt.xlabel(u'变量号',font1)
    plt.ylabel(u'Q贡献率 %',font1)
    plt.legend()
    plt.show()




def PCA_x(train_data, test_data):
    """Fit a PCA monitoring model on train_data (95% variance) and score test_data.

    MATLAB-style translation: standardises the training data, eigen-decomposes
    its covariance matrix, keeps enough principal components for >= 95%
    cumulative variance, derives the T2 / SPE control limits, then computes the
    T2 and SPE statistic of every test sample.

    Returns:
        new_SPE, new_TT: per-sample SPE / T2 as n1 x 1 column vectors
        TT2a, SPEa:      the same statistics as 1 x n1 row vectors
        T21, SPE1:       control limits tiled to 1 x n1 row vectors
        n1:              number of test samples
        T2, SPE:         scalar control limits
        m:               number of variables
        variance_hud:    per-PC variance share (rounded to 3 decimals)
        leiji_1:         cumulative variance shares
        df_cfjz:         loading matrix PC as a pandas DataFrame
    """
    # *****************data dimensions*********************
    m = train_data.shape[1];  # number of columns (variables)
    n = train_data.shape[0];  # number of rows (samples)
    # ******************standardisation (start)*********************
    S_mean = np.mean(train_data, axis=0)  # column means of the healthy data
    S_mean = np.array(S_mean)  # column means as an ndarray
    S_var = np.std(train_data, ddof=1);  # ddof=1 gives the unbiased estimate (matches MATLAB's std)
    # NOTE(review): no axis argument — for a plain ndarray this is the std of
    # ALL elements (a scalar), not per-column; for a DataFrame numpy dispatches
    # to per-column std. Confirm which input type is intended.
    # S_var[S_var == 0.0] = 0.0000000000000001  # replace zeros in S_var
    # for idx in range(0, 2):
    #     if S_var[idx] == 0.0:
    #         S_var[idx] = 0.0000000000000001
    S_var = np.array(S_var)  # std as an ndarray
    train_data = train_data - S_mean  # centre the data
    train_data = train_data / S_var  # scale the data
    train_data = np.where(train_data < 4.0e+11, train_data, 0.0)  # clamp absurdly large entries (from near-zero variance columns) to 0
    X_new = train_data;  # standardised matrix X_new
    # ******************covariance matrix Z of the standardised data*********************
    X_new = np.transpose(X_new);  # transpose
    Z = np.dot(X_new, train_data / (n - 1))  # covariance matrix Z
    # ******************eigenvalues / eigenvectors of Z*********************
    # print("Z.shape=",np.array(Z).shape)
    a, b = np.linalg.eig(Z)  ## eigenvalues -> a, matching eigenvectors -> b
    # print("a.shape=",np.array(a).shape)
    # print("b.shape=",np.array(b).shape)

    lambda1 = sorted(a, reverse=True)  # eigenvalues in descending order
    lambda_i = [round(i, 3) for i in lambda1]  # rounded to 3 decimals
    # print('eigenvalues, descending:', lambda_i)
    # variance share of each component
    sum_given = 0  # initialise
    sum_given = sum(lambda_i)
    variance_hud = []  # holds the per-PC variance shares
    for i in range(m):
        if i <= m:  # always true for i in range(m); the loop runs to completion
            variance_hud.append(lambda_i[i] / sum_given)
        else:
            break
    variance_hud = [round(i, 3) for i in variance_hud]  # 3 decimals
    # print('variance shares, descending:', variance_hud)

    # cumulative variance contribution
    leiji_1 = []
    new_value = 0
    for i in range(0, m):
        if i <= m:  # always true — see the note above
            new_value = new_value + variance_hud[i]
            leiji_1.append(new_value)
        else:
            break

    # print('cumulative contribution:', leiji_1)

    # ******************choose the number of principal components*********************
    totalvar = 0   # cumulative contribution, starts at 0
    for i in range(m):
        totalvar = totalvar + lambda1[i] / sum(a)  # accumulate the variance share
        if totalvar >= 0.95:
            k = i + 1  # number of retained PCs
            break  # leave the loop
    PCnum = k  # selected number of PCs
    PC = np.eye(m, k)  # holds the eigenvectors of the selected PCs
    for j in range(k):
        wt = a.tolist().index(lambda1[j])  # position of the j-th sorted eigenvalue in the unsorted list
        PC[:, j:j + 1] = b[:, wt:wt + 1]  # matching eigenvector
    # print('component matrix:', PC)
    # print('number of PCs above the variance threshold:', k)

    df_cfjz = pd.DataFrame(PC)

    # ******************T2 control limit from the training data*********************
    # ******************confidence = (1-a)% = (1-0.05)% = 95%*************
    F = stats.f.ppf(1 - 0.05, k, n - 1)  # F-distribution critical value
    T2 = k * (n - 1) * F / (n - k)  # T2 limit
    # ****************** SPE control limit of the healthy data *********************
    ST1 = 0  # theta_1 in the SPE formula
    ST2 = 0  # theta_2
    ST3 = 0  # theta_3
    for i in range(k - 1, m):  # NOTE(review): starts at k-1, so the k-th (retained) eigenvalue is included — verify against the reference
        ST1 = ST1 + lambda1[i]  # theta_1
        ST2 = ST2 + lambda1[i] * lambda1[i]  # theta_2
        ST3 = ST3 + lambda1[i] * lambda1[i] * lambda1[i]  # theta_3
    h0 = 1 - 2 * ST1 * ST3 / (3 * pow(ST2, 2))
    Ca = 1.6449  # standard-normal quantile for 95% confidence
    SPE = ST1 * pow(Ca * pow(2 * ST2 * pow(h0, 2), 0.5) / ST1 + 1 + ST2 * h0 * (h0 - 1) / pow(ST1, 2),
                    1 / h0)  # SPE limit of the healthy data
    # ******************test samples*********************
    m1 = test_data.shape[1];  # number of columns
    n1 = test_data.shape[0];  # number of rows
    test_data = np.array(test_data)  # DataFrame -> ndarray so the maths matches the MATLAB code
    # I = np.eye(m)  # m*m identity
    PC1 = np.transpose(PC)  # PC transposed
    SPEa = np.arange(n1).reshape(1, n1)  # SPE row vector for the test data
    SPEa = np.double(SPEa)  # as double precision
    TT2a = np.arange(n1).reshape(1, n1)  # T2 row vector for the test data
    TT2a = np.double(TT2a)  # as double precision
    DL = np.diag(lambda1[0:k])  # diagonal matrix of the retained eigenvalues
    DLi = np.linalg.inv(DL)  # its inverse
    # ******************score every test sample*********************
    # mpl.rcParams['font.sans-serif'] = ['SimHei']  # show CJK glyphs in figures
    for i in range(n1):
        xnew = (test_data[i, :] - S_mean) / S_var;  # MATLAB: xnew=(Data2(i,1:m)-S_mean)./S_var;
        # implements MATLAB: err(1,i)=xnew*(eye(14)-PC*PC')*xnew';
        xnew1 = np.transpose(xnew)  # xnew transposed
        PC1 = np.transpose(PC)  # PC transposed (recomputed each iteration)
        XPC = np.dot(xnew, PC)  # scores: xnew * PC
        XPCPC1 = np.dot(XPC, PC1)  # reconstruction: XPC * PC'
        XXPCPC1 = xnew - XPCPC1  # residual
        SPEa[0, i] = np.dot(XXPCPC1, XXPCPC1)  # squared residual norm = SPE
        XPi = np.dot(XPC, DLi)  # scores scaled by 1/eigenvalue
        XPiP = np.dot(XPi, PC1)  # back to variable space
        TT2a[0, i] = np.dot(XPiP, xnew1)  # Hotelling T2
    SPE1 = SPE * np.ones((1, n1))  # SPE limit tiled to a row vector
    # print('SPE statistics:', SPEa)
    # df_spe = pd.DataFrame(SPEa.T)
    new_SPE = SPEa.T
    # df_spe.to_csv('SPE值.csv')     # save SPE values as .csv
    T21 = T2 * np.ones((1, n1))  # T2 limit tiled to a row vector
    # print('T2 statistics:', TT2a)
    # df_T2 = pd.DataFrame(TT2a.T)
    new_TT = TT2a.T
    # df_T2.to_csv('T2值.csv')       # save T2 values as .csv
    return new_SPE, new_TT, TT2a, T21, SPEa, SPE1, n1, T2, SPE, m, variance_hud, leiji_1, df_cfjz



def PCA_x_train(train_data, ratio = 0.85, confidence = 0.99):
    """Training half of PCA_x: fit the PCA model and its control limits only.

    Args:
        train_data: healthy training data, samples by rows (n x m).
        ratio:      cumulative variance share that fixes the PC count k.
        confidence: confidence level for the T2 and SPE limits.

    Returns:
        PC:       loading matrix (m x k) — eigenvectors of the covariance.
        lambda1:  all m eigenvalues, descending.
        S_mean:   training column means (to standardise new data).
        S_var:    training std (see NOTE below).
        T2, SPE:  scalar control limits.
    """
    # *****************data dimensions*********************
    m = train_data.shape[1];  # number of columns (variables)
    n = train_data.shape[0];  # number of rows (samples)
    # ******************standardisation (start)*********************
    S_mean = np.mean(train_data, axis=0)  # column means of the healthy data
    S_mean = np.array(S_mean)  # as ndarray
    S_var = np.std(train_data, ddof=1);  # ddof=1: unbiased estimate (matches MATLAB's std)
    # NOTE(review): no axis argument — scalar std over all elements for an
    # ndarray input, per-column for a DataFrame; confirm the intended input.
    # S_var[S_var == 0.0] = 0.0000000000000001  # replace zeros in S_var
    # for idx in range(0, 2):
    #     if S_var[idx] == 0.0:
    #         S_var[idx] = 0.0000000000000001
    S_var = np.array(S_var)  # as ndarray
    train_data = train_data - S_mean  # centre the data
    train_data = train_data / S_var  # scale the data
    train_data = np.where(train_data < 4.0e+11, train_data, 0.0)  # clamp absurdly large entries (near-zero variance columns) to 0
    X_new = train_data;  # standardised matrix
    # ******************covariance matrix Z*********************
    X_new = np.transpose(X_new);  # transpose
    Z = np.dot(X_new, train_data / (n - 1))  # covariance matrix Z
    # ******************eigenvalues / eigenvectors of Z*********************
    a, b = np.linalg.eig(Z)  ## eigenvalues -> a, matching eigenvectors -> b

    lambda1 = sorted(a, reverse=True)  # eigenvalues in descending order
    lambda_i = [round(i, 3) for i in lambda1]  # rounded to 3 decimals
    # variance share of each component
    sum_given = 0  # initialise
    sum_given = sum(lambda_i)
    variance_hud = []  # holds the per-PC variance shares
    for i in range(m):
        if i <= m:  # always true for i in range(m); the loop runs to completion
            variance_hud.append(lambda_i[i] / sum_given)
        else:
            break
    variance_hud = [round(i, 3) for i in variance_hud]  # 3 decimals

    # cumulative variance contribution
    leiji_1 = []
    new_value = 0
    for i in range(0, m):
        if i <= m:  # always true — see the note above
            new_value = new_value + variance_hud[i]
            leiji_1.append(new_value)
        else:
            break

    # ******************choose the number of principal components*********************
    totalvar = 0   # cumulative contribution, starts at 0
    for i in range(m):
        totalvar = totalvar + lambda1[i] / sum(a)  # accumulate the variance share
        if totalvar >= ratio:
            k = i + 1  # number of retained PCs
            break  # leave the loop
    PCnum = k  # selected number of PCs
    # print("ratio=",ratio)
    # print("PCnum=",PCnum)
    PC = np.eye(m, k)  # holds the eigenvectors of the selected PCs
    for j in range(k):
        wt = a.tolist().index(lambda1[j])  # position of the j-th sorted eigenvalue in the unsorted list
        PC[:, j:j + 1] = b[:, wt:wt + 1]  # matching eigenvector

    # ******************T2 control limit from the training data*********************
    # ******************confidence level given by `confidence`*************
    F = stats.f.ppf(confidence, k, n - 1)  # F-distribution critical value
    T2 = k * (n - 1) * F / (n - k)  # T2 limit
    # ****************** SPE control limit of the healthy data *********************
    ST1 = 0  # theta_1 in the SPE formula
    ST2 = 0  # theta_2
    ST3 = 0  # theta_3
    for i in range(k - 1, m):  # NOTE(review): starts at k-1, so the k-th (retained) eigenvalue is included — verify against the reference
        ST1 = ST1 + lambda1[i]  # theta_1
        ST2 = ST2 + lambda1[i] * lambda1[i]  # theta_2
        ST3 = ST3 + lambda1[i] * lambda1[i] * lambda1[i]  # theta_3
    h0 = 1 - 2 * ST1 * ST3 / (3 * pow(ST2, 2))
    # Ca = 1.6449
    Ca = norm.ppf(confidence)  # standard-normal quantile at `confidence`
    SPE = ST1 * pow(Ca * pow(2 * ST2 * pow(h0, 2), 0.5) / ST1 + 1 + ST2 * h0 * (h0 - 1) / pow(ST1, 2),
                    1 / h0)  # SPE limit of the healthy data

    return PC, lambda1, S_mean, S_var, T2, SPE


def PCA_x_test(test_data, PC, lambda1, S_mean, S_var):
    """Score test samples against a trained PCA model (see PCA_x_train).

    test_data:      test samples, rows are observations
    PC:             loading matrix (m x k)
    lambda1:        eigenvalues sorted descending (at least k of them)
    S_mean, S_var:  standardisation statistics from training

    Returns (new_TT, new_SPE): per-sample T2 and SPE as n1 x 1 columns.
    """
    m, k = np.array(PC).shape
    samples = np.array(test_data)  # ensure ndarray semantics for the row maths
    n1 = samples.shape[0]

    PC_T = np.transpose(PC)
    # Inverse of the retained-eigenvalue diagonal, used to scale the scores.
    lambda_inv = np.linalg.inv(np.diag(lambda1[0:k]))

    spe_row = np.zeros((1, n1), dtype=np.double)
    t2_row = np.zeros((1, n1), dtype=np.double)
    for i in range(n1):
        # Standardise the sample with the training statistics.
        xnew = (samples[i, :] - S_mean) / S_var
        scores = np.dot(xnew, PC)                 # projection onto the PCs
        residual = xnew - np.dot(scores, PC_T)    # part the model cannot explain
        spe_row[0, i] = np.dot(residual, residual)
        t2_row[0, i] = np.dot(np.dot(np.dot(scores, lambda_inv), PC_T), np.transpose(xnew))

    return t2_row.T, spe_row.T




def figure_control_limit_t2_spe_block(t_limits, spe_limits, t2s, spes, l, r):
    '''
    Per-block monitoring plots on a 2 x 5 grid: T^2 on the top row,
    SPE on the bottom row, for blocks l..r-1 (assumes 400 samples and
    at most five blocks per figure).
    '''
    plt.figure(28, figsize=(17, 6))
    sample_axis = np.arange(0, 400)

    for idx in range(l, r):
        print("idx=", idx)
        col = idx - l + 1

        top = plt.subplot(2, 5, col)
        plt.plot(sample_axis, t2s[idx])
        plt.plot(sample_axis, np.ones(400) * t_limits[idx], 'r')
        if idx == l:
            top.set_ylabel(u'$T^2$')
        plt.title('block ' + str(idx + 1))

        bottom = plt.subplot(2, 5, col + 5)
        plt.plot(sample_axis, spes[idx])
        plt.plot(sample_axis, np.ones(400) * spe_limits[idx], 'r')
        bottom.set_xlabel(u'Samples number')
        if idx == l:
            bottom.set_ylabel(u'SPE')

    plt.show()


def draw_block_res_kld(BD_PC_KLDs, BD_e_limits, BD_PC_nums, maxx, l, r):
    '''
    Plot the fused per-block KLD statistic against its fused control limit
    for blocks l..r-1 on a 2 x 3 grid (assumes 400 samples).

    BD_PC_KLDs:  BD_PC_KLDs[block][pc][sample] — per-PC KLD series
    BD_e_limits: BD_e_limits[block][pc] — per-PC control limits
    BD_PC_nums:  number of PCs in each block
    maxx:        unused (it was immediately shadowed in the original);
                 kept for interface compatibility
    l, r:        half-open range of block indices to draw

    Cleanup vs the original: the stray no-op statement `tmp.set` and the
    large blocks of commented-out experiments were removed; behaviour of
    the plotted quantities is unchanged.
    '''
    plt.figure(16, figsize=(13, 15))
    # Numeric example: 200 samples; normal data: 960 — currently fixed at 400.
    x_idx = np.arange(0, 400)

    for idx in range(l, r):
        print("idx=", idx)

        # Fuse the per-PC KLD series per sample: sum of squares divided by
        # (1 + plain sum) — a self-normalising weighted combination.
        arr = []
        for jdx in range(len(BD_PC_KLDs[idx][0])):
            sq_sum = 0
            denom = 1
            for zdx in range(0, BD_PC_nums[idx]):
                denom = denom + BD_PC_KLDs[idx][zdx][jdx]
                sq_sum = sq_sum + BD_PC_KLDs[idx][zdx][jdx] * BD_PC_KLDs[idx][zdx][jdx]
            arr.append(sq_sum / denom)

        # Fuse the per-PC limits the same way (sum of squares / plain sum).
        e_limit = 0
        e_sum = 0
        for zdx in range(0, BD_PC_nums[idx]):
            e_limit = e_limit + BD_e_limits[idx][zdx] * BD_e_limits[idx][zdx]
            e_sum += BD_e_limits[idx][zdx]
        e_limit /= e_sum

        # Y-axis headroom: above the tallest sample, at least 3x the limit.
        peak = 0
        for item in arr:
            peak = max(peak, item)
        myylim = max(peak + peak / 20, 3 * e_limit)

        tmp = plt.subplot(2, 3, idx - l + 1)
        tmp.set_ylim(0, myylim)
        plt.plot(x_idx, arr)
        plt.plot(x_idx, np.ones(400) * e_limit, 'r')
        plt.title('block ' + str(idx + 1))

        tmp.set_ylabel(u'$KLD$')
        tmp.set_xlabel(u'Samples number')
    plt.show()