import numpy as np
from keras.callbacks import EarlyStopping
from sklearn import metrics
from keras.optimizers import Adam, SGD
from keras.layers import Input, Conv1D, MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, Dropout, Flatten, Dense, \
    BatchNormalization, Activation, Concatenate, Reshape, Multiply
from keras.models import Model, load_model
from keras.regularizers import l1, l2
from keras.utils import to_categorical


def build_model(windows=15, concat_axis=-1, cycle=1, layers=3, filters=32,
                growth_rate=32, dropout_rate=0.1, weight_decay=1e-4):
    """Build and compile a three-branch 1-D CNN binary classifier.

    Each branch applies the same Conv1D -> ReLU -> MaxPool -> Dropout ->
    GlobalAveragePooling stack to one feature encoding of a sequence window
    of length 2*windows+1; the pooled features are concatenated and fed to
    a 2-unit softmax head.

    Args:
        windows: half-width of the sequence window; inputs have
            length 2*windows+1.
        concat_axis: axis for the feature concatenation (default -1,
            matching the previous hard-coded behavior).
        cycle, layers, growth_rate, dropout_rate: currently UNUSED —
            kept only for interface compatibility with existing callers.
            Each branch uses a fixed dropout rate of 0.3.
        filters: number of Conv1D filters per branch.
        weight_decay: L2 regularization factor for conv and dense kernels.

    Returns:
        A compiled keras Model with three inputs and one softmax output.
    """
    def _conv_branch(tensor):
        # Shared per-branch feature extractor (identical for all inputs).
        y = Conv1D(filters=filters, kernel_size=3,
                   kernel_initializer="he_normal",
                   padding="same", use_bias=True,
                   kernel_regularizer=l2(weight_decay))(tensor)
        y = Activation('relu')(y)
        y = MaxPooling1D(pool_size=2, strides=1, padding='same')(y)
        y = Dropout(0.3)(y)  # fixed rate, matching the original branches
        return GlobalAveragePooling1D()(y)

    # Three parallel encodings of the same window; the channel counts
    # (20 / 531 / 21) match the BLOSUM62 / AAINDEX / BEAA encoders used by
    # the caller in this file — confirm if encoders change.
    input_1 = Input(shape=(2 * windows + 1, 20))
    input_2 = Input(shape=(2 * windows + 1, 531))
    input_3 = Input(shape=(2 * windows + 1, 21))

    # Fuse the pooled branch features.  `concat_axis` was previously
    # declared but ignored (axis hard-coded to -1); the default preserves
    # the old behavior exactly.
    x = Concatenate(axis=concat_axis)([_conv_branch(input_1),
                                       _conv_branch(input_2),
                                       _conv_branch(input_3)])
    x = Dense(units=2, activation="softmax", use_bias=True,
              kernel_initializer='he_normal',
              kernel_regularizer=l2(weight_decay))(x)

    model = Model(inputs=[input_1, input_2, input_3], outputs=[x], name="DenseBlock")
    optimizer = Adam(lr=1e-4, epsilon=1e-8)

    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])

    return model


def perform_eval_1(predictions, Y_test, verbose=0):
    """Evaluate binary-classification performance of softmax predictions.

    Args:
        predictions: array-like of shape (n, 2) — per-class probabilities;
            column 1 is the positive-class score.
        Y_test: one-hot encoded labels of shape (n, 2).
        verbose: when 1, also print the metrics.

    Returns:
        list: [sn, sp, acc, pre, f1, mcc, gmean, auroc, aupr].
    """
    # Predicted class: argmax over the two softmax columns.
    class_label = np.uint8(np.argmax(predictions, axis=1))
    # True labels: positive-class column of the one-hot matrix.
    R = np.asarray(np.uint8([sublist[1] for sublist in Y_test]))

    # Fix: pin labels=[0, 1] so the confusion matrix is always 2x2.  With
    # labels=None, a fold where only one class appears yields a 1x1 matrix
    # and the CM[1][1] indexing below raises IndexError.
    CM = metrics.confusion_matrix(R, class_label, labels=[0, 1])
    CM = np.double(CM)  # CM[0][0]=TN, CM[0][1]=FP, CM[1][0]=FN, CM[1][1]=TP
    print("TP:"+str(CM[1][1]))
    print("TN:"+str(CM[0][0]))
    print("FN:"+str(CM[1][0]))
    print("FP:"+str(CM[0][1]))

    # Metric definitions.  NOTE(review): a zero denominator (e.g. no
    # positives in the fold) still yields a division warning / nan, as in
    # the original implementation.
    sn = (CM[1][1]) / (CM[1][1] + CM[1][0])  # sensitivity/recall: TP/(TP+FN)
    sp = (CM[0][0]) / (CM[0][0] + CM[0][1])  # specificity: TN/(TN+FP)
    acc = (CM[1][1] + CM[0][0]) / (CM[1][1] + CM[0][0] + CM[0][1] + CM[1][0])  # (TP+TN)/total
    pre = (CM[1][1]) / (CM[1][1] + CM[0][1])  # precision: TP/(TP+FP)
    f1 = (2 * CM[1][1]) / (2 * CM[1][1] + CM[0][1] + CM[1][0])  # 2*TP/(2*TP+FP+FN)
    mcc = (CM[1][1] * CM[0][0] - CM[0][1] * CM[1][0]) / np.sqrt(
        (CM[1][1] + CM[0][1]) * (CM[1][1] + CM[1][0]) * (CM[0][0] + CM[0][1]) * (
                    CM[0][0] + CM[1][0]))  # Matthews correlation coefficient
    gmean = np.sqrt(sn * sp)  # geometric mean of sensitivity and specificity
    # Ranking metrics use the raw positive-class probability, not the argmax.
    auroc = metrics.roc_auc_score(y_true=R, y_score=np.asarray(predictions)[:, 1], average="macro")
    aupr = metrics.average_precision_score(y_true=R, y_score=np.asarray(predictions)[:, 1], average="macro")

    if verbose == 1:
        print("Sn(Recall):", "{:.4f}".format(sn), "Sp:", "{:.4f}".format(sp), "Acc:", "{:.4f}".format(acc),
              "Pre(PPV):", "{:.4f}".format(pre), "F1:", "{:.4f}".format(f1), "MCC:", "{:.4f}".format(mcc),
              "G-mean:", "{:.4f}".format(gmean), "AUROC:", "{:.4f}".format(auroc), "AUPR:", "{:.4f}".format(aupr))

    return [sn, sp, acc, pre, f1, mcc, gmean, auroc, aupr]


# Description: write experiment results to a file
# Input: file handle and a list of results
# Output: none
def write_res_1(filehandle, res, fold=0):
    """Append one fold's nine evaluation metrics to an open file handle.

    Args:
        filehandle: writable text file object; flushed after writing.
        res: sequence of nine metric values, in the order
            [sn, sp, acc, pre, f1, mcc, gmean, auroc, aupr].
        fold: fold index written at the start of the line.
    """
    metric_names = ("Sn(Recall)", "Sp", "Acc", "Pre(PPV)", "F1",
                    "MCC", "G-mean", "AUROC", "AUPR")
    # One "name: value" pair per metric, each value at 4 decimal places.
    body = " ".join(f"{name}: {value:.4f}"
                    for name, value in zip(metric_names, res))
    filehandle.write(f"Fold: {fold} {body}\n")
    # Flush so results survive even if a later fold crashes the run.
    filehandle.flush()
    return

if __name__ == '__main__':

    # Hyper-parameters.
    BATCH_SIZE = 50
    K_FOLD = 10
    N_EPOCH = 200
    WINDOWS = 32

    # Project-local feature encoders, hoisted out of the fold loop
    # (the original re-ran these imports on every iteration).
    from AAINDEX import one_AAINDEX
    from BEAA import one_BEAA
    from BLOSUM62 import one_BLOSUM62

    # Context manager guarantees the result file is closed even if a fold
    # raises mid-run (the original left it open on any exception).
    with open(f"./result/cv/RNN-window{WINDOWS}-result.txt", "w", encoding='utf-8') as res_file:
        # Cross-validation over pre-split per-fold CSV files.
        for fold in range(K_FOLD):
            # Read the sequence fragments (train + test, positive + negative)
            # for this fold; `with` closes the files immediately after reading.
            with open(f"hit-40/cv10/{WINDOWS}/train/train-{fold + 1}.csv", "r", encoding='utf-8') as f_train:
                train_data = f_train.readlines()
            with open(f"hit-40/cv10/{WINDOWS}/test/test-{fold + 1}.csv", "r", encoding='utf-8') as f_test:
                test_data = f_test.readlines()

            # BLOSUM62 encoding of the fragments + one-hot class labels.
            train_X_1, train_Y = one_BLOSUM62(train_data, windows=WINDOWS)
            train_Y = to_categorical(train_Y, num_classes=2)
            test_X_1, test_Y = one_BLOSUM62(test_data, windows=WINDOWS)
            test_Y = to_categorical(test_Y, num_classes=2)
            # Physicochemical property information (AAINDEX).
            train_X_2, _ = one_AAINDEX(train_data, windows=WINDOWS)
            test_X_2, _ = one_AAINDEX(test_data, windows=WINDOWS)
            # Protein structure information (BEAA).
            train_X_3, _ = one_BEAA(train_data, windows=WINDOWS)
            test_X_3, _ = one_BEAA(test_data, windows=WINDOWS)

            # Build and summarize a fresh three-branch model for this fold.
            model = build_model(windows=WINDOWS)
            model.summary()

            print("fold:", str(fold))
            # Train with early stopping on validation loss; class weight 1.41
            # up-weights the positive class (presumably to counter class
            # imbalance — TODO confirm against the dataset ratio).
            model.fit(x=[train_X_1, train_X_2, train_X_3], y=train_Y,
                      batch_size=BATCH_SIZE, epochs=N_EPOCH,
                      callbacks=[EarlyStopping(monitor='val_loss', patience=10, mode='auto')],
                      shuffle=True, class_weight={0: 1.0, 1: 1.41}, verbose=2,
                      validation_data=([test_X_1, test_X_2, test_X_3], test_Y))

            # Predict on the held-out fold and evaluate.
            predictions = model.predict(x=[test_X_1, test_X_2, test_X_3], verbose=0)
            res = perform_eval_1(predictions, test_Y, verbose=1)

            # Append this fold's metrics to the result file.
            write_res_1(res_file, res, fold)

