import tensorflow as tf
import numpy as np
from sklearn.model_selection import train_test_split  #数据集的分割函数
from sklearn.preprocessing import StandardScaler      #数据预处理
from sklearn import (svm,tree)
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import (auc, f1_score, precision_recall_curve,roc_curve,average_precision_score,
                             roc_auc_score,recall_score,accuracy_score,precision_score)
#from sklearn.metrics import PrecisionRecallDisplay
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import KFold
from scipy import interp #报错不影响运行
import matplotlib.pyplot as plt
from utils import *
import time
import math

#graph=tf.get_default_graph()
# TF1-style session configuration: allocate GPU memory on demand instead of
# reserving the whole card when the Session starts.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True


def load_data(infile,outfile,pos):
    """Load node embeddings and assemble the per-node feature matrix.

    Args:
        infile: base input directory; embeddings are read from
            ``<infile>1/<feature_type>/..._rna.emb`` and
            ``<infile>2/<feature_type>/..._rna_gene.emb``, and the node
            vocabulary from ``<infile>2/rna.txt``.
        outfile: output directory; the matrix is dumped to
            ``miRNA_features.txt`` there.
        pos: feature source selector — 1: RNA1 embeddings only,
            2: RNA2 embeddings only, 3: both concatenated (dim doubled).

    Returns:
        np.ndarray of shape (num_nodes, feature_dim) or
        (num_nodes, feature_dim * 2) for pos == 3.

    NOTE(review): relies on module-level global ``feature_type`` and on
    ``load_feature_data`` / ``load_dic`` from utils; keys are assumed to be
    1-based integer node ids (hence ``int(key) - 1``) — confirm against the
    .emb file format.
    """
    file1=infile+'1/'+str(feature_type)+'/'+str(feature_type).lower()+'_rna.emb'
    file2=infile+'2/'+str(feature_type)+'/'+str(feature_type).lower()+'_rna_gene.emb'
    file3=infile+'2/rna.txt'
    RNA1 = load_feature_data(file1)  # e.g. ~255 entries in the reference data
    RNA2 = load_feature_data(file2)  # e.g. ~16216 entries
    feature_dim=len(list(RNA1.values())[0])
    print("feature_dim: ",feature_dim)
    vocab=load_dic(file3)  # node-id -> name vocabulary (e.g. ~2599 entries)
    num_nodes=len(list(vocab.keys()))
    print("num_nodes: ",num_nodes)
    if pos==3:
        # Concatenated layout: [RNA1 part | RNA2 part]. The three loops are
        # order-sensitive: RNA1-only rows get zeros on the right, RNA2 rows
        # then overwrite with zeros on the left, and finally rows present in
        # BOTH dicts get the true concatenation (list + list appends).
        features = np.zeros((num_nodes, feature_dim*2), dtype=np.float32)
        for key in RNA1.keys():
            features[int(key) - 1, :] = np.append(np.array(RNA1[key]), np.zeros((1, feature_dim), dtype=np.float32))
        num = 0
        for key in RNA2.keys():
            if int(key) <= num_nodes:
                features[int(key) - 1, :] = np.append(np.zeros((1, feature_dim), dtype=np.float32), np.array(RNA2[key]))
                num = num + 1
        for key in RNA1.keys():
            if key in RNA2:
                # NOTE(review): assumes embedding values are Python lists so
                # that ``+`` concatenates (not numpy element-wise add) — verify.
                features[int(key) - 1, :] = np.array(RNA1[key] + RNA2[key])
    else:
        features = np.zeros((num_nodes, feature_dim), dtype=np.float32)
        if pos==1:
            for key in RNA1.keys():
                features[int(key) - 1, :] = np.array(RNA1[key])
        elif pos == 2:
            for key in RNA2.keys():
                if int(key) <= num_nodes:
                    features[int(key) - 1, :] = np.array(RNA2[key])
        else:
            print("pos error!!!!!!!!")
    np.savetxt(outfile+"miRNA_features.txt",features)
    if pos == 3:
        print('miRNA-gene miRNA number:',num)
    print('load data finish!')
    return features

def weight_variables(shape):
    """Create a weight Variable drawn from N(0, 1/sqrt(n_fc1)), fixed seed.

    NOTE: the stddev is tied to the global ``n_fc1`` regardless of ``shape``.
    """
    initializer = tf.random_normal(
        shape=shape, mean=0.0, stddev=1.0 / math.sqrt(n_fc1), seed=1)
    return tf.Variable(initializer)

def bias_variables(shape):
    """Create a bias Variable initialized to all zeros."""
    return tf.Variable(tf.constant(0.0, shape=shape))

def evaluate(equal_list,y_true, y_score,predict,flag):
    """Compute accuracy / AUPR / AUROC / F1 for one evaluation pass.

    Args:
        equal_list: boolean TF tensor, per-sample (pred == label); accuracy is
            returned as a TF tensor built from it (caller must ``.eval()`` it).
        y_true: 1-D array of {0,1} ground-truth labels.
        y_score: 1-D array of positive-class probabilities.
        predict: 1-D array of {0,1} hard predictions (used for F1 only).
        flag: 1 on the final test pass (would trigger plotting, see below).

    Returns:
        (accuracy_tensor, aupr, roc_auc, f1)
    """

    ps, rs, _ = precision_recall_curve(y_true, y_score)
    #recall = tf.metrics.recall(Y_truelable, Y_predictlable)
    #precision = tf.metrics.precision(Y_truelable, Y_predictlable)
    aupr = auc(rs, ps)
    roc = roc_auc_score(y_true, y_score)

    #epsilon = 1e-7
    #f1 = 2 * recall * precision / (recall + precision + epsilon)
    f1=f1_score(y_true, predict)
    #print(y_true)
    #print(y_score)

    # equal_list holds one boolean per sample; mean of the cast gives accuracy.
    accuracy = tf.reduce_mean(tf.cast(equal_list, tf.float32))
    # NOTE(review): "and False" permanently disables the plotting branch below;
    # remove it to re-enable the ROC/PR figure on the final test pass.
    if flag==1 and False:
        # Axis-label font settings for both subplots.
        font1 = {'family': 'Times New Roman',
                 'weight': 'normal',
                 'size': 30,
                 }
        fig = plt.figure(figsize=(50, 25))
        tick = [0, 0.2, 0.4, 0.6, 0.8, 1]
        ax1 = plt.subplot(1, 2, 1)
        # Second subplot (row 1, column 2): PR curve.
        ax2 = plt.subplot(1, 2, 2)
        fpr, tpr, thresholds = roc_curve(y_true, y_score)
        roc_auc = auc(fpr, tpr)
        plt.sca(ax1)
        plt.plot(fpr, tpr, lw=2,  color='darkorange',label='ROC(area = {})'.format(roc_auc))
        plt.xlim([0.0, 1.0]);
        plt.ylim([0.0, 1.05]);
        plt.xlabel('False Positive Rate',font1);
        plt.xticks(tick, fontsize=20, color='#000000')
        plt.yticks(tick, fontsize=20, color='#000000')
        plt.ylabel('True Positive Rate',font1);
        plt.title('ROC Curve',font1);
        plt.legend(loc="lower right",fontsize=20);

        plt.sca(ax2)
        plt.plot(rs, ps, lw=2, label='AUPR(area = {})'.format(aupr))
        plt.xticks(tick, fontsize=20, color='#000000')
        plt.yticks(tick, fontsize=20, color='#000000')
        plt.xlim([0.0, 1.0]);
        plt.ylim([0.0, 1.05]);
        plt.ylabel('Precision', font1);
        plt.xlabel('Recall', font1);
        plt.title('PR Curve',font1);
        plt.legend(loc="lower right",fontsize=20)

        ticks = str(time.time()).replace('.', '')
        plt.savefig(outfile + "AUPR-AUROC-" + ticks + '.png')

    return accuracy,aupr,roc,f1

def model():
    """Build the TF1 CNN graph: conv -> max-pool -> FC -> dropout -> FC.

    Uses module-level globals: n_future (input feature length), n_class,
    n_hidden (conv filters), n_fc1 (FC width), future_out (pooled length),
    kernel_size.

    Returns:
        (x, y_true, rate, y_fc1, train_op, y_predict) — input/label/dropout
        placeholders, the pre-dropout FC activations (used later as learned
        features), the RMSProp train op, and the class logits.
    """
    with tf.variable_scope("data"):
        x = tf.placeholder(tf.float32, [None, n_future])
        y_true = tf.placeholder(tf.int32, [None, n_class])
        rate = tf.placeholder(tf.float32)  # dropout rate (0 at eval time)

    #with tf.variable_scope('cnn'):
        # shape (batch size, max sentence length, kernel nums)
     #   feature_maps = tf.layers.conv1d(
      #      inputs=x,
       #     kernel_size=args.kernel_size,
        #    filters=args.filters,
         #   padding='same',
          #  use_bias=True,
           # activation=tf.nn.tanh,
            #name='convolution'
        #)
    with tf.variable_scope("cov1"):
        w_conv1 = weight_variables([1, kernel_size, 1, n_hidden])
        b_conv1 = bias_variables([n_hidden])  # one bias per filter (n_hidden)

        # Treat each sample as a 1 x n_future image with a single channel.
        x_reshape = tf.reshape(x, [-1, 1, n_future, 1])
        # Convolution + ReLU activation.
        #x_relu1 =tf.nn.conv2d(x_reshape, w_conv1, strides=[1, 1, 1, 1], padding="SAME")
        x_relu1 = tf.nn.relu(tf.nn.conv2d(x_reshape, w_conv1, strides=[1, 1, 1, 1], padding="SAME") + b_conv1)#(16,1024)
        #x_relu1 = tf.nn.relu(x_reshape + b_conv1)
        # Max-pool with stride 2: even with SAME padding this halves the
        # feature dimension (n_future -> future_out).
        x_pool1 = tf.nn.max_pool(x_relu1, ksize=[1, 1, 2, 1], strides=[1, 1, 2, 1], padding="SAME")#(16,512)

    with tf.variable_scope("conv_fc"):
        # Fully connected layer: flatten pooled maps to future_out * n_hidden.
        w_fc1 = weight_variables([1 * future_out * n_hidden, n_fc1])#2,取消pool #(16*512,256)
        b_fc1 = bias_variables([n_fc1])  # n_fc1 = 256
        x_fc_reshape = tf.reshape(x_pool1, [-1, 1 * future_out * n_hidden])#2,取消pool #(16*512)
        y_fc1 = tf.matmul(x_fc_reshape, w_fc1) + b_fc1  # pre-dropout features
        h_fc1_drop = tf.nn.dropout(y_fc1,keep_prob =1-rate)

        w_fc2 = weight_variables([n_fc1, n_class])
        b_fc2 = bias_variables([n_class])
        y_predict = tf.matmul(h_fc1_drop, w_fc2) + b_fc2  # class logits

    with tf.variable_scope("soft_cross"):
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_true, logits=y_predict))

    with tf.variable_scope("opmizer"):
        #        train_op=tf.train.AdamOptimizer(0.01).minimize(loss)
        train_op = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(loss)

    #with tf.variable_scope("evaluate"):

    return x, y_true, rate, y_fc1, train_op, y_predict

def _collect_outputs(label_true, y_prediction, score):
    """Flatten one-hot labels / logits / softmax scores into 1-D arrays.

    Args:
        label_true: (N, 2) one-hot ground-truth array.
        y_prediction: (N, 2) raw logits from the network.
        score: (N, 2) softmax probabilities.

    Returns:
        (y_true, y_score, predict): binary labels, positive-class
        probabilities, and hard {0,1} predictions.
    """
    true_list = []
    prediction_list = []
    for row in range(len(label_true)):
        prediction_list.append(score[row][1])  # positive-class probability
        if label_true[row][1] > label_true[row][0]:
            true_list.append(1)
        else:
            true_list.append(0)
    predict = np.zeros(len(true_list), dtype=np.int32)
    for row in range(len(y_prediction)):
        # FIX: the original compared y_prediction[row][0] > y_prediction[row][1],
        # which marked the *negative* class as positive and made f1_score
        # inconsistent with y_true / y_score built above.
        if y_prediction[row][1] > y_prediction[row][0]:
            predict[row] = 1
    return np.array(true_list), np.array(prediction_list), predict

def train():
    """Train the CNN, evaluate on validation/test, then fit GaussianNB on the
    learned FC features.

    Uses module-level globals: X_train/Y_train, X_vld/Y_vld, X_test/Y_test,
    Y_train_1/Y_test_1, args, config, outfile, infile, predict_train.

    Returns:
        ([cnn acc, aupr, roc, f1], [nb acc, aupr, roc, f1],
         [recall, precision, fpr, tpr]) for the test split.
    """
    print('start train!')
    x, y_true, rate, y_fc1, train_op, y_predict = model()
    init_op = tf.global_variables_initializer()

    training_epochs = args.epoch
    batch_size = args.batch_size
    total_batches = X_train.shape[0] // batch_size
    with tf.Session( config=config) as sess:#graph=graph,config=config
        sess.run(init_op)
        for epoch in range(training_epochs):
            for i in range(total_batches):
                start = (i * batch_size) % X_train.shape[0]
                end = start + batch_size
                sess.run(train_op, feed_dict={x: X_train[start:end], y_true: Y_train[start:end], rate: 0.5})
                # FIX: the flattening loops previously reused `i` as their loop
                # variable, clobbering the batch counter shown in the prints.
                if i % 50 == 0:
                    label_true, y_prediction = sess.run([y_true, y_predict], feed_dict={x: X_vld, y_true: Y_vld, rate: 0})
                    score = np.array(tf.nn.softmax(y_prediction, axis=1).eval())
                    equal_list = tf.equal(tf.argmax(label_true, 1), tf.argmax(y_prediction, 1))
                    label_true = np.array(label_true)
                    y_prediction = np.array(y_prediction)
                    true_labels, y_score, predict = _collect_outputs(label_true, y_prediction, score)
                    # Debug dumps; overwritten on every validation pass.
                    with open(outfile + 'y_predict.txt', 'w') as f:
                        f.write(str(y_prediction))
                    with open(outfile + 'score.txt', 'w') as f:
                        f.write(str(score))
                    with open(outfile + 'label_true.txt', 'w') as f:
                        f.write(str(label_true))
                    with open(outfile + 'equal_list.txt', 'w') as f:
                        f.write(str(equal_list.eval()))
                    output = evaluate(equal_list, true_labels, y_score, predict, 0)
                    print("Epoch %d,Steps %d，validation accuracy：%f" % (epoch + 1, i, output[0].eval()))
                    print("Epoch %d,Steps %d，validation aupr：%f" % (epoch + 1, i, output[1]))
                    print("Epoch %d,Steps %d，validation roc：%f" % (epoch + 1, i, output[2]))
                    print("Epoch %d,Steps %d，validation f1：%f" % (epoch + 1, i, output[3]))
        print("*" * 50)
        # Final evaluation on the held-out test split.
        label_true, y_prediction = sess.run([y_true, y_predict],
                                                   feed_dict={x: X_test, y_true: Y_test, rate: 0})
        score = np.array(tf.nn.softmax(y_prediction, axis=1).eval())
        equal_list = tf.equal(tf.argmax(label_true, 1), tf.argmax(y_prediction, 1))
        label_true = np.array(label_true)
        y_prediction = np.array(y_prediction)
        true_labels, y_score, predict = _collect_outputs(label_true, y_prediction, score)
        output = evaluate(equal_list, true_labels, y_score, predict, 1)
        cnn_metrics = [output[0].eval(), output[1], output[2], output[3]]

        print("CNN test accuracy：%f, test aupr：%f， test roc：%f，test f1：%f" % (cnn_metrics[0], cnn_metrics[1], cnn_metrics[2], cnn_metrics[3]))
        # Extract the learned pre-dropout FC features and fit a GaussianNB on them.
        x_temp1 = sess.run(y_fc1, feed_dict={x: X_train})
        x_temp2 = sess.run(y_fc1, feed_dict={x: X_test})

        clf1 = GaussianNB()
        clf1.fit(x_temp1, Y_train_1)

        pred1 = clf1.predict(x_temp2)
        pred1_proba = clf1.predict_proba(x_temp2)[:, 1]
        if predict_train==1:
            # Rank all samples by positive probability and dump the top ones.
            pred_dict = dict(zip(range(1, len(pred1_proba) + 1), pred1_proba))
            pred_sorted = sorted(pred_dict.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
            # NOTE(review): slice keeps 99 entries although the output files
            # are named *_100 — confirm intended cutoff.
            pred_sorted_50 = pred_sorted[:99]
            with open(outfile + 'pred_proba_100.txt', 'w') as f:
                f.write(str(pred_sorted_50))
            vocab = load_dic(infile + '2//rna.txt')
            new_pred_sorted_50 = {}
            for key in vocab:
                vocab[key] = ''.join(vocab[key])
            # Map node ids back to names (fall back to 'keyerror' if missing).
            for value in pred_sorted_50:
                new_pred_sorted_50[vocab.setdefault(str(value[0]), 'keyerror')] = value[1]
            with open(outfile + 'result_100.txt', 'w') as f:
                for key in new_pred_sorted_50:
                    f.write(str((key, new_pred_sorted_50[key])) + '\n')
        precision1, recall1, _ = precision_recall_curve(Y_test_1, pred1_proba)
        fpr, tpr, thresholds = roc_curve(Y_test_1, pred1_proba)
        nb_metrics = [clf1.score(x_temp2, Y_test_1), auc(recall1, precision1),
                      roc_auc_score(Y_test_1, pred1_proba), f1_score(Y_test_1, pred1)]
        print("CNN+GaussianNB test accuracy: %f, test aupr：%f， test roc：%f，test f1：%f" % (nb_metrics[0], nb_metrics[1], nb_metrics[2], nb_metrics[3]))
        return cnn_metrics, nb_metrics, [recall1, precision1, fpr, tpr]

def _evaluate_on_test(name, clf, show_precision_recall):
    """Evaluate a fitted classifier on the global test split and print a report.

    Args:
        name: display name used in the summary line.
        clf: fitted sklearn-style classifier (predict / predict_proba / score).
        show_precision_recall: also print point precision/recall lines.

    Returns:
        [accuracy, aupr, roc_auc, f1]
    """
    pred = clf.predict(X_test)
    pred_proba = clf.predict_proba(X_test)[:, 1]
    precision, recall, _ = precision_recall_curve(Y_test_1, pred_proba)
    if show_precision_recall:
        print("precision:" + str(precision_score(Y_test_1, pred)))
        print('recall:' + str(recall_score(Y_test_1, pred)))
    metrics = [clf.score(X_test, Y_test_1), auc(recall, precision),
               roc_auc_score(Y_test_1, pred_proba), f1_score(Y_test_1, pred)]
    print("%s test accuracy：%f, test aupr：%f， test roc：%f，test f1：%f" % (
        name, metrics[0], metrics[1], metrics[2], metrics[3]))
    return metrics

def sklearn_model():
    """Train seven baseline classifiers on the global train split and report
    accuracy / AUPR / AUROC / F1 on the test split.

    Uses module-level globals X_train, Y_train_1, X_test, Y_test_1.

    Returns:
        Seven [accuracy, aupr, roc, f1] lists, in the order:
        SVM, DecisionTree, LogisticRegression, RandomForest, MLP,
        XGBoost, GaussianNB.
    """
    clf1 = svm.SVC(C=0.8, kernel='linear', probability=True)  # linear kernel
    clf1.fit(X_train, Y_train_1)

    clf2 = tree.DecisionTreeClassifier(criterion='entropy', min_samples_leaf=3)
    clf2.fit(X_train, Y_train_1)

    clf3 = LogisticRegression(max_iter=1000)
    clf3.fit(X_train, Y_train_1)

    clf4 = RandomForestClassifier(n_estimators=160)
    clf4.fit(X_train, Y_train_1)

    clf5 = MLPClassifier(activation='relu', alpha=1e-05, batch_size='auto', beta_1=0.9,
       beta_2=0.999, early_stopping=False, epsilon=1e-08,
       hidden_layer_sizes=(5, 2), learning_rate='constant',
       learning_rate_init=0.001, max_iter=1000, momentum=0.9,
       nesterovs_momentum=True, power_t=0.5, random_state=2, shuffle=True,
       solver='lbfgs', tol=0.01, validation_fraction=0.1, verbose=False,
       warm_start=False)
    clf5.fit(X_train, Y_train_1)
    print('层数：%s，输出单元数量：%s' % (clf5.n_layers_, clf5.n_outputs_))

    clf6 = XGBClassifier(max_depth=5, learning_rate=0.1, n_estimators=50, eval_metric=['logloss', 'auc', 'error'], use_label_encoder=False, objective='binary:logistic')
    clf6.fit(X_train, Y_train_1)
    clf7 = GaussianNB(var_smoothing=1, priors=(0.98, 0.02))
    clf7.fit(X_train, Y_train_1)

    # Evaluation happens left-to-right, so console output order is unchanged.
    return (_evaluate_on_test("SVM", clf1, True),
            _evaluate_on_test("Decision Trees", clf2, True),
            _evaluate_on_test("LogisticRegression", clf3, True),
            _evaluate_on_test("RandomForest", clf4, True),
            _evaluate_on_test("MLP", clf5, False),
            _evaluate_on_test("xgboost", clf6, False),
            _evaluate_on_test("GaussianNB", clf7, False))

if __name__ == "__main__":
    # Parse CLI arguments (parse_args comes from utils) and unpack into the
    # module-level globals the functions above rely on.
    args = parse_args()
    infile=args.input
    outfile = args.output
    train_model=args.train_model     # truthy -> sklearn baselines, else CNN
    train_balance=args.train_balance # truthy -> rebalance with `balance()`
    pos=args.pos                     # feature source selector for load_data
    feature_type=args.feature_type
    kernel_size=args.kernel_size
    negative=args.negative
    draw=args.draw                   # in k-fold mode: 1 -> ROC figure, else PR
    predict_train = args.predict     # 1 -> train/test on full data, rank nodes
    print(train_balance)
    miRNA=load_data(infile,outfile,pos)
    label=load_label(infile,outfile)
    if train_balance:
        miRNA,label=balance(miRNA,label,negative)
    else:
        print('No balance')
    # One-hot encode the binary labels: shape (N, 2).
    labels = np.asarray(pd.get_dummies(label), dtype=np.int8)
    print(labels.shape)
    print(miRNA.shape)
    if predict_train==1:
        # Prediction mode: train and test on ALL data (no held-out test set);
        # only the validation split is carved out for progress reporting.
        _, X_vld, _, Y_vld = train_test_split(miRNA, labels, test_size=0.3, random_state=20, stratify=labels)
        X_test=miRNA
        X_train=miRNA
        Y_test=labels
        Y_train = labels
        Y_train_1 = np.argmax(Y_train, axis=1)
        Y_test_1 = np.argmax(Y_test, axis=1)
        Y_vld_1 = np.argmax(Y_vld, axis=1)
        # CNN hyperparameters shared via globals with model()/train().
        n_future = len(miRNA[0])
        n_class = 2
        n_hidden = 16
        n_fc1 = 256
        # future_out = ceil(n_future / 2): length after stride-2 max-pooling.
        if n_future % 2 == 0:
            future_out = n_future // 2
        else:
            future_out = n_future // 2 + 1

        train()
    else:
        if args.kfold == 0:
            # Single 70/15/15 train/test/validation split.
            X_train, X_, Y_train, Y_ = train_test_split(miRNA, labels, test_size=0.3, random_state=20, stratify=labels)
            X_test, X_vld, Y_test, Y_vld = train_test_split(X_, Y_, test_size=0.5, random_state=20, stratify=Y_)
            # Data standardisation (currently disabled).
            # stdsc = StandardScaler()
            # X_train = stdsc.fit_transform(X_train)
            # X_test = stdsc.fit_transform(X_test)
            # X_vld = stdsc.fit_transform(X_vld)

            Y_train_1 = np.argmax(Y_train, axis=1)
            Y_test_1 = np.argmax(Y_test, axis=1)
            Y_vld_1 = np.argmax(Y_vld, axis=1)
            with open(outfile + 'Y_test_1.txt', 'w') as f:
                f.write(str(Y_test_1));
                f.close()
            with open(outfile + 'Y_train_1.txt', 'w') as f:
                f.write(str(Y_train_1));
                f.close()
            n_future = len(miRNA[0])
            n_class = 2
            n_hidden = 16
            n_fc1 = 256
            if n_future % 2 == 0:
                future_out = n_future // 2
            else:
                future_out = n_future // 2 + 1
            if train_model:
                sklearn_model()
            # Model selection: otherwise run the CNN pipeline.
            else:
                train()
            #
        else:
            # K-fold cross-validation mode.
            kf = KFold(n_splits=args.kfold, shuffle=True, random_state=1)  # initialise KFold
            clf0_kfold = []
            clf1_kfold = []
            clf2_kfold = []
            clf3_kfold = []
            clf4_kfold = []
            clf5_kfold = []
            clf6_kfold = []
            clf7_kfold = []
            recall_kfold = []
            precision_kfold = []
            fpr_kfold = []
            tpr_kfold = []
            i = 0
            # Common grids used to interpolate per-fold ROC / PR curves.
            mean_fpr = np.linspace(0, 1, 100)
            tprs = []
            mean_recall = np.linspace(0, 1, 100)
            precisions = []
            reversed_mean_precision = 0
            AUCs = []

            for training_index, testing_index in kf.split(miRNA):  # split the data per fold
                X_train, Y_train = miRNA[training_index], labels[training_index]
                X_test, Y_test = miRNA[testing_index], labels[testing_index]
                X_train, X_vld, Y_train, Y_vld = train_test_split(X_train, Y_train, test_size=0.2, random_state=20,
                                                                  stratify=Y_train)
                # Data standardisation (currently disabled).
                # stdsc = StandardScaler()
                # X_train = stdsc.fit_transform(X_train)
                # X_test = stdsc.fit_transform(X_test)
                # X_vld = stdsc.fit_transform(X_vld)

                Y_train_1 = np.argmax(Y_train, axis=1)
                Y_test_1 = np.argmax(Y_test, axis=1)
                Y_vld_1 = np.argmax(Y_vld, axis=1)
                # NOTE(review): these dumps are overwritten every fold; only the
                # last fold's splits survive on disk.
                with open(outfile + 'Y_test_1.txt', 'w') as f:
                    f.write(str(Y_test_1));
                    f.close()
                with open(outfile + 'Y_train_1.txt', 'w') as f:
                    f.write(str(Y_train_1));
                    f.close()
                n_future = len(miRNA[0])
                n_class = 2
                n_hidden = 16
                n_fc1 = 256
                if n_future % 2 == 0:
                    future_out = n_future // 2
                else:
                    future_out = n_future // 2 + 1
                if train_model:
                    clf1, clf2, clf3, clf4, clf5, clf6, clf7 = sklearn_model()
                    clf1_kfold.append(clf1)
                    clf2_kfold.append(clf2)
                    clf3_kfold.append(clf3)
                    clf4_kfold.append(clf4)
                    clf5_kfold.append(clf5)
                    clf6_kfold.append(clf6)
                    clf7_kfold.append(clf7)
                # Model selection: otherwise run the CNN pipeline.
                else:
                    clf0, clf1, metric = train()
                    clf0_kfold.append(clf0)
                    clf1_kfold.append(clf1)
                    if draw == 1:
                        # metric = [recall, precision, fpr, tpr] from train().
                        tprs.append(interp(mean_fpr, metric[2], metric[3]))
                        tprs[-1][0] = 0.0
                        plt.plot(metric[2], metric[3], lw=1, alpha=0.3,
                                 label='ROC fold %d(area=%0.2f)' % (i, auc(metric[2], metric[3])))
                        i += 1
                    else:
                        reversed_recall = np.fliplr([metric[0]])[0]
                        reversed_precision = np.fliplr([metric[1]])[0]
                        # reversed_mean_precision += interp(mean_recall, reversed_recall, reversed_precision)
                        # reversed_mean_precision[0] = 0.0
                        # AUCs.append(auc(metric[0], metric[1]))
                        # plt.plot(metric[0], metric[1], lw=1, alpha=0.3,
                        #         label='AUPR fold %d(area=%0.2f)' % (i, auc(reversed_recall, reversed_precision)))
                        # interp requires its second argument to be increasing, but
                        # precision_recall_curve returns recall in decreasing order, so
                        # recall is reversed — and precision with it, to keep the curve
                        # orientation unchanged.
                        precisions.append(interp(mean_recall, reversed_recall, reversed_precision))
                        precisions[-1][0] = 1.0
                        plt.plot(metric[0], metric[1], lw=1, alpha=0.3,
                                 label='AUC fold %d(area=%0.2f)' % (i, auc(metric[0], metric[1])))
                        i += 1
                    # if recall_kfold!=[]:
                    #    if len(metric[0])==len(recall_kfold[0]):
                    #        recall_kfold.append(metric[0])
                    #    if len(metric[1]) == len(precision_kfold[0]):
                    #        precision_kfold.append(metric[1])
                    #    if len(metric[2]) == len(fpr_kfold[0]):
                    #        fpr_kfold.append(metric[2])
                    #    if len(metric[3])== len(tpr_kfold[0]):
                    #        tpr_kfold.append(metric[3])
                    # else:
                    #    recall_kfold.append(metric[0])
                    #    precision_kfold.append(metric[1])
                    #    fpr_kfold.append(metric[2])
                    #    tpr_kfold.append(metric[3])

            # Report fold-averaged metrics.
            if train_model:
                print(np.mean(clf1_kfold, axis=0).shape)
                print("SVM kfold accuracy：%f, kfold aupr：%f， kfold roc：%f，kfold f1：%f" % (
                    np.mean(clf1_kfold, axis=0)[0], np.mean(clf1_kfold, axis=0)[1],
                    np.mean(clf1_kfold, axis=0)[2], np.mean(clf1_kfold, axis=0)[3]))
                print("Decision Trees kfold accuracy：%f, kfold aupr：%f， kfold roc：%f，kfold f1：%f" % (
                    np.mean(clf2_kfold, axis=0)[0], np.mean(clf2_kfold, axis=0)[1],
                    np.mean(clf2_kfold, axis=0)[2], np.mean(clf2_kfold, axis=0)[3]))
                print("LogisticRegression kfold accuracy：%f, kfold aupr：%f， kfold roc：%f，kfold f1：%f" % (
                    np.mean(clf3_kfold, axis=0)[0], np.mean(clf3_kfold, axis=0)[1],
                    np.mean(clf3_kfold, axis=0)[2], np.mean(clf3_kfold, axis=0)[3]))
                print("RandomForest kfold accuracy：%f, kfold aupr：%f， kfold roc：%f，kfold f1：%f" % (
                    np.mean(clf4_kfold, axis=0)[0], np.mean(clf4_kfold, axis=0)[1],
                    np.mean(clf4_kfold, axis=0)[2], np.mean(clf4_kfold, axis=0)[3]))
                print("MLP kfold accuracy：%f, kfold aupr：%f， kfold roc：%f，kfold f1：%f" % (
                    np.mean(clf5_kfold, axis=0)[0], np.mean(clf5_kfold, axis=0)[1],
                    np.mean(clf5_kfold, axis=0)[2], np.mean(clf5_kfold, axis=0)[3]))
                print("xgboost kfold accuracy：%f, kfold aupr：%f， kfold roc：%f，kfold f1：%f" % (
                    np.mean(clf6_kfold, axis=0)[0], np.mean(clf6_kfold, axis=0)[1],
                    np.mean(clf6_kfold, axis=0)[2], np.mean(clf6_kfold, axis=0)[3]))
                print("GaussianNB kfold accuracy：%f, test aupr：%f， kfold roc：%f，kfold f1：%f" % (
                    np.mean(clf7_kfold, axis=0)[0], np.mean(clf7_kfold, axis=0)[1],
                    np.mean(clf7_kfold, axis=0)[2], np.mean(clf7_kfold, axis=0)[3]))
                #
            else:
                print("CNN kfold accuracy：%f, kfold aupr：%f， kfold roc：%f，kfold f1：%f" % (
                    np.mean(clf0_kfold, axis=0)[0], np.mean(clf0_kfold, axis=0)[1],
                    np.mean(clf0_kfold, axis=0)[2], np.mean(clf0_kfold, axis=0)[3]))
                print("CNN+GaussianNB kfold accuracy：%f, kfold aupr：%f， kfold roc：%f，kfold f1：%f" % (
                    np.mean(clf1_kfold, axis=0)[0], np.mean(clf1_kfold, axis=0)[1],
                    np.mean(clf1_kfold, axis=0)[2], np.mean(clf1_kfold, axis=0)[3]))
                if draw == 1:
                    # Mean ROC curve across folds with +/- 1 std band.
                    plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Luck', alpha=.8)
                    mean_tpr = np.mean(tprs, axis=0)
                    mean_tpr[-1] = 1.0
                    mean_auc = auc(mean_fpr, mean_tpr)
                    std_auc = np.std(tprs, axis=0)
                    plt.plot(mean_fpr, mean_tpr, color='b', label=r'Mean ROC (area=%0.2f)' % mean_auc, lw=2, alpha=.8)
                    std_tpr = np.std(tprs, axis=0)
                    tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
                    tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
                    plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='gray', alpha=.2)
                    plt.xlim([-0, 1.05])
                    plt.ylim([-0, 1.05])
                    plt.xlabel('False Positive Rate')
                    plt.ylabel('True Positive Rate')
                    plt.title('ROC')
                    plt.legend(loc='lower right')
                    #plt.show()
                    ticks = str(time.time()).replace('.', '')
                    plt.savefig(outfile + "AUROC-" + ticks + '.pdf')
                else:
                    # Mean PR curve across folds with +/- 1 std band.
                    mean_precision = np.mean(precisions, axis=0)
                    # mean_precision[-1] = 1.0
                    mean_aupr = auc(mean_recall, mean_precision)
                    std_aupr = np.std(precisions, axis=0)
                    std_precision = np.std(mean_precision, axis=0)
                    # mean_precision = np.fliplr([mean_precision])[0]
                    plt.plot(mean_recall, mean_precision, color='g', label=r'Mean AUPR (area=%0.2f)' % mean_aupr, lw=2,
                             alpha=.8)

                    # reversed_mean_precision /= 5
                    # reversed_mean_precision[0] = 1

                    precisions_upper = np.minimum(mean_precision + std_precision, 1)
                    precisions_lower = np.maximum(mean_precision - std_precision, 0)
                    # mean_auc_pr = auc(mean_recall, mean_precision)
                    # plt.plot(mean_recall, np.fliplr([reversed_mean_precision])[0], 'g',
                    #         label='Mean AUC (area = %0.2f)' % mean_auc_pr, lw=2)
                    plt.fill_between(mean_recall, precisions_lower, precisions_upper, color='gray', alpha=.2)
                    plt.xlim([0, 1.05])
                    plt.ylim([0, 1.05])
                    plt.xlabel('Recall')
                    plt.ylabel('Precision')
                    plt.title('Precision Recall')
                    plt.legend(loc="lower right")
                    # NOTE(review): plt.show() before savefig may clear the
                    # figure in some backends, saving an empty PDF — consider
                    # saving first (the ROC branch above does it that way).
                    plt.show()

                    ticks = str(time.time()).replace('.', '')
                    plt.savefig(outfile + "AUPR-" + ticks + '.pdf')
                    # print(sum(AUCs) / float(len(AUCs)))

                # plot_final(fpr_kfold, tpr_kfold, recall_kfold, precision_kfold,np.mean(clf0_kfold,axis =0)[1] , np.mean(clf0_kfold,axis =0)[2],outfile)
