import argparse
import random

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.manifold import TSNE
from sklearn.naive_bayes import GaussianNB
from sklearn import (svm,tree)
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from sklearn.neural_network import MLPClassifier
import time

def parse_args():
    """Parse command-line options for the miRNA-disease pipeline.

    Returns the argparse Namespace. Paths default to Windows locations;
    the commented alternatives are the Linux equivalents.
    """
    def str2bool(value):
        # BUGFIX: argparse's type=bool treats any non-empty string as True,
        # so "--train-model False" used to yield True. Parse the text
        # explicitly; accepted truthy spellings: true/1/yes/y (any case).
        return str(value).lower() in ('true', '1', 'yes', 'y')

    parser = argparse.ArgumentParser()

    parser.add_argument('--input', type=str, default='E:\\experiment\\data\\rna-multiple-sclerosis\\',#/mnt/5280d/twang/xyf/rna-multiple-sclerosis/
                        help='Input dataset path')
    parser.add_argument('--kernel_size', type=int, default=3,
                        help='kernel size')
    parser.add_argument('--filters', type=int, default=1,
                        help='filters')
    parser.add_argument('--pos', type=int, default=3,
                        help='feature type')
    parser.add_argument('--output', type=str, default='E:\\experiment\\wang\\output\\',#/mnt/5280d/twang/xyf/rna-multiple-sclerosis/output/
                        help='Output dataset path')
    parser.add_argument('--feature-type', type=str, default='Node2vec',
                        help='feature type')
    parser.add_argument('--epoch', type=int, default=2,
                        help='Number of epoch. Default is 2.')
    parser.add_argument('--batch_size', type=int, default=64,
                        help='Number of batch_size. Default is 64.')
    parser.add_argument('--train-model', type=str2bool, default=False,
                        help='train model')
    parser.add_argument('--train-balance', default=False, action='store_true',
                        help='train balance')
    parser.add_argument('--kfold', type=int, default=5,
                        help='Number of kfold. Default is 5.')
    parser.add_argument('--negative', type=int, default=1,
                        help='Number of negative sample. Default is 1.')
    parser.add_argument('--draw', type=int, default=1,
                        help='draw AUROC or AUPR. Default is 1.')
    parser.add_argument('--predict', type=int, default=0,
                        help='predict or train. Default is 0.')
    return parser.parse_args()

def load_feature_data(f_name):
    """Read a whitespace-separated feature file into a dict.

    The first line is a header and is skipped. Every following line is
    split on whitespace; the first token becomes the key and the rest of
    the tokens (as strings) become the value list.
    """
    features = {}
    with open(f_name, 'r') as handle:
        for row_index, raw_line in enumerate(handle):
            if row_index == 0:
                continue  # skip the header row
            tokens = raw_line.strip().split()
            features[tokens[0]] = tokens[1:]
    return features

def load_dic(f_name):
    """Read a whitespace-separated file into {first token: remaining tokens}.

    Unlike load_feature_data, no header line is skipped.
    """
    mapping = {}
    with open(f_name, 'r') as handle:
        for raw_line in handle:
            parts = raw_line.strip().split()
            key, values = parts[0], parts[1:]
            mapping[key] = values
    return mapping

def load_label(infile, outfile):
    """Build a 0/1 label vector marking rna.txt entries associated with
    Multiple Sclerosis.

    Reads ``<infile>2/mirna_disease.txt`` (tab-separated, columns 'mir' and
    'disease') and ``<infile>2//rna.txt``; writes three intermediate files
    under ``outfile`` and returns the label list, one entry per rna.txt
    entry (1 = associated with Multiple Sclerosis).
    """
    df = pd.read_table(infile+'2/mirna_disease.txt', delimiter='\t')
    df = df.loc[:, ['mir', 'disease']]
    df = df[df['disease'] == 'Multiple Sclerosis']
    # BUGFIX: the original assigned into the row copies yielded by
    # iterrows(), which never writes back to the DataFrame, so the
    # 'mir' -> 'miR' normalization silently did nothing. Replace the
    # whole column instead.
    df['mir'] = df['mir'].str.replace('mir', 'miR', regex=False)
    df.to_csv(outfile+'/miRNA_disease.txt', header=True, sep="\t")  # 103 rows expected
    vocab = load_dic(infile+'2//rna.txt')
    # Strip the strand suffixes so names can match the disease table.
    for key in vocab:
        vocab[key] = ''.join(vocab[key]).replace('-3p', '').replace('-5p', '')
    label = []
    num = 0
    for key in vocab:
        label.append(0)
        # Duplicates are possible because the -3p/-5p suffixes were removed,
        # so num can exceed the number of 1-labels.
        for idx, row in df.iterrows():
            if row['mir'] == vocab[key]:
                # BUGFIX: write the matching vocab id back via .at so it
                # actually persists into miRNA_disease_num.txt (the original
                # row['mir'] = key assignment was lost).
                df.at[idx, 'mir'] = key
                label[-1] = 1
                num = num + 1
    df.to_csv(outfile+'miRNA_disease_num.txt', header=True, sep="\t")
    with open(outfile+'data.txt', 'w') as f:
        f.write(str(dict(zip(range(1, len(label)+1), label))))
    print('pos label number:', num)
    print('load label finish')
    return label

def balance(miRNA, label, negative):
    """Build a class-balanced subset of the feature matrix.

    Keeps every positive sample (label == 1) and appends
    ``n_pos * negative`` randomly chosen negative samples.

    Parameters
    ----------
    miRNA : 2-D array-like, one feature row per sample
    label : sequence of 0/1 labels, same length as miRNA's first axis
    negative : number of negatives to draw per positive

    Returns (balance_miRNA, balance_label) with n_pos*(1+negative) rows.
    Uses the module-level ``random`` state; seed it for reproducibility.
    """
    print('STAR balance')
    feature_dim = len(list(miRNA)[0])
    # Generalized: count the positives instead of hard-coding 56
    # (the original dataset has exactly 56, so this is backward-compatible).
    n_pos = sum(1 for value in label if value == 1)
    balance_label = []
    balance_miRNA = np.zeros([n_pos * (1 + negative), feature_dim], dtype=np.float32)
    k = 0
    for i in range(len(label)):
        if label[i] == 1:
            balance_label.append(label[i])
            balance_miRNA[k, :] = miRNA[i, :]
            k = k + 1
    print(k)
    negflag = [i for i in range(len(label)) if label[i] == 0]
    nflag = random.sample(negflag, n_pos * negative)
    # BUGFIX: the original indexed label/miRNA with the loop counter
    # (label[i], miRNA[i, :]) instead of the sampled indices, copying the
    # wrong rows — including positives — into the "negative" half.
    for idx in nflag:
        balance_label.append(label[idx])
        balance_miRNA[k, :] = miRNA[idx, :]
        k = k + 1
    print("all node number:%d" % len(balance_miRNA))
    return balance_miRNA, balance_label

def plot_final(fpr_kfold, tpr_kfold, recall_kfold, precision_kfold, aupr, roc_auc, outfile):
    """Plot the cross-validation mean ROC and PR curves side by side.

    Parameters
    ----------
    fpr_kfold, tpr_kfold : per-fold ROC arrays, one row per fold,
        all rows the same length (required by np.mean(axis=0))
    recall_kfold, precision_kfold : per-fold PR arrays, same constraint
    aupr, roc_auc : scalar areas shown in the legends
    outfile : output directory prefix; four .txt curve dumps and
        ``AUPR-AUROC.png`` are written under it

    Raises
    ------
    ValueError if the per-fold arrays are ragged. The original swallowed
    that error and then crashed anyway with an unbound-variable error on
    mean_fpr; letting it propagate gives the caller the real cause.
    """
    mean_fpr = np.mean(fpr_kfold, axis=0)
    mean_tpr = np.mean(tpr_kfold, axis=0)
    # min/max envelope of the fold ROC curves (the legend calls it a
    # std-dev band, but the shaded area is actually the min/max range)
    tprs_lower = np.min(tpr_kfold, axis=0)
    tprs_upper = np.max(tpr_kfold, axis=0)
    mean_rs = np.mean(recall_kfold, axis=0)
    mean_ps = np.mean(precision_kfold, axis=0)

    # Dump the aggregated curves as plain text for later inspection.
    for fname, data in (('mean_fpr.txt', mean_fpr),
                        ('tprs_lower.txt', tprs_lower),
                        ('tprs_upper.txt', tprs_upper),
                        ('mean_tpr.txt', mean_tpr)):
        with open(outfile + fname, 'w') as f:
            f.write(str(data))

    font1 = {'family': 'Times New Roman',
             'weight': 'normal',
             'size': 30,
             }
    fig = plt.figure(figsize=(50, 25))
    tick = [0, 0.2, 0.4, 0.6, 0.8, 1]
    ax1 = plt.subplot(1, 2, 1)  # left panel: ROC
    ax2 = plt.subplot(1, 2, 2)  # right panel: PR

    plt.sca(ax1)
    plt.plot(mean_fpr, mean_tpr, lw=2, color='darkorange', label='ROC(area = {})'.format(roc_auc))
    plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='red', alpha=.2,
                     label=r'$\pm$ 1 std. dev.')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate', font1)
    plt.xticks(tick, fontsize=20, color='#000000')
    plt.yticks(tick, fontsize=20, color='#000000')
    plt.ylabel('True Positive Rate', font1)
    plt.title('ROC Curve', font1)
    plt.legend(loc="lower right", fontsize=20)

    plt.sca(ax2)
    plt.plot(mean_rs, mean_ps, lw=2, label='AUPR(area = {})'.format(aupr))
    plt.xticks(tick, fontsize=20, color='#000000')
    plt.yticks(tick, fontsize=20, color='#000000')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.ylabel('Precision', font1)
    plt.xlabel('Recall', font1)
    plt.title('PR Curve', font1)
    plt.legend(loc="lower right", fontsize=20)
    plt.savefig(outfile + "AUPR-AUROC" + '.png')

def plot(miRNA,X_train,y_train,X_test,y_test,X_vld,Y_vld,outfile):
    """Embed the feature sets in 2-D with t-SNE and plot the decision
    boundaries of six classifiers over the train/test/validation splits.

    Saves the figure as ``<outfile>clf<timestamp>.png``.

    NOTE(review): TSNE.fit_transform is called independently on miRNA,
    X_train, X_test and X_vld, so the four embeddings live in unrelated
    coordinate systems — the classifiers are fit on the X_train embedding
    while the mesh limits come from the full-set embedding X. Confirm this
    is intentional.
    """
    names = ["Decision Trees", "LogisticRegression", "RandomForest",
             "MLP", "xgboost", "GaussianNB"]
    model = TSNE(n_components=2)
    # Project each split to 2-D (labels are left as-is; the commented-out
    # lines below would have embedded them too, which makes no sense).
    X=model.fit_transform(miRNA)
    X_train= model.fit_transform(X_train)
    #y_train = model.fit_transform(y_train)
    X_test = model.fit_transform(X_test)
    #y_test = model.fit_transform(y_test)
    X_vld = model.fit_transform(X_vld)
    #Y_vld = model.fit_transform(Y_vld)
    h=.02  # mesh step size for the decision-boundary grid
    figure = plt.figure(figsize=(27, 9))
    clf = svm.SVC(C=0.9, kernel='linear')  # linear kernel
    # (translated) SVM training uses only part of the data — note that [:]
    # is actually a full slice here.
    # NOTE(review): clf is fitted but never used below; only clf2..clf7
    # appear in the classifiers list.
    clf.fit(X_train[:], y_train[:])

    clf2 = tree.DecisionTreeClassifier(criterion='entropy', min_samples_leaf=3)
    clf2.fit(X_train[:], y_train[:])

    clf3 = LogisticRegression(max_iter=1000)
    clf3.fit(X_train[:], y_train[:])

    clf4 = RandomForestClassifier(n_estimators=160)
    clf4.fit(X_train[:], y_train[:])

    # Single hidden unit MLP — presumably kept tiny on purpose; verify.
    clf5 = MLPClassifier(activation='relu', alpha=1e-05, batch_size='auto', beta_1=0.9,
       beta_2=0.999, early_stopping=False, epsilon=1e-08,
       hidden_layer_sizes=[1], learning_rate='constant',
       learning_rate_init=0.001, max_iter=1000, momentum=0.9,
       nesterovs_momentum=True, power_t=0.5, random_state=1, shuffle=True,
       solver='adam', tol=0.0001, validation_fraction=0.1, verbose=False,
       warm_start=False)
    clf5.fit(X_train[:], y_train[:])
    print('层数：%s，输出单元数量：%s' % (clf5.n_layers_, clf5.n_outputs_))

    clf6 = XGBClassifier(max_depth=5,learning_rate=0.1,n_estimators=50,eval_metric=['logloss','auc','error'], use_label_encoder=False,objective='binary:logistic')
    clf6.fit(X_train[:], y_train[:])
    clf7 = GaussianNB()
    clf7.fit(X_train[:], y_train[:])
    # Order matches `names`: clf2=Decision Trees ... clf7=GaussianNB.
    classifiers = [clf2, clf3, clf4, clf5, clf6, clf7]
    i = 1  # running subplot index over a (datasets x classifiers+1) grid
    datasets=[[X_train,y_train],[X_test,y_test],[X_vld,Y_vld]]
    for ds_cnt, ds in enumerate(datasets):
        # Mesh limits come from the full-set embedding X, not from `ds`.
        x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
        y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
        xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                             np.arange(y_min, y_max, h))
        cm = plt.cm.RdBu
        cm_bright = ListedColormap(['#FF0000', '#0000FF'])
        # First column of each row: the raw (embedded) input points.
        ax = plt.subplot(len(datasets), len(names)+1, i)
        if ds_cnt == 0:
            ax.set_title("Input data")
        # Plot the training points
        ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
        # and testing points
        ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        ax.set_xticks(())
        ax.set_yticks(())
        i += 1

        # iterate over classifiers
        for name, clf in zip(names, classifiers):
            ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
            # NOTE(review): every classifier is re-fit on X_train for every
            # dataset row, always scoring against X_test — `ds` is unused.
            clf.fit(X_train, y_train)
            score = clf.score(X_test, y_test)

            # Plot the decision boundary. For that, we will assign a color to each
            # point in the mesh [x_min, m_max]x[y_min, y_max].
            if hasattr(clf, "decision_function"):
                Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
            else:
                Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]

            # Put the result into a color plot
            Z = Z.reshape(xx.shape)
            ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)

            # Plot also the training points
            ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
            # and testing points
            ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
                       alpha=0.6)

            ax.set_xlim(xx.min(), xx.max())
            ax.set_ylim(yy.min(), yy.max())
            ax.set_xticks(())
            ax.set_yticks(())
            if ds_cnt == 0:
                ax.set_title(name)
            # Annotate each panel with the test accuracy, e.g. ".87".
            ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
                    size=15, horizontalalignment='right')
            i += 1
    plt.tight_layout()
    # Timestamp (dot stripped) keeps successive figures from overwriting.
    ticks = str(time.time()).replace('.','')
    plt.savefig(outfile+"clf"+ticks+'.png')
    #plt.show()


