import pandas as pd
from sklearn.neural_network import MLPClassifier

from tflearn import data_utils, conv_1d, merge, regression
import tflearn
import numpy as np
from sklearn.model_selection import train_test_split
from tflearn.layers.conv import global_max_pool
from tflearn.layers.recurrent import bidirectional_rnn, BasicLSTMCell, GRUCell
from tflearn.layers.core import input_data, dropout, fully_connected
from sklearn import metrics, svm
import tensorflow as tf
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow C++ info/warning logs

# Load the labeled script corpus. label == 1 is a normal script, label == 0 is
# a webshell (see the counts printed below).
# path = "./data/rawdata.csv"
path = "D:\\learn\\school\\code\\myfinalpaper\\data\\rawdata20201226.csv"
df = pd.read_csv(path)
x = df['data']
y = df['label']

number_nomal = df[df["label"] == 1]  # normal (benign) script samples
print(number_nomal.shape)
number_webshell = df[df["label"] == 0]  # webshell (malicious) script samples
print(number_webshell.shape)
# normal_sample = number_nomal.sample(20)  # draw n benign samples
# webshell_sample = number_webshell.sample(10)  # draw n malicious samples
# new_df = pd.concat([normal_sample, webshell_sample])
# x = new_df['data']
# y = new_df['label']
vocabulary = data_utils.VocabularyProcessor(max_document_length=400)  # cap each document at 400 tokens
x = vocabulary.fit_transform(x)  # map each document to a sequence of word ids
x = np.array(list(x))

# Split into training and test sets (80/20), fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
# Keep the raw integer test labels before one-hot encoding; model_prediction()
# passes them to do_metrics().
testy = y_test
y_train = data_utils.to_categorical(y_train, 2)
y_test = data_utils.to_categorical(y_test, 2)

def do_metrics(y_test_truth, y_test_pred):
    """Print classification metrics for binary predictions.

    Args:
        y_test_truth: ground-truth binary labels (per the data loading,
            1 = normal script, 0 = webshell).
        y_test_pred: predicted binary labels.
    """
    print("metrics.accuracy_score:")
    print(metrics.accuracy_score(y_test_truth, y_test_pred))
    # Compute the confusion matrix once and reuse it (the original recomputed
    # it five times).
    cm = metrics.confusion_matrix(y_test_truth, y_test_pred)
    print("metrics.confusion_matrix:")
    print(cm)
    print("metrics.precision_score:")
    print(metrics.precision_score(y_test_truth, y_test_pred))
    print("metrics.recall_score:")
    print(metrics.recall_score(y_test_truth, y_test_pred))
    print("metrics.f1_score:")
    print(metrics.f1_score(y_test_truth, y_test_pred))
    TN, FP = cm[0, 0], cm[0, 1]
    FN, TP = cm[1, 0], cm[1, 1]
    print("TN: " + str(TN))
    print("FP: " + str(FP))
    print("FN: " + str(FN))
    print("TP: " + str(TP))
    # True positive rate (sensitivity / recall).
    print("真正率TPR: " + str(TP/(TP+FN)))
    # FP/(FP+TN) is the false positive rate, i.e. the false-alarm rate (误报率);
    # FN/(TP+FN) is the false negative rate, i.e. the miss rate (漏报率).
    # The original output labels had 误报率 and 漏报率 swapped — fixed here.
    print("假正率FPR误报率: " + str(FP/(FP+TN)))
    print("假负率FNR漏报率: " + str(FN/(TP+FN)))
    print("真负率TNR: " + str(TN/(TN+FP)))


def model_prediction(model, name):
    """Predict on the global test split, print metrics, and dump results to CSV.

    Args:
        model: trained tflearn DNN exposing predict() that returns per-sample
            class-score pairs (softmax over 2 classes).
        name: base name of the output file, written as "<name>.csv".
    """
    # model.predict may return a plain Python list; ensure an ndarray so the
    # column slice [:, 1] below is valid.
    y_predict_list = np.asarray(model.predict(x_test))
    # Hard labels: class 0 when its score exceeds 0.5, else class 1.
    y_predict = [0 if scores[0] > 0.5 else 1 for scores in y_predict_list]
    # Column 1 of the one-hot test labels equals the raw integer labels that
    # are also kept in the module-level `testy`.
    y_test_truth = y_test[:, 1]
    do_metrics(testy, y_predict)
    # Keep this exact column layout — the original comment marks it as the
    # final template consumed downstream; do not alter the values.
    dataframe = pd.DataFrame({'y_test_truth': y_test_truth,
                              'y_predict_score': y_predict_list[:, 1],
                              'y_predict_label': y_predict})
    dataframe.to_csv(name + '.csv', sep=',', index=False)


def lstm(input_dim, learning_rate, batch_size):
    """Train an LSTM classifier on the global train/test split and dump predictions.

    Args:
        input_dim: vocabulary size for the embedding layer.
        learning_rate: Adam learning rate.
        batch_size: mini-batch size for training.
    """
    # Start from a clean default graph so several tflearn models can be built
    # in one process run without variable-name collisions.
    tf.reset_default_graph()
    net = tflearn.input_data([None, 400])  # sequences of 400 word ids
    net = tflearn.embedding(net, input_dim=input_dim, output_dim=128)
    net = tflearn.lstm(net, 128, dropout=0.8)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
                             loss='categorical_crossentropy')

    # Train the model.
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(x_train, y_train, n_epoch=5, shuffle=True, validation_set=(x_test, y_test), show_metric=True,
              batch_size=batch_size)
    print("*****************************************")
    print("用lstm模型预测")
    model_prediction(model, "lstm")
    print("预测结束")
    print("*****************************************")


def gru(input_dim, learning_rate, batch_size):
    """Train a GRU classifier on the global train/test split and dump predictions.

    Args:
        input_dim: vocabulary size for the embedding layer.
        learning_rate: Adam learning rate.
        batch_size: mini-batch size for training.
    """
    # Start from a clean default graph so several tflearn models can be built
    # in one process run without variable-name collisions.
    tf.reset_default_graph()
    net = tflearn.input_data([None, 400])  # sequences of 400 word ids
    net = tflearn.embedding(net, input_dim=input_dim, output_dim=128)
    net = tflearn.gru(net, 128, dropout=0.8)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
                             loss='categorical_crossentropy')

    # Train the model.
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(x_train, y_train, n_epoch=5, shuffle=True, validation_set=(x_test, y_test), show_metric=True,
              batch_size=batch_size)
    print("*****************************************")
    print("用gru模型预测")
    model_prediction(model, "gru")
    print("预测结束")
    print("*****************************************")


def bilstm(input_dim, learning_rate, batch_size):
    """Train a bidirectional-LSTM classifier on the global split and dump predictions.

    Args:
        input_dim: vocabulary size for the embedding layer.
        learning_rate: Adam learning rate.
        batch_size: mini-batch size for training.
    """
    # Start from a clean default graph so several tflearn models can be built
    # in one process run without variable-name collisions.
    tf.reset_default_graph()
    net = tflearn.input_data([None, 400])  # sequences of 400 word ids
    net = tflearn.embedding(net, input_dim=input_dim, output_dim=128)
    # Forward and backward LSTM cells of 128 units each.
    net = bidirectional_rnn(net, BasicLSTMCell(128), BasicLSTMCell(128))
    net = dropout(net, 0.8)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
                             loss='categorical_crossentropy')

    # Train the model.
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(x_train, y_train, n_epoch=5, shuffle=True, validation_set=(x_test, y_test), show_metric=True,
              batch_size=batch_size)

    print("*****************************************")
    print("用bilstm模型预测")
    model_prediction(model, "bilstm")
    print("预测结束")
    print("*****************************************")

def bigru(input_dim, learning_rate, batch_size):
    """Train a bidirectional-GRU classifier on the global split and dump predictions.

    Args:
        input_dim: vocabulary size for the embedding layer.
        learning_rate: Adam learning rate.
        batch_size: mini-batch size for training.
    """
    # Start from a clean default graph so several tflearn models can be built
    # in one process run without variable-name collisions.
    tf.reset_default_graph()
    net = tflearn.input_data([None, 400])  # sequences of 400 word ids
    net = tflearn.embedding(net, input_dim=input_dim, output_dim=128)
    # Forward and backward GRU cells of 128 units each.
    net = bidirectional_rnn(net, GRUCell(128), GRUCell(128))
    net = dropout(net, 0.8)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
                             loss='categorical_crossentropy')

    # Train the model.
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(x_train, y_train, n_epoch=5, shuffle=True, validation_set=(x_test, y_test), show_metric=True,
              batch_size=batch_size)

    print("*****************************************")
    print("用bigru模型预测")
    model_prediction(model, "bigru")
    print("预测结束")
    print("*****************************************")

def do_mlp(x, y):
    """Train an MLP classifier on (x, y), print metrics, and dump results to CSV."""
    classifier = MLPClassifier(solver='lbfgs',
                               alpha=1e-5,
                               hidden_layer_sizes=(5, 2),
                               random_state=1)

    # Hold out 40% of the samples for evaluation.
    train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.4, random_state=0)
    classifier.fit(train_x, train_y)
    predictions = classifier.predict(test_x)
    print("*****************************************")
    print("用mlp模型预测")
    do_metrics(test_y, predictions)
    print("预测结束")
    print("*****************************************")
    result = pd.DataFrame({'y_test_truth': test_y,
                           'y_predict_score': classifier.predict_proba(test_x)[:, 1],
                           'y_predict_label': predictions})
    result.to_csv("mlp" + '.csv', sep=',', index=False)

def do_svm(x, y):
    """Train an SVM classifier on (x, y), print metrics, and dump results to CSV."""
    # Hold out 20% of the samples for evaluation.
    train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.2, random_state=0)
    # probability=True enables predict_proba for the score column below.
    classifier = svm.SVC(probability=True)
    classifier.fit(train_x, train_y)
    predictions = classifier.predict(test_x)
    print("*****************************************")
    print("用svm模型预测")
    do_metrics(test_y, predictions)
    print("预测结束")
    print("*****************************************")
    result = pd.DataFrame({'y_test_truth': test_y,
                           'y_predict_score': classifier.predict_proba(test_x)[:, 1],
                           'y_predict_label': predictions})
    result.to_csv("svm" + '.csv', sep=',', index=False)

def do_cnn(input_dim, learning_rate, batch_size):
    """Train a multi-kernel-width 1-D CNN classifier on the global split.

    Args:
        input_dim: vocabulary size for the embedding layer.
        learning_rate: Adam learning rate.
        batch_size: mini-batch size for training.
    """
    # Start from a clean default graph so several tflearn models can be built
    # in one process run without variable-name collisions.
    tf.reset_default_graph()
    network = input_data(shape=[None, 400], name='input')
    network = tflearn.embedding(network, input_dim=input_dim, output_dim=128)
    # Three parallel 1-D convolutions with kernel widths 3/4/5, concatenated
    # and globally max-pooled.
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=learning_rate,
                         loss='categorical_crossentropy', name='target')

    # Train the model.
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(x_train, y_train, n_epoch=5, shuffle=True, validation_set=(x_test, y_test), show_metric=True,
              batch_size=batch_size)

    print("*****************************************")
    print("cnn")
    model_prediction(model, "cnn")
    print("预测结束")
    print("*****************************************")



if __name__ == '__main__':
    # Entry point: each line trains/evaluates one model; uncomment the ones to
    # run. input_dim must cover the vocabulary size produced by the
    # VocabularyProcessor above — TODO confirm 20480 is large enough.
    # lstm(input_dim=20480, learning_rate=0.001, batch_size=32)
    #bilstm(input_dim=20480, learning_rate=0.001, batch_size=32)
    gru(input_dim=20480, learning_rate=0.001, batch_size=32)
    #bigru(input_dim=20480, learning_rate=0.001, batch_size=32)
    #do_mlp(x, y)
    do_svm(x, y)
    #do_cnn(input_dim=20480, learning_rate=0.001, batch_size=32)