import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn import metrics
from tensorflow import keras
from tensorflow.keras import regularizers, Input, Model
from tensorflow.keras.layers import Embedding, GlobalMaxPooling1D, Concatenate, Dropout, Dense, Lambda, LSTM, MaxPooling1D, GRU
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

from keras.utils.vis_utils import plot_model


import os
# Put Graphviz on PATH so keras' plot_model can find the `dot` executable.
# BUG FIX: the original *replaced* PATH (losing every existing entry) and used
# a non-raw string in which '\b' is a backspace escape character; append to
# the existing PATH and use a raw string instead.
os.environ["PATH"] = os.environ.get("PATH", "") + os.pathsep + r'F:\Program Files\Graphviz\bin'
from tensorflow.python.keras import Sequential
from tensorflow.python.keras.layers import Conv1D, SimpleRNN

# Silence TensorFlow C++ log messages below ERROR level.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

# The data run on the server was generated on 2020-12-26.
# Path used on the server:
# path = "rawdata2020122602.csv"

path = "D:/learn/school/code/myfinalpaper/data/rawdata20201226.csv"
df = pd.read_csv(path)
# NOTE(review): assumes the CSV has a 'data' column (raw text) and a 'label'
# column (binary 0/1, given num_classes=2 below) — confirm against the file.
x = df['data']
y = df['label']

tokenizer = keras.preprocessing.text.Tokenizer(
    filters='!"#$%&()*+,-./:;<=>?@[\\]^`{|}~\t\n')  # Tokenizer mapping each word of the corpus to an integer index; custom filter keeps '_' and "'"
# tokenizer = keras.preprocessing.text.Tokenizer()
tokenizer.fit_on_texts(x)
# vocab = tokenizer.word_index  # word -> integer index mapping

# Convert each text to its sequence of word indices.
x_id = tokenizer.texts_to_sequences(x)

# Pad/truncate every sequence at the end ('post') to a fixed length of 400.
x = keras.preprocessing.sequence.pad_sequences(x_id, maxlen=400, padding='post', truncating='post')

# 80/20 train/test split, fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)

# One-hot encode the binary labels for the 2-unit softmax output layer.
y_train_class = tf.keras.utils.to_categorical(y_train, num_classes=2)

y_test_class = tf.keras.utils.to_categorical(y_test, num_classes=2)


def do_metrics(y_test_truth, y_test_pred):
    """Print evaluation metrics for a binary classification result.

    Prints accuracy, the confusion matrix, precision/recall/F1, the raw
    TN/FP/FN/TP counts and the derived rates (TPR, FPR, FNR, TNR).

    Args:
        y_test_truth: iterable of ground-truth labels (0/1).
        y_test_pred: iterable of predicted labels (0/1).
    """
    print("metrics.accuracy_score:")
    print(metrics.accuracy_score(y_test_truth, y_test_pred))
    # Compute the confusion matrix once instead of five separate times.
    cm = metrics.confusion_matrix(y_test_truth, y_test_pred)
    print("metrics.confusion_matrix:")
    print(cm)
    print("metrics.precision_score:")
    print(metrics.precision_score(y_test_truth, y_test_pred))
    print("metrics.recall_score:")
    print(metrics.recall_score(y_test_truth, y_test_pred))
    print("metrics.f1_score:")
    print(metrics.f1_score(y_test_truth, y_test_pred))
    # sklearn's binary confusion matrix layout is [[TN, FP], [FN, TP]].
    TN, FP, FN, TP = cm.ravel()
    print("TN: " + str(TN))
    print("FP: " + str(FP))
    print("FN: " + str(FN))
    print("TP: " + str(TP))
    print("真正率TPR: " + str(TP/(TP+FN)))
    print("假正率FPR漏报率: " + str(FP/(FP+TN)))
    print("假负率FNR误报率: " + str(FN/(TP+FN)))
    print("真负率TNR: " + str(TN/(TN+FP)))



def model_prediction(model, name, x_test, y_test):
    """Predict on x_test, print metrics and dump per-sample results to CSV.

    Writes ./<name>/<name>.csv with the ground truth, the class-1 score and
    the hard predicted label for every test sample.

    Args:
        model: trained Keras model whose output is a 2-class softmax.
        name: output directory/file stem.
        x_test: model inputs.
        y_test: one-hot ground-truth labels, shape (n, 2).
    """
    y_predict_list = model.predict(x_test)
    # Vectorized replacement of the original per-row loop: predict class 0
    # when p(class 0) > 0.5, otherwise class 1 (identical thresholding).
    y_predict = np.where(y_predict_list[:, 0] > 0.5, 0, 1)
    # Column 1 of the one-hot labels is the ground-truth class-1 indicator.
    y_test_truth = y_test[:, 1]
    do_metrics(y_test_truth, y_predict)
    # Ensure the output directory exists so to_csv cannot fail on it.
    os.makedirs(name, exist_ok=True)
    dataframe = pd.DataFrame({'y_test_truth': y_test_truth,
                              'y_predict_score': y_predict_list[:, 1],  # final score column — do not post-process
                              'y_predict_label': y_predict})
    dataframe.to_csv("./" + name + "/" + name + '.csv', sep=',', index=False)


def do_crnn():
    """Train, evaluate and export the sequential CRNN (Conv1D -> BiGRU).

    Uses the module-level train/test splits. Trains for 5 epochs, prints the
    evaluation metrics via model_prediction, and exports a SavedModel to
    ./crnn/saved/1.
    """
    # Create the output directory up front: model_prediction and
    # tf.saved_model.save below both write into ./crnn/.
    os.makedirs("crnn", exist_ok=True)
    print("training CRNN")
    print(x_train.shape)

    crnn_model = Sequential()
    # 20480-word index space, 400-dim embedding vectors.
    crnn_model.add(Embedding(20480, 400))
    crnn_model.add(Conv1D(128, 3, padding='valid', activation='tanh'))
    crnn_model.add(MaxPooling1D(pool_size=3))
    crnn_model.add(keras.layers.Bidirectional(GRU(128)))
    crnn_model.add(Dropout(0.2))
    crnn_model.add(Dense(2, activation='softmax'))
    crnn_model.summary()

    plot_model(crnn_model, to_file='crnn.png', show_shapes=True)

    # BUG FIX: the output layer already applies softmax, so the loss must
    # treat its input as probabilities (from_logits=False). The original
    # passed from_logits=True, which applies softmax a second time and
    # distorts the loss/gradients.
    crnn_model.compile(loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
                       optimizer=tf.keras.optimizers.Adam(0.001),
                       metrics=['accuracy'])
    crnn_model.fit(x_train,
                   y_train_class,
                   epochs=5,
                   validation_data=(x_test, y_test_class),
                   batch_size=64,
                   shuffle=True
                   )

    print("*****************************************")
    print("用crnn模型进行预测")
    model_prediction(crnn_model, "crnn", x_test, y_test_class)
    tf.saved_model.save(crnn_model, "./crnn/saved/1")
    print("crnn预测结束")
    print("*****************************************")

class CRNN(object):
    """Functional-API text classifier: parallel Conv1D branches (kernel
    sizes 3/4/5) over a shared embedding, concatenated, pooled, fed through
    a bidirectional LSTM and a 2-class softmax head."""

    def __init__(self, maxlen, input_dim, output_dim):
        # maxlen: padded input sequence length.
        # input_dim: vocabulary size for the embedding layer.
        # output_dim: embedding vector dimension.
        self.maxlen = maxlen
        self.input_dim = input_dim
        self.output_dim = output_dim

    def get_model(self):
        """Build the Keras Model, print its summary and return it."""
        seq_input = Input(shape=(self.maxlen,))
        embedded = Embedding(self.input_dim, self.output_dim)(seq_input)

        # One L2-regularized conv branch per kernel size, all reading the
        # same embedding output ('same' padding keeps lengths equal).
        conv_branches = [
            Conv1D(128, kernel_size, padding='same', activation='relu',
                   kernel_regularizer=regularizers.l2(0.001))(embedded)
            for kernel_size in [3, 4, 5]
        ]

        features = Concatenate(axis=1)(conv_branches)
        features = MaxPooling1D(pool_size=3)(features)

        # Bi-LSTM over the pooled conv features.
        features = keras.layers.Bidirectional(
            LSTM(units=128,
                 activation='relu',
                 kernel_regularizer=regularizers.l2(0.001),
                 recurrent_regularizer=regularizers.l2(0.001)))(features)

        features = Dropout(0.5)(features)
        probabilities = Dense(2, activation='softmax')(features)

        model = Model(inputs=seq_input, outputs=probabilities)
        model.summary()
        return model

if __name__ == "__main__":
    do_crnn()