import tensorflow as tf
from sklearn import metrics
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras import regularizers, Input, Model
from tensorflow.keras.layers import Embedding, GlobalMaxPooling1D, Concatenate, Dropout, Dense, Lambda
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

import os

from tensorflow.python.keras.layers import Conv1D, SimpleRNN

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

# NOTE(review): the data run on the server was generated on 2020-12-26.
# Server-side path was:
# path = "rawdata2020122602.csv"

path = "D:/learn/school/code/myfinalpaper/data/rawdata20201226.csv"
df = pd.read_csv(path)
# Expected CSV schema: a 'data' text column and a binary 'label' column.
x = df['data']
y = df['label']

tokenizer = keras.preprocessing.text.Tokenizer(
    filters='!"#$%&()*+,-./:;<=>?@[\\]^`{|}~\t\n')  # Tokenizer maps each word of the text to an integer index.
# tokenizer = keras.preprocessing.text.Tokenizer()
tokenizer.fit_on_texts(x)
# vocab = tokenizer.word_index  # word -> integer-id mapping built by fit_on_texts

x_id = tokenizer.texts_to_sequences(x)

# Pad/truncate every sequence to a fixed length of 400 tokens (padding and
# truncation both applied at the end of the sequence).
x = keras.preprocessing.sequence.pad_sequences(x_id, maxlen=400, padding='post', truncating='post')

# Fixed random_state keeps the train/test split reproducible across runs.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)

# One-hot encode the binary labels for use with categorical cross-entropy.
y_train_class = tf.keras.utils.to_categorical(y_train, num_classes=2)

y_test_class = tf.keras.utils.to_categorical(y_test, num_classes=2)

def do_metrics(y_test_truth, y_test_pred):
    """Print standard binary-classification metrics for a truth/prediction pair.

    Args:
        y_test_truth: sequence of ground-truth labels in {0, 1}.
        y_test_pred: sequence of predicted labels in {0, 1}.

    Prints accuracy, confusion matrix, precision, recall, F1, the four
    confusion-matrix cells, and the derived TPR/FPR/FNR/TNR rates.
    """
    print("metrics.accuracy_score:")
    print(metrics.accuracy_score(y_test_truth, y_test_pred))
    # Compute the confusion matrix once instead of re-deriving it six times.
    cm = metrics.confusion_matrix(y_test_truth, y_test_pred)
    print("metrics.confusion_matrix:")
    print(cm)
    print("metrics.precision_score:")
    print(metrics.precision_score(y_test_truth, y_test_pred))
    print("metrics.recall_score:")
    print(metrics.recall_score(y_test_truth, y_test_pred))
    print("metrics.f1_score:")
    print(metrics.f1_score(y_test_truth, y_test_pred))
    # For a 2x2 matrix ravel() yields the cells in row-major order: TN, FP, FN, TP.
    TN, FP, FN, TP = cm.ravel()
    print("TN: " + str(TN))
    print("FP: " + str(FP))
    print("FN: " + str(FN))
    print("TP: " + str(TP))
    print("真正率TPR: " + str(TP/(TP+FN)))
    print("假正率FPR漏报率: " + str(FP/(FP+TN)))
    print("假负率FNR误报率: " + str(FN/(TP+FN)))
    print("真负率TNR: " + str(TN/(TN+FP)))


def model_prediction(model, name, x_test, y_test):
    """Predict on x_test, print classification metrics, and dump results to CSV.

    Args:
        model: a trained Keras model whose output is a (N, 2) class-probability array.
        name: run identifier; the CSV is written to ./<name>/<name>.csv
              (the directory must already exist).
        x_test: test inputs accepted by model.predict.
        y_test: one-hot (N, 2) ground-truth labels; column 1 is the positive class.
    """
    y_predict_list = model.predict(x_test)
    # Predict class 1 whenever the class-0 probability is not above 0.5
    # (comprehension replaces the original element-wise append loop).
    y_predict = [0 if row[0] > 0.5 else 1 for row in y_predict_list]
    y_test_truth = y_test[:, 1]  # one-hot -> scalar label for the positive class
    do_metrics(y_test_truth, y_predict)
    dataframe = pd.DataFrame({'y_test_truth': y_test_truth,
                              'y_predict_score': y_predict_list[:, 1],  # final output values; do not post-process
                              'y_predict_label': y_predict})
    dataframe.to_csv("./" + name + "/" + name + '.csv', sep=',', index=False)


def do_text_cnn():
    """Train the TextCNN on the module-level train/test split, report metrics,
    and export the trained model under ./cnn_model/saved/1.

    Relies on module globals: x_train, x_test, y_train_class, y_test_class,
    and the TextCNN class.
    """
    # makedirs(..., exist_ok=True): os.mkdir raised FileExistsError on re-runs.
    os.makedirs("cnn_model", exist_ok=True)
    print("training TextCNN")
    print(x_train.shape)
    cnn_model = TextCNN(400, 204800, 128).get_model()
    # The model's final layer is a softmax, so the loss must consume
    # probabilities: from_logits=True would wrongly apply a second softmax
    # to already-normalized outputs, distorting the loss.
    cnn_model.compile(loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
                      optimizer=tf.keras.optimizers.Adam(0.001),
                      metrics=['accuracy'])
    cnn_model.fit(x_train,
                  y_train_class,
                  epochs=5,
                  validation_data=(x_test, y_test_class),
                  batch_size=64,
                  shuffle=True
                  )
    print("*****************************************")
    print("用cnn_model模型预测")
    model_prediction(cnn_model, "cnn_model", x_test, y_test_class)
    tf.saved_model.save(cnn_model, "./cnn_model/saved/1")
    print("cnn_model预测结束")
    print("*****************************************")

class TextCNN:
    """Multi-kernel-size 1-D convolutional text classifier builder.

    Builds a Keras functional model: embedding -> three parallel Conv1D
    branches (kernel sizes 3/4/5) each followed by global max pooling ->
    concatenation -> dropout -> 2-way softmax.
    """

    def __init__(self, maxlen, input_dim, output_dim):
        self.maxlen = maxlen          # maximum (padded) input sequence length
        self.input_dim = input_dim    # vocabulary size for the embedding table
        self.output_dim = output_dim  # embedding vector dimensionality

    def get_model(self):
        """Construct the model, print its summary, and return it (uncompiled)."""
        inputs = Input(shape=(self.maxlen,))
        embedded = Embedding(self.input_dim, self.output_dim)(inputs)
        # One Conv1D + global-max-pool branch per kernel size.
        pooled_branches = [
            GlobalMaxPooling1D()(
                Conv1D(128, kernel_size, padding='valid', activation='relu',
                       kernel_regularizer=regularizers.l2(0.001))(embedded))
            for kernel_size in [3, 4, 5]
        ]
        merged = Concatenate(axis=1)(pooled_branches)
        regularized = Dropout(0.5)(merged)
        outputs = Dense(2, activation='softmax')(regularized)
        model = Model(inputs=inputs, outputs=outputs)
        model.summary()
        return model

if __name__ == "__main__":
    # Entry point: train and evaluate the TextCNN model end to end.
    do_text_cnn()
    # do_rcnn()
