from tensorflow.keras import Model
from tensorflow import keras
from tensorflow.keras.layers import Embedding, Dense, Concatenate, Conv1D, Bidirectional, LSTM, GlobalAveragePooling1D, GlobalMaxPooling1D
from sklearn.model_selection import train_test_split
from sklearn import metrics
import pandas as pd
import tensorflow as tf
import os

from tensorflow.python.keras import regularizers

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

# The data used on the server was generated on 2020-12-26.
# Path used on the server:
# path = "rawdata2020122602.csv"

path = "D:/learn/school/code/myfinalpaper/data/rawdata20201226.csv"
df = pd.read_csv(path)
# Expected CSV schema: a 'data' text column and a binary 'label' column
# (labels feed to_categorical with num_classes=2 below) — TODO confirm.
x = df['data']
y = df['label']

tokenizer = keras.preprocessing.text.Tokenizer(
    filters='!"#$%&()*+,-./:;<=>?@[\\]^`{|}~\t\n')  # Tokenizer maps words to integer indices; custom filter strips punctuation/whitespace
# tokenizer = keras.preprocessing.text.Tokenizer()
tokenizer.fit_on_texts(x)
# vocab = tokenizer.word_index  # mapping of each word to its index

x_id = tokenizer.texts_to_sequences(x)

# Pad/truncate every sequence to length 400 (must match the model's maxlen).
x = keras.preprocessing.sequence.pad_sequences(x_id, maxlen=400, padding='post', truncating='post')

# Fixed random_state for a reproducible 80/20 train/test split.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)

# One-hot encode labels for the 2-unit softmax output / categorical crossentropy.
y_train_class = tf.keras.utils.to_categorical(y_train, num_classes=2)

y_test_class = tf.keras.utils.to_categorical(y_test, num_classes=2)


def do_metrics(y_test_truth, y_test_pred):
    """Print standard binary-classification metrics for ground truth vs. predictions.

    Prints accuracy, confusion matrix, precision, recall, F1, the four
    confusion-matrix cells, and the derived TPR/FPR/FNR/TNR rates.

    Args:
        y_test_truth: 1-D array-like of true binary labels (0/1).
        y_test_pred: 1-D array-like of predicted binary labels (0/1).
    """
    print("metrics.accuracy_score:")
    print(metrics.accuracy_score(y_test_truth, y_test_pred))
    # Compute the confusion matrix once instead of five separate times.
    cm = metrics.confusion_matrix(y_test_truth, y_test_pred)
    print("metrics.confusion_matrix:")
    print(cm)
    print("metrics.precision_score:")
    print(metrics.precision_score(y_test_truth, y_test_pred))
    print("metrics.recall_score:")
    print(metrics.recall_score(y_test_truth, y_test_pred))
    print("metrics.f1_score:")
    print(metrics.f1_score(y_test_truth, y_test_pred))
    # For binary labels sklearn lays the matrix out as [[TN, FP], [FN, TP]].
    TN, FP, FN, TP = cm.ravel()
    print("TN: " + str(TN))
    print("FP: " + str(FP))
    print("FN: " + str(FN))
    print("TP: " + str(TP))
    # NOTE(review): the original labels had 误报率/漏报率 swapped — FPR is the
    # false-alarm rate (误报率) and FNR is the miss rate (漏报率); fixed here.
    # Rates will raise ZeroDivisionError if a class is entirely absent.
    print("真正率TPR: " + str(TP/(TP+FN)))
    print("假正率FPR误报率: " + str(FP/(FP+TN)))
    print("假负率FNR漏报率: " + str(FN/(TP+FN)))
    print("真负率TNR: " + str(TN/(TN+FP)))



def model_prediction(model, name, x_test, y_test):
    """Predict on x_test, print metrics, and write per-sample results to CSV.

    Writes "./<name>/<name>.csv" with columns y_test_truth, y_predict_score
    (probability of class 1), and y_predict_label. The directory "./<name>"
    must already exist (created by the caller).

    Args:
        model: trained Keras model whose predict() yields per-class probabilities.
        name: output directory/file stem.
        x_test: model inputs.
        y_test: one-hot encoded ground-truth labels, shape (n, 2).
    """
    y_predict_list = model.predict(x_test)
    # Column 0 is the class-0 probability: threshold at 0.5 to get a hard label.
    # (Comprehension replaces the original manual append loop.)
    y_predict = [0 if row[0] > 0.5 else 1 for row in y_predict_list]
    # Column 1 of the one-hot labels is the integer class (0/1).
    y_test_truth = y_test[:, 1]
    do_metrics(y_test_truth, y_predict)
    dataframe = pd.DataFrame({'y_test_truth': y_test_truth,
                              'y_predict_score': y_predict_list[:, 1],  # final score column — do not modify these values
                              'y_predict_label': y_predict})
    dataframe.to_csv("./" + name + "/" + name + '.csv', sep=',', index=False)


def rcnnvariant():
    """Train the RCNNVariant model, report metrics, and export a SavedModel.

    Side effects: creates ./rcnnvariant/, writes ./rcnnvariant/rcnnvariant.csv
    via model_prediction, and saves the model under ./rcnnvariant/saved/1.
    """
    # exist_ok prevents a FileExistsError crash on repeated runs
    # (the original os.mkdir failed if the directory already existed).
    os.makedirs("rcnnvariant", exist_ok=True)
    print("trainning rcnnvariant")
    print(x_train.shape)
    # maxlen=400 matches the pad_sequences length; 24800 is the assumed
    # vocabulary size — TODO confirm against len(tokenizer.word_index) + 1.
    rcnnvamodel = RCNNVariant(400, 24800, 128)
    # BUG FIX: the model's final Dense layer applies softmax, so the loss must
    # receive probabilities. from_logits=True (as before) would treat the
    # softmax outputs as logits and effectively apply a second softmax,
    # distorting the training gradients.
    rcnnvamodel.compile(loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
                        optimizer=tf.keras.optimizers.Adam(0.001),
                        metrics=['accuracy'])
    rcnnvamodel.fit(x_train,
                    y_train_class,
                    epochs=5,
                    validation_data=(x_test, y_test_class),
                    batch_size=64,
                    shuffle=True
                    )
    print("*****************************************")
    print("rcnnvariant")
    model_prediction(rcnnvamodel, "rcnnvariant", x_test, y_test_class)
    tf.saved_model.save(rcnnvamodel, "./rcnnvariant/saved/1")
    print("rcnnvariant预测结束")
    print("*****************************************")

class RCNNVariant(Model):
    """Variant of RCNN.

    Based on the structure of RCNN, with some improvements:
        1. Ignore the shift for left/right context.
        2. Use Bidirectional LSTM/GRU to encode context.
        3. Use Multi-CNN to represent the semantic vectors.
        4. Use ReLU instead of Tanh.
        5. Use both AveragePooling and MaxPooling.

    NOTE(review): despite item 4, the Conv1D layers below actually use
    'tanh' — left unchanged here to preserve trained behavior; confirm
    which activation is intended.
    """

    def __init__(self,
                 maxlen,
                 max_features,
                 embedding_dims,
                 kernel_sizes=(3, 4, 5),  # tuple default fixes the mutable-default-argument pitfall
                 class_num=2,
                 last_activation='softmax'):
        """Build the layers.

        Args:
            maxlen: fixed input sequence length (tokens per sample).
            max_features: vocabulary size for the embedding layer.
            embedding_dims: embedding vector dimensionality.
            kernel_sizes: Conv1D kernel widths, one conv branch per size.
            class_num: number of output classes.
            last_activation: activation of the final classifier layer.
        """
        super(RCNNVariant, self).__init__()
        self.maxlen = maxlen
        self.max_features = max_features
        self.embedding_dims = embedding_dims
        self.kernel_sizes = kernel_sizes
        self.class_num = class_num
        self.last_activation = last_activation
        self.embedding = Embedding(self.max_features, self.embedding_dims, input_length=self.maxlen)
        self.bi_rnn = Bidirectional(LSTM(128, return_sequences=True))
        self.concatenate = Concatenate()
        # One L2-regularized conv branch per kernel size.
        self.convs = [
            Conv1D(128, kernel_size, padding='valid', activation='tanh',
                   kernel_regularizer=regularizers.l2(0.001))
            for kernel_size in self.kernel_sizes
        ]
        self.avg_pooling = GlobalAveragePooling1D()
        self.max_pooling = GlobalMaxPooling1D()
        self.classifier = Dense(self.class_num, activation=self.last_activation)

    def call(self, inputs):
        """Forward pass: embed, encode context, multi-CNN, pool, classify.

        Args:
            inputs: integer tensor of shape (batch, maxlen).

        Returns:
            Tensor of shape (batch, class_num) of class probabilities
            (softmax by default).

        Raises:
            ValueError: if inputs is not rank 2 or its length != maxlen.
        """
        if len(inputs.get_shape()) != 2:
            raise ValueError('The rank of inputs of TextRNN must be 2, but now is %d' % len(inputs.get_shape()))
        if inputs.get_shape()[1] != self.maxlen:
            raise ValueError('The maxlen of inputs of TextRNN must be %d, but now is %d' % (self.maxlen, inputs.get_shape()[1]))
        embedding = self.embedding(inputs)
        x_context = self.bi_rnn(embedding)
        # Concatenate raw embeddings with the BiLSTM context encoding.
        x = self.concatenate([embedding, x_context])
        convs = [conv_layer(x) for conv_layer in self.convs]
        # Apply both average and max pooling to every conv branch.
        poolings = [self.avg_pooling(conv) for conv in convs] + [self.max_pooling(conv) for conv in convs]
        x = self.concatenate(poolings)
        output = self.classifier(x)
        return output


if __name__ == "__main__":
    # Train and evaluate the RCNNVariant model when run as a script.
    rcnnvariant()