import datetime
import os

import tensorflow as tf
from tensorflow import keras
import pandas as pd
from sklearn.model_selection import train_test_split
import tensorflow.compat.v1 as tf1

# Windows path (local development):
# path = "D:/learn/school/code/myfinalpaper/data/rawdata.csv"

# Server path (current run location)
path = "/root/justnow/finalpaper/data.csv"

# Load the labelled corpus; expected columns: 'data' (script text) and 'label'.
df = pd.read_csv(path)

x = df['data']   # raw script text per sample
y = df['label']  # class label — presumably 1 = normal, 0 = webshell (see counts below); verify
#
# number_normal = df[df["label"] == 1]    # normal (benign) scripts
# number_webshell = df[df["label"] == 0]  # webshell scripts
# # print(number_normal.shape)   -> (4344, 2)
# # print(number_webshell.shape) -> (1486, 2)
#
# # Down-sampling experiment (kept for reference):
# # normal_sample = number_normal.sample(100)    # draw benign samples
# # webshell_sample = number_webshell.sample(10) # draw webshell samples
# #
# # new_df = pd.concat([normal_sample, webshell_sample])  # stack the two samples
# #
# # print(new_df.shape)
# # x = new_df['data']
# # y = new_df['label']

# Build a Tokenizer mapping each corpus token to an integer index.
# NOTE(review): the filter string omits '_' relative to the Keras default,
# presumably so identifiers in source code stay intact — confirm intent.
tokenizer = keras.preprocessing.text.Tokenizer(
    filters='!"#$%&()*+,-./:;<=>?@[\\]^`{|}~\t\n')
tokenizer.fit_on_texts(x)
vocab = tokenizer.word_index  # word -> integer id assigned by the tokenizer

# pd.DataFrame(vocab, index=[0]).to_json("temp.json")

# Encode every sample as a token-id sequence, then fix the length at 400
# (pad short samples at the end, truncate long ones at the end).
x_id = tokenizer.texts_to_sequences(x)

x_padded_seqs = keras.preprocessing.sequence.pad_sequences(
    x_id, maxlen=400, padding='post', truncating='post')


# Split into train/test sets (80/20, fixed seed for reproducibility).
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)

# Re-encode each split with the tokenizer fitted on the full corpus,
# so both splits share one vocabulary.
x_train_word_ids = tokenizer.texts_to_sequences(x_train)
x_test_word_ids = tokenizer.texts_to_sequences(x_test)

# One-hot encode the binary labels for the categorical-crossentropy loss.
y_train = tf.keras.utils.to_categorical(y_train, num_classes=2)
y_test = tf.keras.utils.to_categorical(y_test, num_classes=2)

# Samples have variable length; pad every sequence to a fixed 400 tokens.
x_train_padded_seqs = keras.preprocessing.sequence.pad_sequences(x_train_word_ids, maxlen=400, padding='post')
x_test_padded_seqs = keras.preprocessing.sequence.pad_sequences(x_test_word_ids, maxlen=400, padding='post')

# BUG FIX: the observed shapes had been pasted directly after the print()
# call, making Python call print's None return value -> TypeError at runtime.
# They now live in the trailing comment instead.
print(x_train_padded_seqs.shape, x_test_padded_seqs.shape)  # e.g. (4664, 400) (1166, 400)


# Alternative input: pre-processed intermediate data from the earlier paper,
# kept for comparison experiments.
# path = "D:/learn/school/code/myfinalpaper/data/middledata.csv"
#
# df = pd.read_csv(path)
#
# x = df['data']
# y = df['label']


print(x)  # quick sanity check of the raw input series

# Bi-GRU classifier: token embedding -> bidirectional GRU -> dropout -> softmax.
# FIX: the embedding input dimension was hard-coded to 20480; if the fitted
# vocabulary produces token ids >= 20480, the embedding lookup fails at fit
# time. Extend it to cover the actual vocabulary (ids are 1-based, hence +1)
# while keeping the original 20480 floor for backward compatibility.
model = keras.Sequential([
    keras.layers.Embedding(max(20480, len(vocab) + 1), 128),
    keras.layers.Bidirectional(tf.keras.layers.GRU(128)),
    keras.layers.Dropout(0.2),
    keras.layers.Dense(2, activation="softmax")  # 2-class probability output
])

model.summary()

# Compile the model.
# BUG FIX: the final Dense layer already applies softmax, so the loss must
# treat its input as probabilities (from_logits=False). from_logits=True
# would apply a second softmax internally and mis-scale the loss/gradients.
model.compile(loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
              optimizer=tf.keras.optimizers.Adam(0.001),
              metrics=['accuracy'])

# BUG FIX: "logs" was string-concatenated with the timestamp inside a
# single-argument os.path.join, producing e.g. "logs20240101-120000" in the
# CWD instead of a per-run subdirectory under "logs/".
log_dir = os.path.join("logs", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)

# Train on the token-id sequences (currently disabled).
# NOTE(review): tensorboard_callback is created above but was never passed to
# fit(); add callbacks=[tensorboard_callback] when re-enabling.
# history = model.fit(x_train_padded_seqs,
#                     y_train,
#                     epochs=1,
#                     validation_data=(x_test_padded_seqs, y_test),
#                     batch_size = 200,
#                     shuffle = True
#                     )

# Train on the intermediate ids from the earlier paper (currently disabled).
# history = model.fit(x_train_padded_seqs,
#                     y_train,
#                     epochs=1,
#                     validation_data=(x_test_padded_seqs, y_test),
#                     batch_size = 200,
#                     shuffle = True
#                     )