import tensorflow as tf
from tensorflow import keras
import pandas as pd
from sklearn.model_selection import train_test_split

# server path
# path = "/root/justnow/finalpaper/data.csv"

# windows path
path = "D:/learn/school/code/myfinalpaper/data/rawdata20201226.csv"
df = pd.read_csv(path)

x = df['data']  # raw text samples
y = df['label']  # class labels (2 classes, per the one-hot encoding below)

# Log samples containing the "__" marker for later inspection.
# BUG FIX: the original called f.writelines(x), dumping the ENTIRE series
# once per matching sample (and with no separators). Write only the single
# offending sample, one per line.
with open("wrongdata.txt", "w") as f:
    for i in x:
        if "__" in i:
            f.write(i + "\n")


# Tokenizer that splits on spaces only, so every whitespace-delimited token
# becomes its own vocabulary entry (indices assigned by corpus frequency).
tokenizer = keras.preprocessing.text.Tokenizer(filters=' ')
tokenizer.fit_on_texts(x)
vocab = tokenizer.word_index  # mapping: token -> integer id

# Map each text to its id sequence, then pad/truncate at the tail to a
# fixed length of 400 so every sample has the same shape.
x = keras.preprocessing.sequence.pad_sequences(
    tokenizer.texts_to_sequences(x),
    maxlen=400,
    padding='post',
    truncating='post',
)

# Hold out 20% of the data; fixed seed keeps the split reproducible.
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.2, random_state=42)

# One-hot encode the labels (2 classes).
y_train = tf.keras.utils.to_categorical(y_train, num_classes=2)
y_test = tf.keras.utils.to_categorical(y_test, num_classes=2)


# Bi-GRU text classifier: embedding -> bidirectional GRU -> dropout -> softmax.
model = keras.Sequential()
# NOTE(review): the input vocabulary size 20480 is hard-coded; it must be
# strictly greater than the largest token id the tokenizer produces — confirm
# against len(vocab) + 1.
model.add(keras.layers.Embedding(20480, 128))
model.add(keras.layers.Bidirectional(tf.keras.layers.GRU(128)))
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.Dense(2, activation="softmax"))

model.summary()

# BUG FIX: the final Dense layer already applies softmax, so the loss must be
# given from_logits=False. With from_logits=True the loss treats the
# probabilities as raw logits (an implicit second softmax), which flattens
# gradients and degrades training.
model.compile(loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
              optimizer=tf.keras.optimizers.Adam(0.001),
              metrics=['accuracy'])

model.fit(x=x_train,
          y=y_train,
          epochs=5,
          validation_data=(x_test, y_test),
          batch_size=32,
          shuffle=True)


