import datetime
import os

import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, SimpleRNN
from matplotlib import pyplot as plt

from data_utils import pad_trunc
from data_utils import tokenize_and_vectorize
from data_utils import load_googlenews_vec
from data_utils import pre_process_data
from data_utils import collect_expected

# ===== Paths and hyperparameters =====
imdb_filepath = 'C:/Software/Code_soft/Work_Spacee/python_code/IMDB_Classified/train/aclImdb/train'
# NOTE: the original concatenated '.bin ' with a trailing space, yielding a
# filename ending in ".bin " that does not resolve on disk — fixed here.
google_filepath = 'C:/Software/Code_soft/Work_Spacee/python_code/IMDB_Classified/train/GoogleNews-vectors-negative300' \
                  '.bin'
np.random.seed(1020)          # make the train/test shuffle reproducible
maxlen = 400                  # tokens kept per review (pad/truncate target)
batch_size = 16
embedding_dims = 300          # GoogleNews word2vec vector size
epochs = 2

# NOTE(review): despite the "cnn_" prefix this points at the RNN weights and
# is never read below (L72 redefines rnn_weights_path) — kept for compatibility.
cnn_weights_path = 'result/RNN_weights.h5'

# Load pretrained embeddings and the labeled IMDB reviews, then turn each
# review into a sequence of word vectors with its expected label.
word_vectors = load_googlenews_vec(google_filepath)
dataset = pre_process_data(imdb_filepath)
vectorized_data = tokenize_and_vectorize(dataset)
expected = collect_expected(dataset)

# ===== Split into training and test sets (80/20) =====
split_at = int(len(vectorized_data) * 0.8)
x_train, x_test = vectorized_data[:split_at], vectorized_data[split_at:]
y_train, y_test = expected[:split_at], expected[split_at:]

# ==============================收集扩展和截断数据=============================================
x_train = pad_trunc(x_train, maxlen)
x_test = pad_trunc(x_test, maxlen)
x_train = np.reshape(x_train, (len(x_train), maxlen, embedding_dims))
y_train = np.array(y_train)
x_test = np.reshape(x_test, (len(x_test), maxlen, embedding_dims))
y_test = np.array(y_test)

print('=======这是x——test======', x_test)
print('=======这是y——test======', y_test)
start_time = datetime.datetime.now()
print("build model ....")
num_neurons = 50

# One recurrent layer over the word-vector sequence, then flatten the
# per-timestep outputs into a single binary-sentiment prediction.
model = Sequential([
    SimpleRNN(
        num_neurons,
        return_sequences=True,
        input_shape=(maxlen, embedding_dims),
    ),
    Dropout(0.5),
    Flatten(),
    Dense(1, activation='sigmoid'),
])

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

model.summary()

# Train, validating against the held-out 20% split each epoch.
history = model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_test, y_test)
          )

# Persist architecture (JSON) and learned weights separately.
# Ensure the output directory exists first — otherwise both writes crash
# after the (long) training run has already completed.
os.makedirs('result', exist_ok=True)
model_structure = model.to_json()
rnn_weights_path = 'result/RNN_weights.h5'
with open('result/RNN_model.json', 'w', encoding='utf-8') as f:
    f.write(model_structure)
model.save_weights(rnn_weights_path)

# ===== Training curves and elapsed-time report =====
# Stop the timer BEFORE plotting: plt.show() blocks until the user closes the
# window, which previously inflated the reported "training time" by however
# long the figure stayed open.
end_time = datetime.datetime.now()

# Keras renamed the history keys 'acc'/'val_acc' to 'accuracy'/'val_accuracy';
# accept either so the script works across versions.
acc = history.history.get('accuracy', history.history.get('acc'))
val_acc = history.history.get('val_accuracy', history.history.get('val_acc'))
loss = history.history['loss']
val_loss = history.history['val_loss']

plt.subplot(1, 2, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.legend()

plt.subplot(1, 2, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.title('Training and Validation Loss')
plt.legend()
plt.show()

# total_seconds() instead of .seconds: the latter silently drops whole days
# from the timedelta.
time_length = (end_time - start_time).total_seconds()
print('训练时间 ========》', time_length)




