from keras.datasets import imdb
from keras import models, layers
import numpy as np
import matplotlib.pyplot as plt

# Load the IMDB sentiment dataset, keeping only the 10,000 most frequent
# words; each review arrives as a variable-length list of word indices.
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=10000)

# Length (in words) of the first training review — quick sanity check.
print(len(x_train[0]))


def get_review(review):
    """Decode a list of IMDB word indices into a human-readable string.

    Keras reserves indices 0-2 (padding / start-of-sequence / unknown),
    so every index is shifted down by 3 before the lookup; indices with
    no dictionary entry decode to '?'.

    The decoded review is printed (as before) and also returned, so the
    function's name finally matches its behavior — previously it always
    returned None.
    """
    word_index = imdb.get_word_index()
    # Invert the word -> index mapping into index -> word for decoding.
    reverse_word_index = {value: key for key, value in word_index.items()}
    decoded_review = ' '.join(
        reverse_word_index.get(i - 3, '?') for i in review
    )
    print(decoded_review)
    return decoded_review


def vectorize_seq(seq, dimension=10000):
    """Multi-hot encode a batch of index sequences.

    Parameters
    ----------
    seq : sequence of sequences of int
        Each inner sequence holds word indices in ``[0, dimension)``.
    dimension : int
        Width of each output row (vocabulary size).

    Returns
    -------
    np.ndarray of shape (len(seq), dimension)
        Row i has 1.0 at every index present in seq[i], 0.0 elsewhere.
    """
    results = np.zeros((len(seq), dimension))
    # NOTE: the original loop reused the name `seq` for the inner
    # sequence, shadowing the parameter; `indices` avoids that trap.
    for i, indices in enumerate(seq):
        results[i, indices] = 1.0  # fancy indexing sets all positions at once
    return results


# Prepare the data: multi-hot encode each review into a 10,000-dim vector.
x_train = vectorize_seq(x_train)
x_test = vectorize_seq(x_test)

# Labels become float32 arrays of 0.0 / 1.0, matching binary_crossentropy.
y_train = np.asarray(y_train).astype('float32')
y_test = np.asarray(y_test).astype('float32')

# Build the baseline classifier: two 16-unit ReLU hidden layers feeding
# a single sigmoid output for binary sentiment prediction.
model = models.Sequential([
    layers.Dense(16, activation='relu', input_shape=(10000,)),
    layers.Dense(16, activation='relu'),
    layers.Dense(1, activation='sigmoid'),
])

model.compile(
    optimizer='rmsprop',
    loss='binary_crossentropy',
    metrics=['accuracy'],
)

# Hold out the first 10,000 samples for validation; train on the rest.
holdout = 10000
x_val, partial_x_train = x_train[:holdout], x_train[holdout:]
y_val, partial_y_train = y_train[:holdout], y_train[holdout:]

# Train the baseline model, recording per-epoch metrics for comparison.
history = model.fit(partial_x_train, partial_y_train,
                    epochs=20, batch_size=512,
                    validation_data=(x_val, y_val))

# Keep the baseline's loss curves around for the final plot.
history_dict1 = history.history
loss_values1 = history_dict1['loss']
val_loss_values1 = history_dict1['val_loss']

# X-axis values: 1 .. number of epochs actually run.
epochs = range(1, len(loss_values1) + 1)


# Build a much larger network (512 units per hidden layer) to compare
# its overfitting behaviour against the 16-unit baseline.
model = models.Sequential([
    layers.Dense(512, activation='relu', input_shape=(10000,)),
    layers.Dense(512, activation='relu'),
    layers.Dense(1, activation='sigmoid'),
])

model.compile(
    optimizer='rmsprop',
    loss='binary_crossentropy',
    metrics=['accuracy'],
)

# Train the bigger model on the exact same split and schedule so the
# two loss curves are directly comparable.
history = model.fit(partial_x_train, partial_y_train,
                    epochs=20, batch_size=512,
                    validation_data=(x_val, y_val))

# Extract the bigger model's loss curves.
history_dict2 = history.history
loss_values2 = history_dict2['loss']
val_loss_values2 = history_dict2['val_loss']

# BUG FIX: the y-axis label promises "Validation Loss", and the
# val_loss_values were computed but never used — the original plotted
# *training* loss instead. Plot the validation curves, which is what
# the capacity-comparison experiment is about.
plt.plot(epochs, val_loss_values1, 'bo', label='Original Model')
plt.plot(epochs, val_loss_values2, 'b', label='Bigger Model')
# BUG FIX: the second model is larger, not smaller — title corrected.
plt.title('Original Model and Bigger Model')
plt.xlabel('Epochs')
plt.ylabel('Validation Loss')
plt.legend()
plt.show()

