from keras.datasets import imdb
from keras import preprocessing
from keras.models import Sequential
from keras.layers import Flatten, Dense, Embedding

# Vocabulary size: only the top `max_features` most frequent words are kept
# as features; all rarer words are dropped by imdb.load_data below.
max_features = 1000

# Cut each review off after `maxlen` tokens (all tokens are already limited
# to the top max_features most common words).
maxlen = 20

# Load the data as lists of word-index integers, restricted to the
# max_features most frequent words.
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)

# Turn the lists of integers into a 2D integer tensor of shape
# (samples, maxlen): shorter sequences are padded, longer ones truncated.
x_train = preprocessing.sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = preprocessing.sequence.pad_sequences(x_test, maxlen=maxlen)

model = Sequential()
# The embedding table must cover exactly the vocabulary produced by
# imdb.load_data, so its input dimension is max_features (the original
# hard-coded 10000 was out of sync with max_features = 1000).
# Specify input_length so the activations — shape (samples, maxlen, 8) —
# can be flattened afterwards.
model.add(Embedding(max_features, 8, input_length=maxlen))
# Flatten the 3D embedding tensor into a 2D tensor of shape
# (samples, maxlen * 8).
model.add(Flatten())
# Single sigmoid unit for binary (positive/negative) classification.
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
model.summary()
history = model.fit(x_train, y_train,
                    epochs=10,
                    batch_size=32,
                    validation_split=0.2)
