import keras
from keras.datasets import imdb
from keras import layers
from keras.preprocessing import sequence
from keras.utils import pad_sequences

# Dataset hyperparameters for the IMDB sentiment task.
max_features = 2000  # vocabulary size: keep only the 2000 most frequent words
max_len = 500        # every review is padded/truncated to this many tokens

# Reviews arrive as variable-length lists of word indices, restricted to
# the `max_features` most common words.
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)

# Pad (or truncate) each review so all inputs share a fixed length.
x_train, x_test = (pad_sequences(split, maxlen=max_len) for split in (x_train, x_test))
# 1D convnet for binary sentiment classification over padded IMDB sequences.
model = keras.models.Sequential()
# Embedding: max_features is the vocabulary size (number of distinct word
# indices in the dataset); 128 is the dimensionality of each word's
# embedding vector. Inputs shorter than the maximum are padded upstream.
model.add(layers.Embedding(max_features, 128))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.MaxPool1D(5))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.GlobalMaxPool1D())
# BUG FIX: binary_crossentropy expects probabilities in [0, 1] by default
# (from_logits=False). The original Dense(1) emitted an unbounded logit,
# so the loss was silently miscomputed; a sigmoid activation fixes this.
model.add(layers.Dense(1, activation='sigmoid'))
model.summary()
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
# Log training to TensorBoard: event files are written under my_log_dir,
# and once per epoch activation histograms and embedding snapshots are
# recorded (histogram_freq=1 / embeddings_freq=1).
tensorboard = keras.callbacks.TensorBoard(
    log_dir='my_log_dir',
    histogram_freq=1,
    embeddings_freq=1,
)
callbacks = [tensorboard]

# Train for 20 epochs, holding out the last 20% of the training data
# for validation.
history = model.fit(
    x_train,
    y_train,
    epochs=20,
    batch_size=128,
    validation_split=0.2,
    callbacks=callbacks,
)
