import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Load the IMDB reviews dataset, keeping only the 10,000 most frequent words;
# each review arrives as a list of integer word indices.
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)

# Pad (or truncate) every review to a fixed length so the data can be fed to
# the Embedding and Flatten layers as a dense (samples, 500) tensor.
max_length = 500
train_data = pad_sequences(train_data, maxlen=max_length)
test_data = pad_sequences(test_data, maxlen=max_length)
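
# Optional sanity check (illustrative sketch): decode the first padded review back
# into words. Assumes the standard Keras IMDB offset of 3 (0 = padding, 1 = start
# marker, 2 = out-of-vocabulary); indices without a match print as '?'.
word_index = imdb.get_word_index()
reverse_word_index = {index + 3: word for word, index in word_index.items()}
decoded_review = ' '.join(reverse_word_index.get(i, '?') for i in train_data[0] if i != 0)
print(decoded_review[:200])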

# Define a simple feed-forward classifier: the Embedding layer maps each word index
# to a 16-dimensional vector, Flatten concatenates the 500 vectors, and two Dense
# layers reduce them to a single sigmoid probability of a positive review.
# input_length fixes the padded sequence length so the layer shapes are known
# before training.
model = models.Sequential()
model.add(layers.Embedding(input_dim=10000, output_dim=16, input_length=max_length))
model.add(layers.Flatten())
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))

# Binary cross-entropy pairs with the single sigmoid output; accuracy is tracked
# during training and evaluation.
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

model.summary()

# Train for 5 epochs, holding out 20% of the training data for validation.
history = model.fit(train_data, train_labels, epochs=5, batch_size=32, validation_split=0.2)
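
# Optional sketch for inspecting training: plot the accuracy curves recorded in
# history.history. Assumes matplotlib is installed; it is not required by the
# rest of the script.
import matplotlib.pyplot as plt

epochs_range = range(1, len(history.history['loss']) + 1)
plt.plot(epochs_range, history.history['accuracy'], label='Training accuracy')
plt.plot(epochs_range, history.history['val_accuracy'], label='Validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()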

# Evaluate on the held-out test set and report accuracy.
test_loss, test_accuracy = model.evaluate(test_data, test_labels)
print(f'Test Accuracy: {test_accuracy * 100:.2f}%')
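
# Illustrative sketch: score a new review with the trained model. encode_review is a
# hypothetical helper (not part of Keras) that mirrors the imdb.load_data encoding:
# word indices shifted by 3, start marker 1, out-of-vocabulary words mapped to 2,
# and words outside the top 10,000 treated as unknown.
def encode_review(text, num_words=10000, index_from=3):
    word_index = imdb.get_word_index()
    tokens = [1]  # start-of-sequence marker used by the Keras IMDB encoding
    for word in text.lower().split():
        index = word_index.get(word)
        if index is not None and index + index_from < num_words:
            tokens.append(index + index_from)
        else:
            tokens.append(2)  # out-of-vocabulary marker
    return pad_sequences([tokens], maxlen=max_length)

sample = encode_review("this movie was a wonderful surprise with great acting")
probability = model.predict(sample)[0][0]
print(f'Positive sentiment probability: {probability:.3f}')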