# Binary sentiment classification of IMDB movie reviews: positive vs negative.
import numpy as np
# NOTE: import everything from tensorflow.keras — mixing the standalone
# `keras` package with `tensorflow.keras` yields distinct, potentially
# incompatible layer classes (especially with Keras 3 installed).
from tensorflow import keras
from tensorflow.keras.datasets import imdb
from tensorflow.keras.layers import Dense

# Keep only the 10,000 most frequent words; each review is a list of word indices.
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)


# print(len(train_data))
# print(train_labels.shape)
# print(test_data.shape)
# print(test_labels.shape)

# 单词字典
# word_index = imdb.get_word_index()
# assert isinstance(word_index, dict)
# print(len(word_index))

# 查看某一条影片
# reverse_word_index = dict([(v, k) for (k, v) in word_index.items()])
# film_review_indexes = train_data[0]
# film_review = [reverse_word_index.get(index - 3, "?") for index in film_review_indexes]

# print(' '.join(film_review))
# print(train_labels[0])

# 查看最大索引
# max_index = max([max(sequence) for sequence in train_data])
# print(f'max_index={max_index}')


# Multi-hot encoding: turn a 1-D array of index sequences into a 2-D tensor.
def data_to_vectorization(data: np.ndarray, ndim: int) -> np.ndarray:
    """Multi-hot encode an array of integer-index sequences.

    Args:
        data: 1-D object array where each element is a sequence of int
            word indices (each index must be < ndim).
        ndim: Width of each output vector (the vocabulary size).

    Returns:
        Float array of shape (len(data), ndim) with 1.0 at every index
        that appears in the corresponding sequence and 0.0 elsewhere.
    """
    assert data.ndim == 1
    results = np.zeros(shape=(len(data), ndim))
    for i, sequence in enumerate(data):
        # Fancy indexing marks all of this review's indices in one
        # C-level operation instead of a per-index Python loop;
        # duplicate indices are harmless (idempotent assignment).
        results[i, sequence] = 1.0
    return results

# Model definition.
# One hidden ReLU layer of 16 units feeding a single sigmoid unit; the
# sigmoid squashes the output to a value in (0, 1), interpreted as the
# probability that the review is positive.
model = keras.Sequential(
    layers=[
        Dense(units=16, activation='relu'),
        Dense(units=1, activation=keras.activations.sigmoid)
    ]
)

# binary_crossentropy is the standard loss for a single sigmoid output.
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])

# Hold out the first 10,000 training samples as a validation set.
# NOTE: train_data/train_labels are rebound to the remaining samples,
# so the slicing order below matters.
validation_size = 10000
validation_data = train_data[:validation_size]
train_data = train_data[validation_size:]
validation_labels = train_labels[:validation_size]
train_labels = train_labels[validation_size:]

# print(validation_data)
# print(validation_labels)

assert len(validation_data)==validation_size
assert len(validation_labels)==validation_size

# Multi-hot encode both splits with the same vocabulary size used in load_data.
vectorized_train_data = data_to_vectorization(train_data, 10000)
vectorized_val_data = data_to_vectorization(validation_data, 10000)
# print(vectorized_val_data.shape)
# print(vectorized_val_data.shape)

# Overfitting is already observable around epoch 10 (validation loss rises
# while training loss keeps falling); 20 epochs makes the effect clear.
history=model.fit(x=vectorized_train_data, y=train_labels, batch_size=128, epochs=20,
                  validation_data=(vectorized_val_data, validation_labels))

# Evaluate the model.
# The commented-out lines below compute test accuracy manually by
# thresholding predictions at 0.5; model.evaluate reports the same metric.
# predictions=model.predict(data_to_vectorization(test_data,10000))
# zero_or_ones=np.array([1 if p>0.5 else 0 for p in predictions])
# assert zero_or_ones.shape==test_labels.shape
# matches = zero_or_ones==test_labels
# print(f'测试精度:{np.mean(matches)}')

# Returns [test_loss, test_accuracy].
evaluate_result=model.evaluate(x=data_to_vectorization(test_data, 10000), y=test_labels)
print(evaluate_result)