# Reuters newswire topic classification
from tensorflow import keras
from tensorflow.keras.datasets import reuters
import numpy as np

# Keep only the top-N most frequent words as the effective training vocabulary.
num_words = 10000
(train_data, train_labels), (test_data, test_labels) = reuters.load_data(num_words=num_words)
number_of_categories = 46

# Sanity-check the loaded splits.
for split in (train_data, train_labels, test_data, test_labels):
    print(split.shape)
print(train_labels)

# Build an index -> word lookup table. Note: in the decoded sequences the
# indices 0-2 are reserved (padding / start-of-sequence / unknown), which is
# why decoding below offsets each index by -3.
word_index = reuters.get_word_index()
assert isinstance(word_index, dict)
reversed_word_index = {index: word for word, index in word_index.items()}

# Decode and print the first newswire; unknown indices render as "?".
first_news = [reversed_word_index.get(i - 3, "?") for i in train_data[0]]
print(' '.join(first_news))

# Two-layer MLP: a 64-unit ReLU hidden layer feeding a 46-way softmax
# that yields a probability distribution over the topic categories.
model = keras.Sequential(
    layers=[
        keras.layers.Dense(64, activation='relu'),
        keras.layers.Dense(number_of_categories, activation='softmax'),
    ]
)

# 使用multi-hot编码, 一维变二维张量
def data_to_vectorization(data: np.ndarray, ndim: int):
    assert data.ndim == 1
    results = np.zeros(shape=(len(data), ndim))
    for i, sequence in enumerate(data):
        for j in sequence:
            results[i, j] = 1
    return results

# Multi-hot encode the index sequences so the Dense layers can consume them.
x_train = data_to_vectorization(train_data, num_words)
x_test = data_to_vectorization(test_data, num_words)

# Hold out the first 1000 training samples as a validation set.
validation_size = 1000
val_data, x_train = x_train[:validation_size], x_train[validation_size:]
val_labels, train_labels = train_labels[:validation_size], train_labels[validation_size:]

print(x_train.shape)
print(train_labels.shape)

# Labels are plain integer class ids, hence the sparse variant of the loss
# (no one-hot encoding of the targets required).
model.compile(
    optimizer='rmsprop',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'],
)
model.fit(
    x_train,
    train_labels,
    epochs=6,
    batch_size=128,
    validation_data=(val_data, val_labels),
)
model.evaluate(x_test, test_labels)
