#-*- coding:utf-8 -*-
import sys
# Python 2 only: reload() re-exposes the removed setdefaultencoding() so that
# implicit str<->unicode conversions use UTF-8 (needed for the Chinese text below).
reload(sys)
sys.setdefaultencoding('utf-8')
import keras
keras.__version__  # notebook residue: only displays the version in an interactive session
import copy
from keras.datasets import reuters
import numpy as np


# Load the Reuters newswire topic-classification dataset, restricted to the
# 10,000 most frequently occurring words; rarer words are dropped.
(train_data, train_labels), (test_data, test_labels) = reuters.load_data(num_words=10000)


# 
# Like with the IMDB dataset, the argument `num_words=10000` restricts the data to the 10,000 most frequently occurring words found in the 
# data.
# 
# We have 8,982 training examples and 2,246 test examples:

 

# Notebook residue: the bare expressions below only display their values when
# run interactively; they have no effect when executed as a script.
len(train_data)
len(test_data)


# As with the IMDB reviews, each example is a list of integers (word indices):

 


train_data[10]


# Here's how you can decode it back to words, in case you are curious:

 


word_index = reuters.get_word_index()
# Invert the word -> index mapping so words can be looked up by index.
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
# Note that our indices were offset by 3
# because 0, 1 and 2 are reserved indices for "padding", "start of sequence", and "unknown".
decoded_newswire = ' '.join([reverse_word_index.get(i - 3, '?') for i in train_data[0]])


 


decoded_newswire


# The label associated with an example is an integer between 0 and 45: a topic index.

 


train_labels[10]


# ## Preparing the data
# 
# We can vectorize the data with the exact same code as in our previous example:

 

def vectorize_sequences(sequences, dimension=10000):
    """Multi-hot encode a list of word-index sequences.

    Returns a float matrix of shape (len(sequences), dimension) in which
    row r has 1.0 at every column listed in sequences[r] and 0.0 elsewhere.
    """
    encoded = np.zeros((len(sequences), dimension))
    for row, word_indices in enumerate(sequences):
        # NumPy fancy indexing sets all listed columns of this row at once.
        encoded[row, word_indices] = 1.
    return encoded

# Our vectorized training data (multi-hot, shape (num_samples, 10000))
x_train = vectorize_sequences(train_data)
# Our vectorized test data
x_test = vectorize_sequences(test_data)


# There are two ways to vectorize the labels:
# - cast them to an integer tensor, or
# - one-hot encode them.


def to_one_hot(labels, dimension=46):
    """One-hot encode integer class labels.

    Returns a float matrix of shape (len(labels), dimension) where row r
    has 1.0 in column labels[r] and 0.0 everywhere else.
    """
    matrix = np.zeros((len(labels), dimension))
    # Set every (row, label) position in a single vectorized assignment.
    matrix[np.arange(len(labels)), labels] = 1.
    return matrix

# Our vectorized training labels
one_hot_train_labels = to_one_hot(train_labels)
# Our vectorized test labels
one_hot_test_labels = to_one_hot(test_labels)


# Note that there is a built-in way to do this in Keras, which you have already seen in action in our MNIST example:

 


from keras.utils.np_utils import to_categorical

# These overwrite the hand-rolled encodings above with Keras' equivalent helper.
one_hot_train_labels = to_categorical(train_labels)
one_hot_test_labels = to_categorical(test_labels)


# ## Building our network
# 
# 
# This topic classification problem looks very similar to our previous movie review classification problem: in both cases, we are trying to 
# classify short snippets of text. There is however a new constraint here: the number of output classes has gone from 2 to 46, i.e. the 
# dimensionality of the output space is much larger. 
# 
# In a stack of `Dense` layers like what we were using, each layer can only access information present in the output of the previous layer. 
# If one layer drops some information relevant to the classification problem, this information can never be recovered by later layers: each 
# layer can potentially become an "information bottleneck". In our previous example, we were using 16-dimensional intermediate layers, but a 
# 16-dimensional space may be too limited to learn to separate 46 different classes: such small layers may act as information bottlenecks, 
# permanently dropping relevant information.
# 
# For this reason we will use larger layers. Let's go with 64 units:

 
# ---------------------------------------------------------------------------
# First experiment: train for 20 epochs and plot how accuracy and loss evolve.
# ---------------------------------------------------------------------------
from keras import models
from keras import layers

model = models.Sequential()
model.add(layers.Dense(64, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(46, activation='softmax'))



# Two things worth noting about this architecture:
# - The network ends with a Dense layer of size 46, so for every input sample
#   it outputs a 46-dimensional vector, one entry per topic class.
#
# - The last layer uses a softmax activation, which means the network outputs
#   a probability distribution over the 46 classes:
#
#   for each input sample, output[i] is the probability that the sample
#   belongs to class i.

 


model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])


# ## Validating our approach
# 
# Let's set apart 1,000 samples in our training data to use as a validation set:


# Training split: everything after the first 1,000 samples.
partial_x_train = x_train[1000:]
partial_y_train = one_hot_train_labels[1000:]
# Validation split: the first 1,000 samples.
x_val = x_train[:1000]
y_val = one_hot_train_labels[:1000]


# Train for 20 epochs, monitoring loss/accuracy on the validation set.
history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=20,
                    batch_size=512,
                    validation_data=(x_val, y_val))


# Let's display its loss and accuracy curves:


import matplotlib.pyplot as plt

# Plot training vs. validation loss per epoch.
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(1, len(loss) + 1)

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()

plt.show()


plt.clf()   # clear figure

# Plot training vs. validation accuracy per epoch.
# NOTE(review): the history keys 'acc'/'val_acc' match old Keras versions;
# newer Keras records them as 'accuracy'/'val_accuracy'.
acc = history.history['acc']
val_acc = history.history['val_acc']

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
# Fix: this chart plots accuracy, but the y-axis was labelled 'Loss'
# (copy-paste from the loss plot above).
plt.ylabel('Accuracy')
plt.legend()

plt.show()
# ---------------------------------------------------------------------------
# Second experiment: retrain from scratch for 8 epochs, then test.
# ---------------------------------------------------------------------------

# It seems that the network starts overfitting after 8 epochs.
# Let's train a new network from scratch for 8 epochs, then let's evaluate it on the test set.


# Same architecture as before: two 64-unit hidden layers, 46-way softmax output.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(46, activation='softmax'))

model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(partial_x_train,
          partial_y_train,
          epochs=8,
          batch_size=512,
          validation_data=(x_val, y_val))
# evaluate() returns [test_loss, test_accuracy].
results = model.evaluate(x_test, one_hot_test_labels)


results


# Random baseline: shuffle the test labels and measure how often a random
# assignment matches the truth; accuracy well above this fraction shows the
# model actually learned something.
test_labels_copy = copy.copy(test_labels)
np.random.shuffle(test_labels_copy)
float(np.sum(np.array(test_labels) == np.array(test_labels_copy))) / len(test_labels)

# Generate predictions on new data.

predictions = model.predict(x_test)


# Each entry in `predictions` is a vector of length 46:


# Fix: the original line spelled `print` with full-width characters
# (`ｐｒｉｎｔ`), which is a SyntaxError. Each prediction is a length-46 vector.
# %-formatting keeps identical output under both Python 2 and 3.
print("predictions[0].shape %s" % (predictions[0].shape,))


# The coefficients in this vector sum to 1:


# Fix: the original line separated the print arguments with a full-width
# comma (，), which is a SyntaxError. The softmax outputs should sum to ~1.
# %-formatting keeps identical output under both Python 2 and 3.
print("输出层的结果的和：%s" % np.sum(predictions[0]))


# The largest entry is the predicted class, i.e. the class with the highest probability:

 


np.argmax(predictions[0])  # notebook residue: value is only displayed interactively

# ---------------------------------------------------------------------------
# Third experiment: deliberately shrink the middle layer to observe the effect
# of an information bottleneck.
# ---------------------------------------------------------------------------
# An alternative way to handle the labels and the loss, mentioned earlier:
# instead of one-hot encoding, cast the labels to an integer tensor.

y_train = np.array(train_labels)
y_test = np.array(test_labels)

# The only thing that needs to change is the loss function:
# we previously used `categorical_crossentropy`;
# with integer labels we should use `sparse_categorical_crossentropy`:


 


model.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy', metrics=['acc'])

# The new loss function is mathematically identical to
# `categorical_crossentropy`; it is merely a different interface.

# Note that the intermediate layers must be reasonably large:
# since there are 46 classes, an intermediate layer much smaller than 46
# becomes a bottleneck. To demonstrate this, run an experiment with a
# 4-unit middle layer:
model = models.Sequential()
model.add(layers.Dense(64, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(4, activation='relu'))
model.add(layers.Dense(46, activation='softmax'))

model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(partial_x_train,
          partial_y_train,
          epochs=20,
          batch_size=128,
          validation_data=(x_val, y_val))


# 在降低中间层的节点数以后，
# 我们的神经网络看起来似乎最高是71%，降低了８％


# 从以上实验中你需要学会的是：
# 如果你试图对数据进行Ｎ分类，那么你的最后一层Ｄｅｎｓｅ应该包含Ｎ个节点。
# 对于多分类问题而言
# 你的网络应该以一个softmax激活函数作为结束，这样他就会输出关于N个类别的概率分布。

# 注意：Dense 层与输出层之间始终是全连接的；
# 上面实验说明的是中间层维度过小会形成“信息瓶颈”，永久丢失对分类有用的信息。

# 对于这样的问题，你应该使用'categorical_crossentropy'作为loss函数
# 它会最小化该网络输出的概率分布与真实目标之间的差距



# 有两种方式处理多分类的标签：
# 一种是独热编码，并且使用`categorical_crossentropy`作为loss函数
# 一种是编码为整数张量，使用`sparse_categorical_crossentropy`作为loss函数


# 中间层的节点数一般不要少于分类数太多。
