# -*- coding: utf-8 -*-

'''
Created on 2018年8月23日

@author: 李德庚

Tensorflow 教程   https://www.tensorflow.org/tutorials/keras/basic_text_classification


'''

import tensorflow as tf
from tensorflow import keras
import os

import matplotlib.pyplot as plt


'''
This notebook classifies movie reviews as positive or negative 
using the text of the review. This is an example of binary—or 
two-class—classification, an important and widely applicable 
kind of machine learning problem.

We'll use the IMDB dataset that contains the text of 50,000 movie reviews 
from the Internet Movie Database. These are split into 25,000 reviews 
for training and 25,000 reviews for testing. The training and testing sets 
are balanced, meaning they contain an equal number of positive and negative reviews.
'''

'''
 we are interested in the top 10,000 most frequently occurring words in the training data. 
 The rare words are discarded to keep the size of the data manageable
'''
# Keep only the vocab_size most frequently occurring words; rarer words are
# discarded so the dataset stays a manageable size.
vocab_size = 10000

imdb = keras.datasets.imdb

# Dataset files are expected under $KERAS_DATASETS/imdb/.
_imdb_dir = os.path.join(os.environ['KERAS_DATASETS'], 'imdb')
imdb_fname = os.path.join(_imdb_dir, 'imdb.npz')
imdb_index = os.path.join(_imdb_dir, 'imdb_word_index.json')

(train_data, train_labels), (test_data, test_labels) = imdb.load_data(
    path=imdb_fname, num_words=vocab_size)

'''
The dataset comes preprocessed: each example is an array of integers representing 
the words of the movie review. Each label is an integer value of either 0 or 1, 
where 0 is a negative review, and 1 is a positive review.
'''

# The dataset comes preprocessed: each example is an array of integer word
# ids; each label is 0 (negative review) or 1 (positive review).
n_reviews = len(train_data)
n_labels = len(train_labels)
print("Training entries: {}, labels: {}".format(n_reviews, n_labels))

# Each integer stands for a specific word in the vocabulary dictionary.
print("First review(raw):")
print(train_data[0])

# At this point reviews are still variable-length.
print("Length of review 0={}, review 1={}".format(len(train_data[0]), len(train_data[1])))

# To convert integer ids back to words we need the word -> id mapping,
# which ships as a JSON file alongside the dataset.
word_index = imdb.get_word_index(path=imdb_index)

# Shift every id up by 3 to make room for the reserved special tokens below.
word_index = {word: idx + 3 for word, idx in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2  # unknown
word_index["<UNUSED>"] = 3

# Inverse mapping (id -> word), used when decoding reviews back to text.
reverse_word_index = {idx: word for word, idx in word_index.items()}


def decode_review(text, index=None):
    """Convert a sequence of integer word ids back to a readable string.

    Args:
        text: iterable of integer word ids.
        index: optional mapping from id to word. Defaults to the
            module-level ``reverse_word_index``. Ids missing from the
            mapping decode to ``'?'``.

    Returns:
        The decoded review as a single space-joined string.
    """
    if index is None:
        index = reverse_word_index
    return ' '.join(index.get(i, '?') for i in text)


# Show a human-readable version of the first training review.
decoded_first = decode_review(train_data[0])
print("Decoded review:")
print(decoded_first)

# Prepare the data
'''
The reviews—the arrays of integers—must be converted to tensors before being
fed into the neural network. This conversion can be done a couple of ways:

One-hot-encode the arrays to convert them into vectors of 0s and 1s.
For example, the sequence [3, 5] would become a 10,000-dimensional vector
that is all zeros except for indices 3 and 5, which are ones. Then, make this 
the first layer in our network—a Dense layer—that can handle floating point 
vector data. This approach is memory intensive, though, requiring 
a num_words * num_reviews size matrix.

Alternatively, we can pad the arrays so they all have the same length, 
then create an integer tensor of shape num_examples * max_length. 
We can use an embedding layer capable of handling this shape as the first layer 
in our network.

In this tutorial, we will use the second approach.

Since the movie reviews must be the same length, we will use the pad_sequences 
function to standardize the lengths
'''

# Pad (or truncate) every review to exactly 256 tokens, appending the <PAD>
# id at the end, so the data forms a rectangular num_examples x 256 tensor.
pad_id = word_index["<PAD>"]
train_data = keras.preprocessing.sequence.pad_sequences(
    train_data, value=pad_id, padding='post', maxlen=256)
test_data = keras.preprocessing.sequence.pad_sequences(
    test_data, value=pad_id, padding='post', maxlen=256)

# All examples now share the same length.
print("After padding, length of review 0={}, review 1={}"
      .format(len(train_data[0]), len(train_data[1])))

# And inspect the (now padded) first review.
print("After padding, first review data:")
print(train_data[0])

# Build the model: embedding -> average pooling over the sequence ->
# 16-unit relu layer -> single sigmoid unit giving P(positive review).
model = keras.Sequential([
    keras.layers.Embedding(vocab_size, 16),
    keras.layers.GlobalAveragePooling1D(),
    keras.layers.Dense(16, activation=tf.nn.relu),
    keras.layers.Dense(1, activation=tf.nn.sigmoid),
])

print(model.summary())

# NOTE(review): tf.train.AdamOptimizer is the TF1.x optimizer; the history
# keys read later ('acc'/'val_acc') match this configuration.
model.compile(optimizer=tf.train.AdamOptimizer(),
              loss='binary_crossentropy',
              metrics=['accuracy'])

# Hold out the first 10,000 training examples as a validation set.
validation_size = 10000
x_val, partial_x_train = train_data[:validation_size], train_data[validation_size:]
y_val, partial_y_train = train_labels[:validation_size], train_labels[validation_size:]

history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=40,
                    batch_size=512,
                    validation_data=(x_val, y_val),
                    verbose=1)

# Measure generalization on the held-out test set ([loss, accuracy]).
results = model.evaluate(test_data, test_labels)

print(results)

history_dict = history.history

# Per-epoch metrics recorded by model.fit ('acc'/'val_acc' key names come
# from the TF1.x training configuration above).
acc = history_dict['acc']
val_acc = history_dict['val_acc']
loss = history_dict['loss']
val_loss = history_dict['val_loss']

epochs = range(1, len(acc) + 1)

# Loss curves: 'bo' = blue dots (training), 'b' = solid blue line (validation).
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()

plt.clf()  # clear the figure before drawing the accuracy plot
acc_values = history_dict['acc']
val_acc_values = history_dict['val_acc']

# Accuracy curves, same styling convention as the loss plot.
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

# Threshold the sigmoid outputs at 0.5 to get hard 0/1 predictions, then
# keep the [actual, predicted] pairs that disagree.
pre = [1 if score[0] >= 0.5 else 0 for score in model.predict(test_data)]

diff = [[actual, predicted]
        for actual, predicted in zip(test_labels, pre)
        if actual != predicted]

print('diff count:{}'.format(len(diff)))
