# -*- coding: utf-8 -*-
# import tensorflow as tf
# import tensorflow.compat.v1 as tf
# tf.disable_v2_behavior()
# from  tensorflow.examples.tutorials.mnist import input_data
# mnist_data = input_data.read_data_sets("G:\\code\\dataset\\mnist", one_hot=True)#, source_url=http://yann.lecun.com/exdb/mnist/)




# mnist_numpy = mnist_data.as_numpy(mnist_data)
# mnist_img, mnist_lable = mnist_data["images"], mnist_data["lable"]
# # print(mnist_data_info)
# print(mnist_img)
# print(mnist_lable)

# import numpy as np
# import tensorflow as tf
# # import matplotlib.pyplot as plt
# import tensorflow_datasets as tfds

# # plt.subplot(333)
# mnist_data = tfds.load(name="mnist", download=True, data_dir="G:\\code\\dataset", split="train", shuffle_files=True)#, batch_size=-1)#, with_info=True)
# # for mnist_example in mnist_data.take(10):  # 只取一个样本
# #     image, label = mnist_example["image"], mnist_example["label"]

# #     plt.imshow(image.numpy()[:, :, 0].astype(np.float32), cmap=plt.get_cmap("gray"))
# #     print("Label: %d" % label.numpy())

# # plt.show()


# batch_size = 100
# batch_num = 500
# # batch_num = mnist_data.train.num_example//batch_size
# # print(mnist_data['image'].shape + " and " + mnist_data['label'].shape)

# x = tf.placeholder(tf.float32, [None, 784])
# y = tf.placeholder(tf.float32, [None, 10])

# weights ={
#     'hidden_1':tf.Variable(tf.random_normal([784, 256])),
#     'out':tf.Variable(tf.random_normal([256, 10]))
# }
# biases = {
#     'b1':tf.Variable(tf.random_normal([256])),
#     'out':tf.Variable(tf.random_normal([10]))
# }
# def neural_network(x):
#     hidden_layer_1 = tf.add(tf.matmul(x, weights['hidden_1'], biases['b1']))
#     out_layer = tf.matmul(hidden_layer_1, weights['out'] + biases['out'])
#     return out_layer

# result = neural_network(x)
# prediction = tf.nn.softmax(result)
# loss = tf.reduce_mean(tf.square(y - prediction))
# train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
# correct_pred = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# init = tf.global_variables_initializer()

# step_num = 400
# with tf.Session() as sess:
#     sess.run(init)
#     for step in range(step_num):
#         for batch in range(batch_num):
#             batch_x, batch_y = mnist_data.train.next_batch(batch_size)
#             sess.run(train_step, feed_dict={x:mnist_data.test.images, y:mnist_data.test.labels})
#             print("Step : " + str(step) + "Tranining Accuracy" + "{:.3f}" + str(acc))
#     print("Finished")

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# Dataset selector: 1 = MNIST, 2 = Fashion-MNIST, 3 = CIFAR-10,
# 4 = CIFAR-100, 5 = IMDB reviews. Controls both the loading branch
# below and which training/plotting path runs at the bottom of the file.
use_dataset=5


# Plotting helpers
def plot_image(i, predictions_array, true_label, img):
  """Show image ``img[i]`` captioned with predicted vs. true class.

  ``predictions_array`` is the probability vector for this single sample
  (callers pass it pre-indexed). The caption is blue when the arg-max
  prediction matches ``true_label[i]``, red otherwise. Reads the
  module-level ``class_names`` list.
  """
  true_label, img = true_label[i], img[i]
  plt.grid(False)
  plt.xticks([])
  plt.yticks([])

  plt.imshow(img, cmap=plt.cm.binary)

  predicted_label = np.argmax(predictions_array)
  color = 'blue' if predicted_label == true_label else 'red'

  caption = "{} {:2.0f}% ({})".format(class_names[predicted_label],
                                      100 * np.max(predictions_array),
                                      class_names[true_label])
  plt.xlabel(caption, color=color)

def plot_value_array(i, predictions_array, true_label):
  """Draw a 10-bar chart of class probabilities for one sample.

  Bars are gray except the predicted class (red) and the true class
  ``true_label[i]`` (blue); when they coincide, blue wins because it is
  set last.
  """
  true_label = true_label[i]
  plt.grid(False)
  plt.xticks(range(10))
  plt.yticks([])
  bars = plt.bar(range(10), predictions_array, color="#777777")
  plt.ylim([0, 1])
  predicted_label = np.argmax(predictions_array)

  bars[predicted_label].set_color('red')
  bars[true_label].set_color('blue')


def decode_review(init, text='0'):
    """Translate a sequence of integer word indices into readable text.

    Joins the looked-up words with single spaces; unknown indices map to
    '?'. Relies on the module-level ``reverse_word_index`` dict built in
    the IMDB (use_dataset == 5) branch.

    NOTE(review): ``init`` is never used, and the default ``text='0'``
    iterates the characters of the string '0' — confirm intent.
    """
    words = (reverse_word_index.get(idx, '?') for idx in text)
    return ' '.join(words)

# Load the selected dataset and (for most branches) build a matching model.
if use_dataset == 1:
    # MNIST handwritten digits: 28x28 grayscale, 10 classes.
    dataset = tf.keras.datasets.mnist
    (x_train, y_train), (x_test, y_test) = dataset.load_data()#(path="G:\code\dataset\keras\mnist")
    # Scale pixel values from [0, 255] into [0, 1].
    x_train, x_test = x_train / 255.0, x_test / 255.0
    model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation='softmax')
    ])
    model.compile(optimizer='adam',
                # loss='sparse_categorical_crossentropy',
                loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                metrics=['accuracy'])
    class_names = ['0', '1', '2', '3', '4','5', '6', '7', '8', '9']
elif use_dataset == 2:
    # Fashion-MNIST: same 28x28 shape as MNIST, clothing categories.
    dataset = tf.keras.datasets.fashion_mnist
    (x_train, y_train), (x_test, y_test) = dataset.load_data()#(path="G:\code\dataset\keras\mnist")
    x_train, x_test = x_train / 255.0, x_test / 255.0
    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(10)  # raw logits; loss applies softmax via from_logits=True
    ])
    model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
    class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
elif use_dataset == 3:
    # NOTE(review): no `model` or `class_names` is defined for CIFAR-10, so
    # the `use_dataset <= 4` training section below would raise NameError.
    dataset = tf.keras.datasets.cifar10
    (x_train, y_train), (x_test, y_test) = dataset.load_data()#(path="G:\code\dataset\keras\mnist")
    x_train, x_test = x_train / 255.0, x_test / 255.0
elif use_dataset == 4:
    # NOTE(review): same issue as CIFAR-10 — no model/class_names defined.
    dataset = tf.keras.datasets.cifar100
    (x_train, y_train), (x_test, y_test) = dataset.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0
elif use_dataset == 5:
    # IMDB movie-review sentiment (binary classification).
    dataset = tf.keras.datasets.imdb
    # NOTE(review): Keras returns (x_train, y_train), (x_test, y_test); this
    # unpacking swaps the names, so afterwards `x_test` holds the TRAIN
    # labels and `y_train` holds the TEST review sequences. The
    # use_dataset == 5 branch at the bottom of the file uses the swapped
    # names consistently, so the script still behaves correctly — confirm
    # before renaming anything here.
    (x_train, x_test), (y_train, y_test) = dataset.load_data(num_words=10000)
    # Input vocabulary size for the movie reviews (10,000 words).
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Embedding(10000, 16))
    model.add(tf.keras.layers.GlobalAveragePooling1D())
    model.add(tf.keras.layers.Dense(16, activation='relu'))
    model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
    model.summary()
    model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
    # A dictionary mapping words to integer indices.
    word_index = dataset.get_word_index()
    # Reserve the first indices for special tokens.
    word_index = {k:(v+3) for k,v in word_index.items()}
    word_index["<PAD>"] = 0
    word_index["<START>"] = 1
    word_index["<UNK>"] = 2  # unknown
    word_index["<UNUSED>"] = 3

    reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])

    # Pad/truncate every review to exactly 256 tokens (padding at the end).
    x_train = tf.keras.preprocessing.sequence.pad_sequences(x_train,
                                                        value=word_index["<PAD>"],
                                                        padding='post',
                                                        maxlen=256)
    # NOTE(review): under the swapped names above, this pads the TEST
    # sequences (stored in `y_train`), which model.evaluate consumes later.
    y_train = tf.keras.preprocessing.sequence.pad_sequences(y_train,
                                                       value=word_index["<PAD>"],
                                                       padding='post',
                                                       maxlen=256)

# Train, evaluate, and visualize using the names set up above.
# NOTE(review): for use_dataset 3 or 4 this branch runs but `model` and
# `class_names` were never defined — it would raise NameError.
if use_dataset <= 4:
    # Preview the first 25 training images in a 5x5 grid.
    plt.figure(figsize=(10,10))
    for i in range(25):
        plt.subplot(5,5,i+1)
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)
        plt.imshow(x_train[i], cmap=plt.cm.binary)
        # plt.xlabel(x_test[i])
    plt.show()  # NOTE(review): blocks until the window is closed, before training starts

    # Training the model
    model.fit(x_train, y_train, epochs=5)
    model.evaluate(x_test,  y_test, verbose=2)

    # Verify model predictions.
    # Wrap the trained model so its outputs become probabilities.
    probability_model = tf.keras.Sequential([model, 
                                            tf.keras.layers.Softmax()])
    predictions = probability_model.predict(x_test)
    print(predictions[0])
    print(np.argmax(predictions[0]))
    print(y_test[0])


    # Plot the first X test images, their predicted labels, and the true labels.
    # Color correct predictions in blue and incorrect predictions in red.
    # if use_dataset <= 4:
        # num_rows = 5#7
        # num_cols = 3#5
        # num_images = num_rows*num_cols
        # plt.figure(figsize=(2*2*num_cols, 2*num_rows))
        # for i in range(num_images):
        # plt.subplot(num_rows, 2*num_cols, 2*i+1)
        # plot_image(i, predictions[i], y_test, x_test)
        # plt.subplot(num_rows, 2*num_cols, 2*i+2)
        # plot_value_array(i, predictions[i], y_test)
        # plt.tight_layout()
        # plt.show()

    # Using the trained model.
    # Grab an image from the test dataset.
    testnum=15
    img = x_test[testnum]
    print(img.shape)
    # Add the image to a batch where it's the only member.
    img = (np.expand_dims(img,0))
    print(img.shape)
    predictions_single = probability_model.predict(img)
    print(predictions_single)
    plt.figure(figsize=(2, 1))
    plt.subplot(1, 2, 1)
    plot_image(testnum, predictions_single[0], y_test, x_test)
    plt.subplot(1, 2, 2)
    plot_value_array(testnum, predictions_single[0], y_test)
    _ = plt.xticks(range(10), class_names, rotation=45)
    print(np.argmax(predictions_single[0]))
    plt.tight_layout()
    plt.show()
elif use_dataset == 5:
    # NOTE(review): names here follow the swapped IMDB unpacking above:
    # x_train = padded TRAIN sequences, x_test = TRAIN labels,
    # y_train = padded TEST sequences, y_test = TEST labels.
    print("Training entries: {}, labels: {}".format(len(x_train), len(x_test)))
    print(x_train[0])

    # Hold out the first 10,000 training samples as a validation set.
    x_val = x_train[:10000]
    partial_x_train = x_train[10000:]
    y_val = x_test[:10000]  # labels for the validation slice (see note above)
    partial_y_train = x_test[10000:]

    history = model.fit(partial_x_train,
                        partial_y_train,
                        epochs=40,
                        batch_size=512,
                        validation_data=(x_val, y_val),
                        verbose=1)
    # Under the swapped names this evaluates the padded TEST sequences
    # (`y_train`) against the TEST labels (`y_test`) — behaviorally correct
    # despite the misleading argument names.
    results = model.evaluate(y_train,  y_test, verbose=2)
    print(results)

    history_dict = history.history
    history_dict.keys()
    acc = history_dict['accuracy']
    val_acc = history_dict['val_accuracy']
    loss = history_dict['loss']
    val_loss = history_dict['val_loss']

    epochs = range(1, len(acc) + 1)

    # "bo" means "blue dot".
    plt.plot(epochs, loss, 'bo', label='Training loss')
    # "b" means "solid blue line".
    plt.plot(epochs, val_loss, 'b', label='Validation loss')
    plt.title('Training and validation loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()

    plt.show()

    plt.clf()   # clear the figure before drawing the accuracy plot

    plt.plot(epochs, acc, 'bo', label='Training acc')
    plt.plot(epochs, val_acc, 'b', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.legend()

    plt.show()