from __future__ import absolute_import, division,print_function,unicode_literals
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy
import math
import matplotlib.pyplot as plt
import tqdm
import tqdm.auto
tqdm.tqdm = tqdm.auto.tqdm
import shutil
import os.path
import keras

# Remove any previously exported model directory so a fresh run starts clean.
export_dir = '../model/'
if os.path.exists(export_dir):
    shutil.rmtree(export_dir)

# Load the rock_paper_scissors dataset as (image, label) pairs,
# together with its metadata (split sizes, feature info).
dataset,metadata = tfds.load('rock_paper_scissors',as_supervised=True,with_info=True)
train_dataset,test_dataset = dataset['train'],dataset['test']

# Example counts taken from the dataset metadata (used later for
# shuffle-buffer sizing and steps_per_epoch).
num_train_examples = metadata.splits['train'].num_examples
num_test_examples = metadata.splits['test'].num_examples
print("Number of training examples: {}".format(num_train_examples))
print("Number of test examples:{}".format(num_test_examples))

print(train_dataset)
print(test_dataset)
# Resize every image to IMG_SIZE x IMG_SIZE (150x150).
IMG_SIZE =150

# Preprocessing: normalize pixel values and resize.
def normalize(image,label):
    """Cast the image to float32, scale pixels to [0, 1], and resize to IMG_SIZE x IMG_SIZE."""
    scaled = tf.cast(image, tf.float32) / 255
    resized = tf.image.resize(scaled, (IMG_SIZE, IMG_SIZE))
    return resized, label
def rot(image,label):
    """Augmentation: rotate the image 90 degrees counter-clockwise."""
    return tf.image.rot90(image, 1), label
def flip(image,label):
    """Augmentation: randomly mirror the image horizontally."""
    return tf.image.random_flip_left_right(image), label
def cut(image,label):
    """Augmentation: upscale to 200x200, then take a random 150x150 crop."""
    enlarged = tf.image.resize(image, [200, 200])
    cropped = tf.image.random_crop(enlarged, [150, 150, 3])
    return cropped, label

# Normalize first so every augmented variant below works on [0, 1] floats
# at 150x150.
train_dataset = train_dataset.map(normalize)
test_dataset = test_dataset.map(normalize)

# Build three augmented copies of the normalized data: rotated,
# horizontally flipped, and randomly cropped.
train_dataset_1 = train_dataset.map(rot)
test_dataset_1 = test_dataset.map(rot)

train_dataset_2 = train_dataset.map(flip)
test_dataset_2 = test_dataset.map(flip)

train_dataset_3 = train_dataset.map(cut)
test_dataset_3 = test_dataset.map(cut)

# Chain the original plus the three augmented variants, quadrupling the
# number of examples (hence the 4x factors used later for shuffling and
# step counts).
train_dataset = train_dataset.concatenate(train_dataset_1)
test_dataset = test_dataset.concatenate(test_dataset_1)
train_dataset = train_dataset.concatenate(train_dataset_2)
test_dataset = test_dataset.concatenate(test_dataset_2)
train_dataset = train_dataset.concatenate(train_dataset_3)
test_dataset = test_dataset.concatenate(test_dataset_3)

# Inspect a single image: skip 1200 examples, grab the next one, then break.
for image,label in test_dataset.skip(1200):
    break
image = image.numpy().reshape((150,150,3))
plt.figure()
plt.imshow(image,cmap=plt.cm.binary)
plt.colorbar()
plt.grid(True)
plt.show()

# Human-readable class names, indexed by the integer label.
class_name = ['rock','paper','scissors']

# Show a 5x5 grid of the first 25 test images with their class names.
plt.figure(figsize=(10,10))
i=0
for(image,label) in test_dataset.take(25):
    image = image.numpy().reshape((150,150,3))
    plt.subplot(5,5,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(True)
    plt.imshow(image,cmap=plt.cm.binary)
    plt.xlabel(class_name[label])
    i+=1
plt.show()


# CNN classifier: four Conv/MaxPool stages, then dropout and two dense layers.
model = tf.keras.Sequential([
    # BUG FIX: the original passed input_shape=[n,150,150,3] where `n` was
    # undefined (NameError) and wrongly included a batch dimension.
    # Keras input_shape excludes the batch dimension.
    tf.keras.layers.Conv2D(64,[3,3],padding='SAME',activation='relu',
                           input_shape=(IMG_SIZE, IMG_SIZE, 3)),
    tf.keras.layers.MaxPool2D([2,2],strides=2,padding='SAME'),

    tf.keras.layers.Conv2D(128,[3,3],padding='SAME',activation='relu'),
    tf.keras.layers.MaxPool2D([2,2],strides=2,padding='SAME'),

    tf.keras.layers.Conv2D(256,[3,3],padding='SAME',activation='relu'),
    tf.keras.layers.MaxPool2D([2,2],strides=2,padding='SAME'),

    tf.keras.layers.Conv2D(256,[3,3],padding='SAME',activation='relu'),
    tf.keras.layers.MaxPool2D([2,2],strides=2,padding='SAME'),

    tf.keras.layers.Dropout(0.3),   # drop 30% of activations during training
    tf.keras.layers.Flatten(),      # flatten feature maps to a vector
    tf.keras.layers.Dense(32,activation='relu'),
    # 3 output classes (rock / paper / scissors) with softmax probabilities.
    tf.keras.layers.Dense(3,activation='softmax')
])


# BUG FIX: Model.build expects the batch dimension (None = any batch size);
# the original [150,150,3] omitted it. The model is already built via
# input_shape in its first layer, so this mainly guards summary().
model.build((None, 150, 150, 3))
model.summary()

# Compile: integer labels -> sparse categorical cross-entropy.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
print("编译成功！")
BATCH_SIZE = 32
# Shuffle buffer covers the full 4x-augmented training set; repeat() makes
# the dataset infinite, so steps_per_epoch below bounds each epoch.
train_dataset = train_dataset.repeat().shuffle(4*num_train_examples).batch(BATCH_SIZE)
test_dataset = test_dataset.batch(BATCH_SIZE)

# steps_per_epoch = (4x-augmented example count) / batch size, rounded up.
history = model.fit(train_dataset,epochs=2,steps_per_epoch=math.ceil((4*num_train_examples)/BATCH_SIZE))

# Training-history curves recorded by model.fit.
acc = history.history['accuracy']
loss = history.history['loss']

# Derive the x-axis from the recorded history instead of hard-coding the
# epoch count, so the plot stays correct if epochs changes.
epochs_range = range(len(acc))

plt.figure(figsize=(8,8))
plt.subplot(1,2,1)
plt.plot(epochs_range,acc,label='Training Accuracy')
plt.legend(loc='lower right')
plt.title('Training Accuracy')

plt.subplot(1,2,2)
# BUG FIX: the original plotted `acc` here, so the "Training loss" panel
# duplicated the accuracy curve.
plt.plot(epochs_range,loss,label='Training loss')
plt.legend(loc='lower right')
plt.title('Training loss')
plt.show()


model.save('my_model_1.h5')
print('保存成功')


del model

restored_model = tf.keras.models.load_model('my_model_1.h5')
test_loss, test_accuracy = restored_model.evaluate(test_dataset,steps=math.ceil((4*num_test_examples)/32))
print('Accuracy on test dataset:',test_accuracy)
print('loss on test dataset:',test_loss)