import os, shutil

root_path = "/home/python-test/py36-keras-demo01"
# Path to the directory where the original (Kaggle) dataset was uncompressed
original_dataset_dir = root_path + '/kaggle_original_data'
# Directory where the smaller working dataset will be stored
base_dir = root_path + '/cats_and_dogs_small'

# Directories for the train / validation / test splits
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')

# Per-class subdirectories: <split>/cats and <split>/dogs
train_cats_dir = os.path.join(train_dir, 'cats')            # cat training images
train_dogs_dir = os.path.join(train_dir, 'dogs')            # dog training images
validation_cats_dir = os.path.join(validation_dir, 'cats')  # cat validation images
validation_dogs_dir = os.path.join(validation_dir, 'dogs')  # dog validation images
test_cats_dir = os.path.join(test_dir, 'cats')              # cat test images
test_dogs_dir = os.path.join(test_dir, 'dogs')              # dog test images

# os.makedirs(..., exist_ok=True) replaces the repeated
# `if not os.path.exists(...): os.mkdir(...)` pattern: it creates any missing
# parent directories in one call and avoids the check-then-create race.
for _split_dir in (
        base_dir,
        train_dir, validation_dir, test_dir,
        train_cats_dir, train_dogs_dir,
        validation_cats_dir, validation_dogs_dir,
        test_cats_dir, test_dogs_dir):
    os.makedirs(_split_dir, exist_ok=True)

# ----------------------------------------------如果已经复制过了，就省略这一步----------------------------------------------
# # 将前 1000 张猫的图像复制到 train_cats_dir
# fnames = ['cat.{}.jpg'.format(i) for i in range(1000)]
# # print(fnames)
# for fname in fnames:
#     src = os.path.join(original_dataset_dir, fname)
#     dst = os.path.join(train_cats_dir, fname)
#     shutil.copyfile(src, dst)
#
# # 将接下来 500 张猫的图像复制到 validation_cats_dir
# fnames = ['cat.{}.jpg'.format(i) for i in range(1000, 1500)]
# for fname in fnames:
#     src = os.path.join(original_dataset_dir, fname)
#     dst = os.path.join(validation_cats_dir, fname)
#     shutil.copyfile(src, dst)
#
# # 将接下来的 500 张猫的图像复制到 test_cats_dir
# fnames = ['cat.{}.jpg'.format(i) for i in range(1500, 2000)]
# for fname in fnames:
#     src = os.path.join(original_dataset_dir, fname)
#     dst = os.path.join(test_cats_dir, fname)
#     shutil.copyfile(src, dst)
#
# # 将前 1000 张狗的图像复制到 train_dogs_dir
# fnames = ['dog.{}.jpg'.format(i) for i in range(1000)]
# for fname in fnames:
#     src = os.path.join(original_dataset_dir, fname)
#     dst = os.path.join(train_dogs_dir, fname)
#     shutil.copyfile(src, dst)
#
# # 将接下来 500 张狗的图像复制到 validation_dogs_dir
# fnames = ['dog.{}.jpg'.format(i) for i in range(1000, 1500)]
# for fname in fnames:
#     src = os.path.join(original_dataset_dir, fname)
#     dst = os.path.join(validation_dogs_dir, fname)
#     shutil.copyfile(src, dst)
#
# # 将接下来 500 张狗的图像复制到 test_dogs_dir
# fnames = ['dog.{}.jpg'.format(i) for i in range(1500, 2000)]
# for fname in fnames:
#     src = os.path.join(original_dataset_dir, fname)
#     dst = os.path.join(test_dogs_dir, fname)
#     shutil.copyfile(src, dst)

# 看看每个分组（训练 / 验证 / 测试）中分别包含多少张图像。
# print('total training cat images:', len(os.listdir(train_cats_dir)))
# print('total training dog images:', len(os.listdir(train_dogs_dir)))
# print('total validation cat images:', len(os.listdir(validation_cats_dir)))
# print('total validation dog images:', len(os.listdir(validation_dogs_dir)))
# print('total test cat images:', len(os.listdir(test_cats_dir)))
# print('total test dog images:', len(os.listdir(test_dogs_dir)))

# -----------------------开始构建网络---------------------------------

# -----------------------01 模型定义（将猫狗分类的小型卷积神经网络实例化）---------------------------------

from keras import models
from keras import layers

# Small convnet for binary cat-vs-dog classification of 150x150 RGB images:
# four Conv2D + MaxPooling2D stages that shrink the feature maps while
# deepening the channels, then a dense classifier ending in one sigmoid unit.
model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(128, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(128, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(512, activation='relu'),
    layers.Dense(1, activation='sigmoid'),
])

# print(model.summary())

# Using TensorFlow backend.
# _________________________________________________________________
# Layer (type)                 Output Shape              Param #
# =================================================================
# conv2d_1 (Conv2D)            (None, 148, 148, 32)      896
# _________________________________________________________________
# max_pooling2d_1 (MaxPooling2 (None, 74, 74, 32)        0
# _________________________________________________________________
# conv2d_2 (Conv2D)            (None, 72, 72, 64)        18496
# _________________________________________________________________
# max_pooling2d_2 (MaxPooling2 (None, 36, 36, 64)        0
# _________________________________________________________________
# conv2d_3 (Conv2D)            (None, 34, 34, 128)       73856
# _________________________________________________________________
# max_pooling2d_3 (MaxPooling2 (None, 17, 17, 128)       0
# _________________________________________________________________
# conv2d_4 (Conv2D)            (None, 15, 15, 128)       147584
# _________________________________________________________________
# max_pooling2d_4 (MaxPooling2 (None, 7, 7, 128)         0
# _________________________________________________________________
# flatten_1 (Flatten)          (None, 6272)              0
# _________________________________________________________________
# dense_1 (Dense)              (None, 512)               3211776
# _________________________________________________________________
# dense_2 (Dense)              (None, 1)                 513
# =================================================================
# Total params: 3,453,121
# Trainable params: 3,453,121
# Non-trainable params: 0
# _________________________________________________________________

# -----------------------02 编译模型---------------------------------
from keras import optimizers

# Binary classification, so binary cross-entropy loss; a small RMSprop
# learning rate (1e-4) keeps training stable; accuracy is tracked per epoch.
rmsprop = optimizers.RMSprop(lr=1e-4)
model.compile(optimizer=rmsprop,
              loss='binary_crossentropy',
              metrics=['acc'])

# -----------------------03 数据预处理---------------------------------

# 你现在已经知道，将数据输入神经网络之前，应该将数据格式化为经过预处理的浮点数张量。
# 现在，数据以 JPEG 文件的形式保存在硬盘中，所以数据预处理步骤大致如下。
# (1) 读取图像文件。
# (2) 将 JPEG 文件解码为 RGB 像素网格。
# (3) 将这些像素网格转换为浮点数张量。
# (4) 将像素值（0~255 范围内）缩放到 [0, 1] 区间（正如你所知，神经网络喜欢处理较小的输
# 入值）。
# 这些步骤可能看起来有点吓人，但幸运的是， Keras 拥有自动完成这些步骤的工具。 Keras
# 有一个图像处理辅助工具的模块，位于 keras.preprocessing.image。特别地，它包含
# ImageDataGenerator 类，可以快速创建 Python 生成器，能够将硬盘上的图像文件自动转换
# 为预处理好的张量批量。


from keras.preprocessing.image import ImageDataGenerator

# Rescale pixel values from [0, 255] to [0, 1]
train_datagen = ImageDataGenerator(rescale=1. / 255)
test_datagen = ImageDataGenerator(rescale=1. / 255)

# Both generators read images straight from their split directories,
# resize everything to 150x150, and yield batches of 20 with binary
# labels (class_mode='binary' matches the binary_crossentropy loss).
_flow_kwargs = dict(
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary',
)

train_generator = train_datagen.flow_from_directory(
    train_dir, **_flow_kwargs)
validation_generator = test_datagen.flow_from_directory(
    validation_dir, **_flow_kwargs)

# 我们来看一下其中一个生成器的输出：它生成了 150×150 的 RGB 图像［形状为 (20,
# 150, 150, 3)］与二进制标签［形状为 (20,)］组成的批量。每个批量中包含 20 个样本（批
# 量大小）。注意，生成器会不停地生成这些批量，它会不断循环目标文件夹中的图像。因此，你
# 需要在某个时刻终止（break）迭代循环。
# for data_batch, labels_batch in train_generator:
#     print('data batch shape:', data_batch.shape)
#     print('labels batch shape:', labels_batch.shape)
#     break

# 输出：
# data batch shape: (20, 150, 150, 3)
# labels batch shape: (20,)

# -----------------------04 训练模型---------------------------------


# 我们将使用 fit_generator 方法来拟合，它在数据生成器上的效果和 fit 相同。
# fit_generator fits the model from the data generators exactly as fit does
# for in-memory arrays: 100 steps of 20 images per epoch for 30 epochs,
# evaluating on 50 batches drawn from the validation generator.
history = model.fit_generator(
    train_generator,
    epochs=30,
    steps_per_epoch=100,
    validation_steps=50,
    validation_data=validation_generator)

# Persist the trained model (architecture, weights and optimizer state).
model.save('cats_and_dogs_small_1.h5')

# -----------------------06 分别绘制训练过程中模型在训练数据和验证数据上的损失和精度---------------------------------

import matplotlib.pyplot as plt

# Per-epoch training curves recorded by fit_generator.
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

# 1-based epoch numbers for the x axis.
epochs = range(1, len(acc) + 1)

# Figure 1: accuracy ('bo' = blue dots for training, 'b' = solid blue line
# for validation).
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()

# Figure 2: loss, same plotting conventions.
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()

# FIX: the original called plt.show(block=False) and then plt.show() right
# after it; the second call blocks anyway, so the non-blocking call did
# nothing useful. A single blocking show() displays both figures.
plt.show()
