import os, shutil

# Note: compared with test1, this test2 changes model saving to use ModelCheckpoint so that only the best model is kept.

root_path = "/home/python-test/py36-keras-demo01"
# Path of the directory holding the original (unzipped) Kaggle dataset
original_dataset_dir = root_path + '/kaggle_original_data'
# Directory that will hold the smaller train/validation/test subsets
base_dir = root_path + '/cats_and_dogs_small'

# Split directories: training, validation and test
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')

# Per-class subdirectories inside each split
train_cats_dir = os.path.join(train_dir, 'cats')            # training cat images
train_dogs_dir = os.path.join(train_dir, 'dogs')            # training dog images
validation_cats_dir = os.path.join(validation_dir, 'cats')  # validation cat images
validation_dogs_dir = os.path.join(validation_dir, 'dogs')  # validation dog images
test_cats_dir = os.path.join(test_dir, 'cats')              # test cat images
test_dogs_dir = os.path.join(test_dir, 'dogs')              # test dog images

# os.makedirs(..., exist_ok=True) replaces the repeated, race-prone
# "if not exists: mkdir" pattern and also creates any missing parent
# directories (base_dir, train_dir, ...) in one call.
for _subset_dir in (train_cats_dir, train_dogs_dir,
                    validation_cats_dir, validation_dogs_dir,
                    test_cats_dir, test_dogs_dir):
    os.makedirs(_subset_dir, exist_ok=True)

# ----------------------------------------------如果已经复制过了，就省略这一步----------------------------------------------
# # 将前 1000 张猫的图像复制到 train_cats_dir
# fnames = ['cat.{}.jpg'.format(i) for i in range(1000)]
# # print(fnames)
# for fname in fnames:
#     src = os.path.join(original_dataset_dir, fname)
#     dst = os.path.join(train_cats_dir, fname)
#     shutil.copyfile(src, dst)
#
# # 将接下来 500 张猫的图像复制到 validation_cats_dir
# fnames = ['cat.{}.jpg'.format(i) for i in range(1000, 1500)]
# for fname in fnames:
#     src = os.path.join(original_dataset_dir, fname)
#     dst = os.path.join(validation_cats_dir, fname)
#     shutil.copyfile(src, dst)
#
# # 将接下来的 500 张猫的图像复制到 test_cats_dir
# fnames = ['cat.{}.jpg'.format(i) for i in range(1500, 2000)]
# for fname in fnames:
#     src = os.path.join(original_dataset_dir, fname)
#     dst = os.path.join(test_cats_dir, fname)
#     shutil.copyfile(src, dst)
#
# # 将前 1000 张狗的图像复制到 train_dogs_dir
# fnames = ['dog.{}.jpg'.format(i) for i in range(1000)]
# for fname in fnames:
#     src = os.path.join(original_dataset_dir, fname)
#     dst = os.path.join(train_dogs_dir, fname)
#     shutil.copyfile(src, dst)
#
# # 将接下来 500 张狗的图像复制到 validation_dogs_dir
# fnames = ['dog.{}.jpg'.format(i) for i in range(1000, 1500)]
# for fname in fnames:
#     src = os.path.join(original_dataset_dir, fname)
#     dst = os.path.join(validation_dogs_dir, fname)
#     shutil.copyfile(src, dst)
#
# # 将接下来 500 张狗的图像复制到 test_dogs_dir
# fnames = ['dog.{}.jpg'.format(i) for i in range(1500, 2000)]
# for fname in fnames:
#     src = os.path.join(original_dataset_dir, fname)
#     dst = os.path.join(test_dogs_dir, fname)
#     shutil.copyfile(src, dst)

# 看看每个分组（训练 / 验证 / 测试）中分别包含多少张图像。
# print('total training cat images:', len(os.listdir(train_cats_dir)))
# print('total training dog images:', len(os.listdir(train_dogs_dir)))
# print('total validation cat images:', len(os.listdir(validation_cats_dir)))
# print('total validation dog images:', len(os.listdir(validation_dogs_dir)))
# print('total test cat images:', len(os.listdir(test_cats_dir)))
# print('total test dog images:', len(os.listdir(test_dogs_dir)))
#


# -----------------------显示几个随机增强后的训练图像---------------------------------

# from keras.preprocessing.image import ImageDataGenerator
# from keras.preprocessing import image
# import matplotlib.pyplot as plt
#
# datagen = ImageDataGenerator(
#     rotation_range=40,
#     width_shift_range=0.2,
#     height_shift_range=0.2,
#     shear_range=0.2,
#     zoom_range=0.2,
#     horizontal_flip=True,
#     fill_mode='nearest')
#
# fnames = [os.path.join(train_cats_dir, fname) for fname in os.listdir(train_cats_dir)]
#
# img_path = fnames[3]  # 选择一张图像进行增强
# img = image.load_img(img_path, target_size=(150, 150))  # 读取图像并调整大小
# x = image.img_to_array(img)  # 将其转换为形状 (150, 150, 3) 的 Numpy 数组
# x = x.reshape((1,) + x.shape)  # 将其形状改变为 (1, 150, 150, 3) # 要求4维
# i = 0
# # 生成随机变换后的图像批量。循环是无限的，因此你需要在某个时刻终止循环
# for batch in datagen.flow(x, batch_size=1):
#     plt.figure(i)
#     imgplot = plt.imshow(image.array_to_img(batch[0]))  # array_to_img 将3维 Numpy数组转换为PIL（Python Imaging Library ）图像实例。
#     i += 1
#     if i % 4 == 0:
#         break
# plt.show(block=False)
# plt.show()

# -----------------------开始构建网络---------------------------------

# -----------------------01 模型定义（定义一个包含 dropout 的新卷积神经网络）---------------------------------

from keras import models
from keras import layers

# Convnet for binary cat/dog classification: four Conv2D + MaxPooling2D
# stages, then a dropout-regularised dense classifier with a single
# sigmoid output unit. Built by handing the layer list straight to
# Sequential instead of calling .add() repeatedly.
model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(128, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(128, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dropout(0.5),  # dropout before the dense head to curb overfitting
    layers.Dense(512, activation='relu'),
    layers.Dense(1, activation='sigmoid'),
])
#
# print(model.summary())
# _________________________________________________________________
# Layer (type)                 Output Shape              Param #
# =================================================================
# conv2d_1 (Conv2D)            (None, 148, 148, 32)      896
# _________________________________________________________________
# max_pooling2d_1 (MaxPooling2 (None, 74, 74, 32)        0
# _________________________________________________________________
# conv2d_2 (Conv2D)            (None, 72, 72, 64)        18496
# _________________________________________________________________
# max_pooling2d_2 (MaxPooling2 (None, 36, 36, 64)        0
# _________________________________________________________________
# conv2d_3 (Conv2D)            (None, 34, 34, 128)       73856
# _________________________________________________________________
# max_pooling2d_3 (MaxPooling2 (None, 17, 17, 128)       0
# _________________________________________________________________
# conv2d_4 (Conv2D)            (None, 15, 15, 128)       147584
# _________________________________________________________________
# max_pooling2d_4 (MaxPooling2 (None, 7, 7, 128)         0
# _________________________________________________________________
# flatten_1 (Flatten)          (None, 6272)              0
# _________________________________________________________________
# dropout_1 (Dropout)          (None, 6272)              0
# _________________________________________________________________
# dense_1 (Dense)              (None, 512)               3211776
# _________________________________________________________________
# dense_2 (Dense)              (None, 1)                 513
# =================================================================
# Total params: 3,453,121
# Trainable params: 3,453,121
# Non-trainable params: 0
# _________________________________________________________________


# -----------------------02 编译模型---------------------------------
from keras import optimizers

# Binary classification setup: binary cross-entropy loss, RMSprop with a
# small learning rate, and plain accuracy as the monitored metric.
rmsprop = optimizers.RMSprop(lr=1e-4)
model.compile(optimizer=rmsprop,
              loss='binary_crossentropy',
              metrics=['acc'])

# -----------------------03 数据预处理(利用数据增强生成器训练卷积神经网络)---------------------------------
from keras.preprocessing.image import ImageDataGenerator

# Training images get on-the-fly augmentation; validation/test images are
# only rescaled — augmenting them would distort the evaluation.
augmentation_kwargs = dict(
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
)
train_datagen = ImageDataGenerator(rescale=1. / 255, **augmentation_kwargs)
test_datagen = ImageDataGenerator(rescale=1. / 255)

# class_mode='binary' because the model is trained with binary_crossentropy;
# every image is resized to 150x150 to match the model's input_shape.
flow_kwargs = dict(target_size=(150, 150), batch_size=32, class_mode='binary')
train_generator = train_datagen.flow_from_directory(train_dir, **flow_kwargs)
validation_generator = test_datagen.flow_from_directory(validation_dir, **flow_kwargs)

# -----------------------04 训练模型 && 保存最好的模型---------------------------------

from keras.callbacks import ModelCheckpoint

# Checkpoint callback: write weights only when validation accuracy
# improves on the best value seen so far (mode='max' on val_acc).
checkpoint_path = "weights-improvement-{epoch:02d}-{val_acc:.4f}.hdf5"
best_model_saver = ModelCheckpoint(
    checkpoint_path,
    monitor='val_acc',
    mode='max',
    save_best_only=True,
    verbose=1,
)

# 100 epochs of 100 augmented batches each, evaluated on 50 validation
# batches per epoch; the History object is kept for plotting below.
history = model.fit_generator(
    train_generator,
    steps_per_epoch=100,
    epochs=100,
    validation_data=validation_generator,
    validation_steps=50,
    callbacks=[best_model_saver])

# -----------------------06 分别绘制训练过程中模型在训练数据和验证数据上的损失和精度---------------------------------

import matplotlib.pyplot as plt

# Plot training vs. validation accuracy and loss from the Keras History
# object to visually check for overfitting.
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(1, len(acc) + 1)

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.legend()

plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.legend()

# Fix: the original called plt.show(block=False) and then immediately
# plt.show(); the second call blocks anyway, so the non-blocking call was
# redundant. A single blocking show() displays both figures.
plt.show()
