import sys
import keras
import os, shutil

# Original dump: './train' holds the 25000 extracted training images,
# named cat.xxx.jpg / dog.xxx.jpg with xxx running from 0 to 12499.
original_dataset_dir = './train'

# Root of the pared-down dataset (4000 images total).
base_dir = './cats_and_dogs_small'

# NOTE: all directories below are assumed to already exist on disk; the
# one-time os.mkdir calls were run once and intentionally left out so
# the script can be re-executed without raising FileExistsError.

# train / validation / test split directories.
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')

# Per-class ('cats' / 'dogs') subdirectories inside each split.
train_cats_dir = os.path.join(train_dir, 'cats')
train_dogs_dir = os.path.join(train_dir, 'dogs')
validation_cats_dir = os.path.join(validation_dir, 'cats')
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
test_cats_dir = os.path.join(test_dir, 'cats')
test_dogs_dir = os.path.join(test_dir, 'dogs')

# Sanity check: report how many images landed in every split.
# (None entries render the separator line between splits.)
_count_report = [
    ('猫的训练集图像数量', train_cats_dir),
    ('狗的训练集图像数量', train_dogs_dir),
    None,
    ('猫的验证集图像数量', validation_cats_dir),
    ('狗的验证集图像数量', validation_dogs_dir),
    None,
    ('猫的测试集图像数量', test_cats_dir),
    ('狗的测试集图像数量', test_dogs_dir),
]
for _entry in _count_report:
    if _entry is None:
        print('-----------------------------------')
    else:
        _label, _path = _entry
        print(_label, len(os.listdir(_path)))

from keras import layers
from keras import models

# Baseline convnet for binary cat-vs-dog classification: four
# Conv2D/MaxPooling2D stages progressively shrink the 150x150x3 input
# into a small feature map, which a 512-unit dense layer maps to a
# single sigmoid output (probability of the positive class).
model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(128, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(128, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(512, activation='relu'),
    layers.Dense(1, activation='sigmoid'),
])

# Optimizers live under tensorflow.keras (the canonical documented path,
# rather than the top-level tensorflow namespace).
from tensorflow.keras import optimizers

# Compile for binary classification.
# BUG FIX: the `lr` keyword was deprecated and then removed from Keras
# optimizers; the supported name is `learning_rate`.
model.compile(loss="binary_crossentropy",
              optimizer=optimizers.RMSprop(learning_rate=1e-4),
              metrics=['acc'])

from keras.preprocessing.image import ImageDataGenerator

# Rescale pixel values from [0, 255] to [0, 1]; no augmentation yet.
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

# Stream 150x150 RGB batches straight from the split directories;
# binary labels to match the binary_crossentropy loss.
train_generator = train_datagen.flow_from_directory(
    train_dir, target_size=(150, 150), batch_size=20, class_mode="binary")

validation_generator = test_datagen.flow_from_directory(
    validation_dir, target_size=(150, 150), batch_size=20, class_mode="binary")

# Peek at one batch to confirm shapes — expected (20, 150, 150, 3)
# image data and (20,) labels.
for data_batch, labels_batch in train_generator:
    print('图像数据组成的批量:', data_batch.shape)
    print('二进制标签组成的批量:', labels_batch.shape)
    break

# BUG FIX: `fit_generator` was deprecated and subsequently removed from
# TF/Keras; `model.fit` accepts generators directly with the same
# steps_per_epoch / validation_steps semantics.
history = model.fit(train_generator,
                    steps_per_epoch=100,
                    epochs=2,
                    validation_data=validation_generator,
                    validation_steps=50)
model.save("cats_and_dogs_small_1.h5")
import matplotlib.pyplot as plt

# Plot the curves recorded by model.fit: one figure for accuracy, one
# for loss. Training values are dots ('bo'), validation is a line ('b').
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()

plt.figure()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()  # BUG FIX: original had an unclosed `plt.legend(` — a SyntaxError
plt.show()

# Data-augmentation configuration: random rotations, horizontal and
# vertical shifts, shear, zoom, and horizontal flips, with newly exposed
# pixels filled from their nearest neighbours. (The training pipeline
# further down builds its own augmenting generator with the same
# settings plus rescaling.)
datagen = ImageDataGenerator(rotation_range=40,
                             width_shift_range=0.2,
                             height_shift_range=0.2,
                             shear_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True,
                             fill_mode="nearest")
from keras import layers
from keras import models

# Same convnet as before, but with a Dropout layer after Flatten to
# fight overfitting — used together with the augmented training data
# built below.
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))

# BUG FIX: `lr` was removed from Keras optimizers; use `learning_rate`.
model.compile(loss="binary_crossentropy",
              optimizer=optimizers.RMSprop(learning_rate=1e-4),
              metrics=["acc"])

# Augment ONLY the training data (rescale plus random transforms).
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)

# Validation data must never be augmented — only rescaled.
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
        # Target directory.
        train_dir,
        # Resize every image to 150 x 150.
        target_size=(150, 150),
        batch_size=32,
        # binary_crossentropy loss requires binary labels.
        class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
        validation_dir,
        target_size=(150, 150),
        batch_size=32,
        class_mode='binary')

# BUG FIX: `fit_generator` was removed from TF/Keras; `model.fit`
# accepts generators directly.
history = model.fit(
      train_generator,
      steps_per_epoch=100,
      epochs=100,
      validation_data=validation_generator,
      validation_steps=50)
