# coding:utf-8
import glob
import os
import matplotlib.pyplot as plt
from keras import Model
from keras.applications.inception_v3 import preprocess_input, InceptionV3
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import GlobalAveragePooling2D, Dense
from pathlib import Path

IM_WIDTH = 299   # InceptionV3 default input width
IM_HEIGHT = 299  # InceptionV3 default input height
train_dir = r"F:\cat_dog\train_dir"  # training images, one subdirectory per class
val_dir = r'F:\cat_dog\val_dir'      # validation images, one subdirectory per class
batch_size = 10
FC_size = 1024   # width of the new fully-connected layer added on top of the base model
NB_IV3_LAYERS_TO_FREEZE = 172  # layers kept frozen when fine-tuning (bottom of InceptionV3)
nb_epoch = 3
output_model_file="./catdog.h5"  # where the trained model is saved


def get_nb_files(directory):
  """Return the number of regular files under *directory*, recursively.

  The previous implementation glob-counted every entry of each
  sub-directory, which also counted nested directories as files and
  missed files placed directly inside *directory* itself.

  Args:
    directory: path to scan (str or Path).
  Returns:
    int: file count; 0 if the directory does not exist.
  """
  root = Path(directory)
  if not root.exists():
      return 0
  return sum(1 for entry in root.rglob('*') if entry.is_file())


# Training-set augmentation: random geometric distortions enlarge the
# effective dataset and reduce overfitting.
train_datagen = ImageDataGenerator(
    preprocessing_function=preprocess_input,  # InceptionV3's own input preprocessing
    rotation_range=30,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True
)
# Validation data must NOT be randomly augmented — evaluate on the images
# as-is, applying only the InceptionV3 preprocessing. (This generator
# previously reused the full training augmentation, which makes validation
# metrics noisy and non-reproducible.)
test_datagen = ImageDataGenerator(
    preprocessing_function=preprocess_input
)
# Stream labelled image batches from the class-per-subdirectory layout;
# class labels are inferred from the subdirectory names.
train_generator = train_datagen.flow_from_directory(
  train_dir,
  target_size=(IM_WIDTH, IM_HEIGHT),
  batch_size=batch_size,
)
validation_generator = test_datagen.flow_from_directory(
  val_dir,
  target_size=(IM_WIDTH, IM_HEIGHT),
  batch_size=batch_size,
)

# Base model for transfer learning: InceptionV3 pretrained on ImageNet,
# with include_top=False to drop the fully-connected classifier head —
# those weights are specific to the 1000 ImageNet classes, so a new,
# freshly initialised head must be added instead.
# (The explanatory text that followed was written as bare string
# statements — runtime no-ops — and has been converted to comments.)
base_model = InceptionV3(weights='imagenet', include_top=False)
def add_new_last_layer(base_model, nb_classes):
    """Append a new classification head to a headless convnet.

    GlobalAveragePooling2D collapses the MxNxC feature map into a 1xC
    vector (C = channel count); a hidden FC layer plus a softmax output
    then perform the classification.

    Args:
      base_model: keras model excluding its top (fully-connected) layers
      nb_classes: number of target classes
    Returns:
      new keras Model ending in an nb_classes-way softmax
    """
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(FC_size, activation='relu')(x)  # new trainable hidden layer
    predictions = Dense(nb_classes, activation='softmax')(x)
    # Keras 2 functional-API keywords are `inputs`/`outputs`; the old
    # `input=`/`output=` spelling was removed and raises TypeError on
    # current versions.
    model = Model(inputs=base_model.input, outputs=predictions)
    return model

# NOTE: 1. Transfer learning: drop the old head, freeze the parameters of
#   every other layer, and retrain only the new final fully-connected layer.
# NOTE: 2. Fine-tuning: keep the bottom convolutional layers frozen and
#   retrain more of the upper network layers.
# NOTE: low-level features are extracted by the early convolutional layers;
#   the later layers extract high-level features.

# Alternative way to load weights layer-by-layer from an HDF5 file:
# assert os.path.exists(weights_path), 'Model weights not found (see "weights_path" variable in script).'
# f = h5py.File(weights_path)
# for k in range(f.attrs['nb_layers']):
#     if k >= len(model.layers):
#         # we don't look at the last (fully-connected) layers in the savefile
#         break
#     g = f['layer_{}'.format(k)]
#     weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
#     model.layers[k].set_weights(weights)
# f.close()

# Transfer learning: train only the newly added head on top of a frozen base.
def setup_to_transfer_learn(model:Model,base_model):
    """Freeze every layer of *base_model*, then compile *model*.

    Freezing is essential: the randomly initialised head would otherwise
    produce large gradient updates that corrupt the pretrained
    convolutional weights.
    """
    for frozen_layer in base_model.layers:
        frozen_layer.trainable = False
    model.compile(
        optimizer="rmsprop",
        loss='categorical_crossentropy',
        metrics=['accuracy'],
    )


# Fine-tuning: unfreeze and retrain the top layers of the network.
def setup_to_finetuen(model:Model):
    """Freeze the bottom NB_IV3_LAYERS_TO_FREEZE layers and retrain the rest.

    note: NB_IV3_LAYERS_TO_FREEZE corresponds to the top 2 inception
          blocks of the InceptionV3 architecture.
    Args:
      model: keras model
    """
    # A layer is trainable exactly when its index reaches the freeze cutoff.
    for idx, layer in enumerate(model.layers):
        layer.trainable = idx >= NB_IV3_LAYERS_TO_FREEZE
    # Crucially, use a much lower learning rate (lr=0.0001) than when
    # training from scratch; otherwise optimisation may become unstable
    # and the loss can diverge.
    model.compile(
        optimizer=SGD(lr=0.0001, momentum=0.9),
        loss='categorical_crossentropy',
        metrics=['accuracy'],
    )


# Full model: InceptionV3 base + new 2-class (cat/dog) softmax head.
model = add_new_last_layer(base_model,2)


def tl_train():
    """Phase 1 — transfer learning.

    Freezes the pretrained base, trains only the new head using the
    module-level `model` and generators, saves the model, and plots the
    training curves.
    """
    nb_train_samples = get_nb_files(train_dir)
    nb_val_samples = get_nb_files(val_dir)
    setup_to_transfer_learn(model, base_model)

    # steps_per_epoch / validation_steps count *batches*, not samples;
    # passing the raw sample counts made every epoch batch_size times too
    # long. max(..., 1) guards against datasets smaller than one batch.
    history_tl = model.fit_generator(
        train_generator,
        epochs=nb_epoch,
        steps_per_epoch=max(nb_train_samples // batch_size, 1),
        validation_data=validation_generator,
        validation_steps=max(nb_val_samples // batch_size, 1),
        class_weight='auto'  # NOTE(review): 'auto' is Keras 1 syntax; newer Keras expects a dict — confirm version
    )
    model.save(output_model_file)
    plot_training(history_tl)


def ft_train():
    """Phase 2 — fine-tuning.

    Unfreezes the top inception blocks, retrains them at a low learning
    rate, saves the model, and plots the training curves.
    """
    nb_train_samples = get_nb_files(train_dir)
    nb_val_samples = get_nb_files(val_dir)

    setup_to_finetuen(model)

    # steps_per_epoch / validation_steps count *batches*, not samples;
    # passing the raw sample counts made every epoch batch_size times too
    # long. max(..., 1) guards against datasets smaller than one batch.
    history_ft = model.fit_generator(
        train_generator,
        epochs=nb_epoch,
        steps_per_epoch=max(nb_train_samples // batch_size, 1),
        validation_data=validation_generator,
        validation_steps=max(nb_val_samples // batch_size, 1),
        class_weight='auto'  # NOTE(review): 'auto' is Keras 1 syntax; newer Keras expects a dict — confirm version
    )
    model.save(output_model_file)

    plot_training(history_ft)


def plot_training(history):
  """Plot accuracy and loss curves from a Keras History object.

  Accepts both the old metric key names ('acc'/'val_acc') and the newer
  ones ('accuracy'/'val_accuracy') so the function keeps working across
  Keras versions. Labels + legends distinguish the train and validation
  curves, which were previously plotted in the same color unlabelled.
  """
  hist = history.history
  acc = hist.get('acc', hist.get('accuracy'))
  val_acc = hist.get('val_acc', hist.get('val_accuracy'))
  loss = hist['loss']
  val_loss = hist['val_loss']
  epochs = range(len(acc))

  plt.plot(epochs, acc, 'r.', label='train')
  plt.plot(epochs, val_acc, 'r', label='validation')
  plt.title('Training and validation accuracy')
  plt.legend()

  plt.figure()
  plt.plot(epochs, loss, 'r.', label='train')
  plt.plot(epochs, val_loss, 'r-', label='validation')
  plt.title('Training and validation loss')
  plt.legend()
  plt.show()


'''
Pipeline overview:
  collect data
  augment data
  preprocess data
  build the model
  compile the model / set hyper-parameters
  train
'''
# Guard the entry point so merely importing this module does not start
# a training run (the call was previously unconditional).
if __name__ == "__main__":
    tl_train()