"""
https://blog.csdn.net/menghaocheng/article/details/102783705

【TF2.0-CNN】迁移学习（将inceptionV3应用到猫狗分类）
"""

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, losses, optimizers, callbacks, metrics
import numpy as np
import os
import sys
import cv2 as cv
from sklearn.model_selection import train_test_split

# --- Configuration constants ---
data_path = r'../../../../../large_data/DL1/_many_files/zoo'  # dataset root; expects 'cat' and 'dog' subdirectories
IMG_H = 150  # input image height fed to InceptionV3
IMG_W = 150  # input image width fed to InceptionV3
BATCH_SIZE = 4
N_EPOCHS = 8
ALPHA = 1e-3  # Adam learning rate
VER = 'v2.4'  # run version tag, used to separate TensorBoard log directories
FILE_NAME = os.path.basename(__file__)
LOG_DIR = os.path.join('_log', FILE_NAME, VER)  # per-script, per-version TensorBoard log dir

# Enable on-demand GPU memory growth so TensorFlow does not reserve all VRAM
# at startup (set_memory_growth must be called before GPUs are initialized,
# hence the RuntimeError guard).
gpus = tf.config.experimental.list_physical_devices('GPU')
print(gpus)
try:
    for device in gpus:
        tf.config.experimental.set_memory_growth(device, True)
except RuntimeError as e:
    print(e)

# Build the transfer-learning model: a frozen InceptionV3 backbone with
# global-average pooling, followed by a trainable 2-way classification head.
local_weights_file = '../../../../../large_data/model/inceptionV3/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
inceptionV3 = keras.applications.InceptionV3(include_top=False,
                                             weights=local_weights_file,
                                             input_shape=[IMG_H, IMG_W, 3],
                                             pooling='avg')

inceptionV3.trainable = False  # freeze the pretrained backbone
assert not inceptionV3.trainable  # all layers of the backbone are now frozen

inputs = keras.Input(shape=(IMG_H, IMG_W, 3))
# training=False keeps BatchNorm layers of the frozen backbone in inference
# mode even while the head is being trained with fit().
x = inceptionV3(inputs, training=False)
x = layers.Dense(2)(x)  # 2-class logits; loss below uses from_logits=True
model = keras.Model(inputs, x)
model.summary()

# Smoke test: push one dummy batch through the backbone to confirm the
# pooled feature shape. Use the configured constants instead of the former
# hard-coded (4, 150, 150, 3) so the check tracks configuration changes.
test_data = np.zeros((BATCH_SIZE, IMG_H, IMG_W, 3), dtype=np.float32)
pred = inceptionV3.predict(test_data)
print('pred.shape', pred.shape)

def get_pic_data(dir):  # NOTE: param name kept for compatibility, though it shadows builtin `dir`
    """Load all images in a directory, resized and scaled to [-1, 1].

    Args:
        dir: path to a directory of image files (one class per directory).

    Returns:
        float32 ndarray of shape (n_images, IMG_H, IMG_W, 3) with values
        in [-1, 1] (InceptionV3-style preprocessing).
    """
    x = []
    for file in os.listdir(dir):
        path = os.path.join(dir, file)
        img = cv.imread(path, cv.IMREAD_COLOR)
        if img is None:
            # cv.imread returns None for unreadable or non-image files;
            # skip them instead of crashing on cv.resize below.
            print('skipping unreadable file:', path)
            continue
        # cv.resize takes dsize as (width, height). The original passed
        # (IMG_H, IMG_W), which only worked because the two are equal.
        img = cv.resize(img, (IMG_W, IMG_H))
        # Map uint8 [0, 255] to float32 [-1, 1].
        img = img.astype(np.float32) / 255. * 2. - 1.
        x.append(img)
    return np.asarray(x, dtype=np.float32)


# ---- Assemble the labelled dataset: cat -> class 0, dog -> class 1 ----
x_cat = get_pic_data(os.path.join(data_path, 'cat'))
print('x_cat:', x_cat.shape)
x_dog = get_pic_data(os.path.join(data_path, 'dog'))
print('x_dog:', x_dog.shape)
# NHWC layout is what TF expects; no transpose needed.
y_cat = np.full([len(x_cat)], 0, dtype=np.int32)
print('y_cat:', y_cat.shape)
y_dog = np.full([len(x_dog)], 1, dtype=np.int32)
print('y_dog:', y_dog.shape)
x = np.concatenate([x_cat, x_dog], axis=0)
y = np.concatenate([y_cat, y_dog], axis=0)
print('x:', x.shape)
print('y:', y.shape)

# Split 80% train / 10% validation / 10% test (fixed seed, shuffled).
x_train, x_val_test, y_train, y_val_test = train_test_split(
    x, y, train_size=0.8, random_state=1, shuffle=True)
x_val, x_test, y_val, y_test = train_test_split(
    x_val_test, y_val_test, train_size=0.5, random_state=1, shuffle=True)
for split_name, split_arr in (('x_train', x_train), ('x_val', x_val),
                              ('x_test', x_test), ('y_train', y_train),
                              ('y_val', y_val), ('y_test', y_test)):
    print(split_name, split_arr.shape)

# Wrap the splits in tf.data pipelines; only the training set drops the
# final partial batch.
AUTOTUNE = tf.data.experimental.AUTOTUNE
dl_train = (tf.data.Dataset.from_tensor_slices((x_train, y_train))
            .shuffle(1000)
            .batch(BATCH_SIZE, drop_remainder=True)
            .prefetch(AUTOTUNE))
dl_test = (tf.data.Dataset.from_tensor_slices((x_test, y_test))
           .shuffle(1000)
           .batch(BATCH_SIZE)
           .prefetch(AUTOTUNE))
dl_val = (tf.data.Dataset.from_tensor_slices((x_val, y_val))
          .shuffle(1000)
          .batch(BATCH_SIZE)
          .prefetch(AUTOTUNE))

# ---- Train the classification head and evaluate on the held-out test set ----
adam = optimizers.Adam(learning_rate=ALPHA)
# Model outputs raw logits, so the loss applies softmax internally.
xent = losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer=adam,
              loss=xent,
              metrics=[metrics.sparse_categorical_accuracy])

tensorboard_cb = callbacks.TensorBoard(LOG_DIR, update_freq='batch', profile_batch=0)
model.fit(dl_train,
          epochs=N_EPOCHS,
          validation_data=dl_val,
          callbacks=[tensorboard_cb])

model.evaluate(dl_test)
