import tensorflow as tf

from tensorflow import keras
import matplotlib.pyplot as plt
import numpy as np
import glob

# --- Data discovery and train/test split -------------------------------------
# Expects the CUB-200-2011 layout: CUB_200_2011/images/<id>.<class_name>/<img>.jpg
imgs_path = glob.glob('CUB_200_2011/images/*/*.jpg')

# Class name comes from the parent directory, e.g.
# "001.Black_footed_Albatross" -> "Black_footed_Albatross".
# Normalize separators first so this works on both Windows and POSIX
# (the original split('\\') only parsed Windows backslash paths); split('.', 1)
# keeps class names intact even if they contain additional dots.
all_labels_name = [img_p.replace('\\', '/').split('/')[-2].split('.', 1)[1]
                   for img_p in imgs_path]

# Map class names to dense integer indices and back.
label_to_index = dict((name, i) for i, name in enumerate(np.unique(all_labels_name)))
index_to_label = dict((v, k) for k, v in label_to_index.items())

all_labels = [label_to_index.get(name) for name in all_labels_name]

# Fixed seed so the shuffle (and therefore the split) is reproducible.
np.random.seed(2021)
random_index = np.random.permutation(len(imgs_path))

imgs_path = np.array(imgs_path)[random_index]
all_labels = np.array(all_labels)[random_index]

# 80/20 train/test split on the shuffled order.
i = int(len(imgs_path) * 0.8)
train_path = imgs_path[:i]
train_labels = all_labels[:i]
test_path = imgs_path[i:]
test_labels = all_labels[i:]

train_dataset = tf.data.Dataset.from_tensor_slices((train_path, train_labels))
test_dataset = tf.data.Dataset.from_tensor_slices((test_path, test_labels))


def load_img(path, label):
    """Decode the JPEG at *path* to an RGB tensor, resize to 256x256, scale to [0, 1].

    Returns the (image, label) pair unchanged in structure so it can be mapped
    over a tf.data pipeline of (path, label) slices.
    """
    raw = tf.io.read_file(path)
    img = tf.image.decode_jpeg(raw, channels=3)
    img = tf.image.resize(img, [256, 256])
    img = tf.cast(img, tf.float32) / 255
    return img, label


# Build the input pipelines. AUTOTUNE lets tf.data pick parallelism at runtime.
AUTOTUNE = tf.data.experimental.AUTOTUNE
train_dataset = train_dataset.map(load_img, num_parallel_calls=AUTOTUNE)
test_dataset = test_dataset.map(load_img, num_parallel_calls=AUTOTUNE)

BATCH_SIZE = 8
# repeat() makes the dataset infinite, so fit() below must drive epoch
# boundaries via steps_per_epoch; shuffle(100) gives light randomization.
# prefetch overlaps image decoding/resizing with model training.
train_dataset = train_dataset.repeat().shuffle(100).batch(BATCH_SIZE).prefetch(AUTOTUNE)
test_dataset = test_dataset.batch(BATCH_SIZE).prefetch(AUTOTUNE)

def _vgg_stage(filters, n_convs, pool, first=False):
    """One VGG-style stage: n_convs x (3x3 Conv2D + BatchNorm), optional MaxPool."""
    stage = []
    for k in range(n_convs):
        conv_kwargs = {'activation': 'relu'}
        if first and k == 0:
            # Only the very first layer of the network declares the input shape.
            conv_kwargs['input_shape'] = (256, 256, 3)
        stage.append(tf.keras.layers.Conv2D(filters, (3, 3), **conv_kwargs))
        stage.append(tf.keras.layers.BatchNormalization())
    if pool:
        stage.append(tf.keras.layers.MaxPool2D())
    return stage


# VGG-like backbone: five conv stages (the last has three convs and no pooling),
# then global average pooling and a 200-way classifier head. The final Dense has
# no activation: it emits logits, matched by from_logits=True in the loss.
model = tf.keras.Sequential(
    _vgg_stage(64, 2, True, first=True)
    + _vgg_stage(128, 2, True)
    + _vgg_stage(256, 2, True)
    + _vgg_stage(512, 2, True)
    + _vgg_stage(512, 3, False)
    + [
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(1024, activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Dense(200),
    ]
)

# print(model.summary())

# The head outputs logits (no softmax), so the loss must use from_logits=True.
model.compile(optimizer=tf.keras.optimizers.Adam(0.0001),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['acc'])

train_count = len(train_path)
test_count = len(test_path)

# train_dataset repeats forever, so fit() needs an explicit number of steps
# per epoch; partial final batches are dropped by the integer division.
steps_per_epoch = train_count // BATCH_SIZE
validation_steps = test_count // BATCH_SIZE

history = model.fit(train_dataset, epochs=50,
                    steps_per_epoch=steps_per_epoch,
                    validation_data=test_dataset,
                    validation_steps=validation_steps)

# Persist weights + architecture in the (legacy) HDF5 format implied by .h5.
model.save('model.h5')
# history.history.keys()

# 正确率的变化
plt.plot(history.epoch, history.history.get('acc'), label='acc')
plt.plot(history.epoch, history.history.get('val_acc'), label='val_acc')
plt.legend()
# 过拟合, 训练数据已经..., 上升趋势, epoch不够


# loss的变化
plt.plot(history.epoch, history.history.get('loss'), label='loss')
plt.plot(history.epoch, history.history.get('val_loss'), label='val_loss')
plt.legend()
# loss下降趋势

def load_preprocess_image(path):
    """Load one image for inference: RGB, 256x256, float32 scaled to [0, 1].

    Mirrors the preprocessing applied by load_img at training time, minus the
    label handling.
    """
    data = tf.io.read_file(path)
    img = tf.image.decode_jpeg(data, channels=3)
    img = tf.image.resize(img, [256, 256])
    return tf.cast(img, tf.float32) / 255.0


def predict_image(test_img_path):
    """Classify a single image file and return its human-readable class name."""
    # Model expects a batch axis, so wrap the single image as a batch of one.
    batch = tf.expand_dims(load_preprocess_image(test_img_path), axis=0)
    logits = model.predict(batch)
    # argmax over the 200 logits, mapped back through the index->name table.
    return index_to_label.get(np.argmax(logits))