import os
import sys
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers, activations, losses, optimizers, metrics
import matplotlib.pyplot as plt
import numpy as np

# Reproducibility: fix both NumPy's and TensorFlow's RNG seeds.
np.random.seed(777)
tf.random.set_seed(777)

# Hyperparameters.
VAL_RATE = 0.1  # fraction of the training set held out for validation
BATCH_SIZE = 64
N_EPOCHS = 4  # kept small for a quick demo; increase for real training
ALPHA = 0.001  # Adam learning rate


# 1.
# Classify the cifar2 dataset with Keras
# (1) Data processing
# 1) Define a function with parameters and a return value for loading images (7 pts)
# 2) Define the related parameters sensibly (2 pts)
# 3) Load the cifar2 dataset using a pipeline and the custom function (7 pts)
def load_img(dirpath):
    """Load every image under *dirpath* into one float32 array scaled by 1/255.

    Parameters
    ----------
    dirpath : str
        Directory whose entries are all image files readable by plt.imread.

    Returns
    -------
    np.ndarray
        Shape (n_images, H, W, C), dtype float32, original pixel values
        divided by 255.
    """
    # os.listdir order is platform/filesystem-dependent; sort so the sample
    # order is deterministic (otherwise the fixed RNG seeds above do not
    # make runs reproducible).
    data = [plt.imread(os.path.join(dirpath, name))
            for name in sorted(os.listdir(dirpath))]
    data = np.array(data, dtype=np.float32)
    # NOTE(review): assumes plt.imread returns 0-255 pixel values (e.g. JPEG
    # uint8). For PNG, matplotlib already returns floats in [0, 1], and this
    # division would darken the images — confirm against the dataset format.
    data /= 255.0
    return data


# Directory layout: cifar2/{train,test}/{airplane,automobile}
root_dir = 'cifar2/'
train_dir = root_dir + 'train/'
test_dir = root_dir + 'test/'
train_plane_dir = train_dir + 'airplane'
train_auto_dir = train_dir + 'automobile'
test_plane_dir = test_dir + 'airplane'
test_auto_dir = test_dir + 'automobile'

# Load the four image folders.
x_train_plane = load_img(train_plane_dir)
x_train_auto = load_img(train_auto_dir)
x_test_plane = load_img(test_plane_dir)
x_test_auto = load_img(test_auto_dir)

# Label convention: airplane -> 1, automobile -> 0 (column vectors).
y_train_plane = np.ones((len(x_train_plane), 1))
y_train_auto = np.zeros((len(x_train_auto), 1))
y_test_plane = np.ones((len(x_test_plane), 1))
y_test_auto = np.zeros((len(x_test_auto), 1))

# Concatenate the two classes for each split.
x_train = np.concatenate([x_train_plane, x_train_auto], axis=0)
m_train = len(x_train)
y_train = np.concatenate([y_train_plane, y_train_auto], axis=0)
x_test = np.concatenate([x_test_plane, x_test_auto], axis=0)
y_test = np.concatenate([y_test_plane, y_test_auto], axis=0)

# Shuffle images and labels with the same permutation so they stay aligned.
a = np.random.permutation(m_train)
x_train, y_train = x_train[a], y_train[a]

# Hold out the last m_val examples as the validation split.
m_val = int(np.ceil(m_train * VAL_RATE))
m_train -= m_val
x_train, x_val = x_train[:-m_val], x_train[-m_val:]
y_train, y_val = y_train[:-m_val], y_train[-m_val:]

# tf.data input pipelines.
# Training set: reshuffled (full-size buffer => uniform shuffle) every epoch.
ds_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))\
    .shuffle(buffer_size=m_train)\
    .batch(batch_size=BATCH_SIZE)\
    .prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

# Validation set: NOT shuffled — order has no effect on validation metrics,
# and the previous shuffle used the wrong buffer size (m_train, the training
# set size, instead of the validation set size).
ds_val = tf.data.Dataset.from_tensor_slices((x_val, y_val))\
    .batch(batch_size=BATCH_SIZE)\
    .prefetch(buffer_size=tf.data.experimental.AUTOTUNE)


# (2) Model construction
# 1) Build the model following the figure above (7 pts)
# 2) The model must be encapsulated in a class (7 pts)
# 3) Perform a forward pass (7 pts)
class ConvBnReluPool(keras.Model):
    """Building block: Conv2D -> BN -> ReLU -> MaxPool(2x2) -> BN -> ReLU.

    Args:
        filters: number of convolution filters.
        kernel: convolution kernel size.
        strides: convolution stride (default 1).
        padding: convolution padding mode (default 'valid').
    """

    def __init__(self, filters, kernel, strides=1, padding='valid', **kwargs):
        super().__init__(**kwargs)
        self.conv = layers.Conv2D(filters, kernel, strides=strides, padding=padding)
        self.bn = layers.BatchNormalization()
        self.relu = layers.ReLU()
        self.pool = layers.MaxPool2D(2, 2, 'same')
        self.bn2 = layers.BatchNormalization()
        self.relu2 = layers.ReLU()

    def call(self, input, training=None):
        # training=None (Keras convention) lets Keras infer the phase:
        # fit() runs with training=True, evaluate()/predict() with False.
        # The previous default of True made a bare `model(x)` call run
        # BatchNorm in training mode and update its moving statistics.
        x = self.conv(input)
        # Only BatchNormalization behaves differently between train and
        # inference; the other layers are stateless w.r.t. the phase.
        x = self.bn(x, training=training)
        x = self.relu(x)
        x = self.pool(x)
        x = self.bn2(x, training=training)
        x = self.relu2(x)
        return x


class MyClf(keras.Model):
    """LeNet-style binary classifier: two conv blocks then three dense layers.

    Outputs a sigmoid probability in (0, 1); label 1 = airplane, 0 = automobile
    per the labeling convention used when loading the data.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.cbr1 = ConvBnReluPool(6, 5)
        self.cbr2 = ConvBnReluPool(16, 5)
        self.flt = layers.Flatten()
        self.fc1 = layers.Dense(120, activation=activations.relu)
        self.fc2 = layers.Dense(84, activation=activations.relu)
        self.fc3 = layers.Dense(1, activation=activations.sigmoid)

    def call(self, input, training=None):
        # training=None (Keras convention) lets fit/evaluate/predict set the
        # phase; forward it only to the conv blocks, whose BatchNorm layers
        # are the only phase-dependent parts of this model.
        x = self.cbr1(input, training=training)
        x = self.cbr2(x, training=training)
        x = self.flt(x)
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        return x


# (3) Model prediction
# 1) Show the number of parameters in each layer (7 pts)
model = MyClf()
model.build(input_shape=(None, 32, 32, 3))  # CIFAR images: 32x32 RGB
model.summary()

# 2) Train with the Adam optimizer and a suitable loss and epoch count (7 pts)
model.compile(
    optimizer=optimizers.Adam(learning_rate=ALPHA),
    loss=losses.binary_crossentropy,  # binary task with a sigmoid output
    metrics=[metrics.binary_accuracy]
)
history = model.fit(ds_train,
          epochs=N_EPOCHS,
          validation_data=ds_val)
his = history.history  # dict of per-epoch lists: loss, binary_accuracy, val_*
print(his)

# 3) Plot training vs. test accuracy (7 pts)
plt.plot(his['binary_accuracy'], label='Training Accuracy')
plt.plot(his['val_binary_accuracy'], label='Validation Accuracy')
result = model.evaluate(x_test, y_test)  # result = [loss, binary_accuracy]
print(result)
# Draw the final test accuracy as a horizontal line across all epochs.
line = np.ones_like(his['val_binary_accuracy'])
line *= result[1]
plt.plot(line, label='Testing Accuracy')
plt.legend()
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.show()
