"""
LeNet-5由LeCun等人于1998年提出，主要进行手写数字识别和英文字母识别，是经典的卷积神经网络。LeNet-5结构虽小，
但卷积层、池化层、全连接层各模块齐全，是学习 CNN的基础。
【Tensorflow 2.x】
"""
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, activations, losses, optimizers, metrics, callbacks
import os
import cv2 as cv
import numpy as np

# Reproducibility: seed both TF and NumPy so weight init and shuffling
# are repeatable across runs.
tf.random.set_seed(1)
np.random.seed(1)
VER = 'v2.3'        # version tag; used to separate TensorBoard log dirs
ALPHA = 1e-4        # Adam learning rate
BATCH_SIZE = 64
N_EPOCHS = 16

# 1. Locate the dataset (airplane / car / bird images) and the log directory.
BASE_DIR, FILE_NAME = os.path.split(__file__)
# NOTE(review): renamed from `dir`, which shadowed the builtin of that name.
DATA_REL_DIR = '../../../../large_data/DL1/_many_files/data3'
# FIX: was `BASE_DIR + '/' + DATA_REL_DIR`, which yields an absolute path
# ('/../../...') when BASE_DIR is '' (script launched from its own folder);
# os.path.join handles the empty-prefix case correctly.
IMG_DIR = os.path.join(BASE_DIR, DATA_REL_DIR)
LOG_DIR = os.path.join(BASE_DIR, '_log', FILE_NAME, VER)

# 2. Read the image files. The class label is encoded in the first character
#    of each file name (expected '0'..'2'); non-jpg files and files whose
#    names cannot be parsed are skipped.
print('Reading images ...')
x = []
y = []
for file in os.listdir(IMG_DIR):
    path = os.path.join(IMG_DIR, file)
    base, ext = os.path.splitext(file)
    if ext.lower() != '.jpg':
        continue
    try:
        idx = int(base[0])
    # FIX: was a bare `except:`; catch only the expected failures —
    # ValueError (non-digit first char) and IndexError (empty base name).
    except (ValueError, IndexError):
        continue
    if not 0 <= idx <= 2:
        continue
    # FIX: read the image BEFORE appending the label. cv.imread returns None
    # for corrupt/unreadable files; the original appended the label first,
    # which would leave x and y with mismatched lengths (and a None inside x
    # that crashes np.uint8 below).
    img = cv.imread(path, cv.IMREAD_COLOR)
    if img is None:
        continue
    y.append(idx)
    x.append(img)
# NOTE(review): assumes every image shares one (H, W, 3) shape — np.uint8 on
# a ragged list would not produce a 4-D array; confirm against the dataset.
x = np.uint8(x)
y = np.int64(y)
x_ = x.copy()                        # keep a uint8 copy for display later
x = np.float32(x) / 255. * 2. - 1.   # scale pixels to [-1, 1]
N, H, W, CH = x.shape
n_cls = len(np.unique(y))
print('Reading of image data over.')

# 3. One-hot encode the integer labels (3 classes here).
y = np.identity(n_cls, dtype=np.int64)[y]

# 4. Shuffle with one shared permutation so the samples, labels and the
#    uint8 display copies all stay aligned.
rand_idx = np.random.permutation(N)
x, y, x_ = x[rand_idx], y[rand_idx], x_[rand_idx]

# 5. 80/20 train/test split.
split_n = int(np.ceil(N * 0.8))
x_train, x_test = x[:split_n], x[split_n:]
y_train, y_test = y[:split_n], y[split_n:]
x_ori_train, x_ori_test = x_[:split_n], x_[split_n:]
for _name, _arr in (('x_train', x_train), ('x_test', x_test),
                    ('y_train', y_train), ('y_test', y_test),
                    ('x_ori_train', x_ori_train), ('x_ori_test', x_ori_test)):
    print(_name, _arr.shape, _arr.dtype)

# 6./7. Build a LeNet-5-style network: two conv layers, two max-pool layers,
#       then three fully-connected layers ending in a softmax over n_cls.
inputs = keras.Input((H, W, CH))
# FIX: the original Conv2D layers had no activation, leaving the convolution
# stack a purely linear map between pools; LeNet-5 applies a nonlinearity
# after each convolution.
x = layers.Conv2D(6, (5, 5), (1, 1), 'valid', activation=activations.relu)(inputs)
x = layers.MaxPool2D((2, 2), (2, 2), 'same')(x)
x = layers.Conv2D(16, (5, 5), (1, 1), 'valid', activation=activations.relu)(x)
x = layers.MaxPool2D((2, 2), (2, 2), 'same')(x)
x = layers.Flatten()(x)
x = layers.Dense(120, activation=activations.relu)(x)
x = layers.Dense(84, activation=activations.relu)(x)
# Softmax output paired with categorical cross-entropy below.
x = layers.Dense(n_cls, activation=activations.softmax)(x)
model = keras.Model(inputs, x)
model.compile(
    optimizer=optimizers.Adam(learning_rate=ALPHA),
    loss=losses.categorical_crossentropy,
    metrics=[metrics.categorical_accuracy],
)

# 8. Train on the training set; per-epoch loss/metrics are printed by fit,
#    and per-batch values are streamed to TensorBoard.
tb_cb = callbacks.TensorBoard(LOG_DIR, update_freq='batch', profile_batch=0)
model.fit(
    x=x_train,
    y=y_train,
    batch_size=BATCH_SIZE,
    epochs=N_EPOCHS,
    callbacks=[tb_cb],
    validation_data=(x_test, y_test),
    validation_batch_size=BATCH_SIZE,
)

# 9. Report loss and accuracy on the held-out test set.
print('Testing ...')
model.evaluate(x=x_test, y=y_test, batch_size=BATCH_SIZE)
print('Tested')

# 10. Show a grid of test images titled "true=>pred (V/X)".
#     NOTE(review): this takes the FIRST spr*spc test samples; the data was
#     shuffled earlier so this is effectively a random draw — confirm intended.
spr = 3   # subplot rows
spc = 3   # subplot columns
plt.figure(figsize=[6, 6])
n_sample = spr * spc

x_sample = x_test[:n_sample]
y_sample = y_test[:n_sample]
x_ori_sample = x_ori_test[:n_sample]
pred_sample = model(x_sample)
y_sample = np.argmax(y_sample, axis=1)
pred_sample = np.argmax(pred_sample, axis=1)

for i in range(n_sample):
    plt.subplot(spr, spc, i + 1)
    true = str(y_sample[i])
    pred = str(pred_sample[i])
    judge = 'V' if true == pred else 'X'
    title = f'{true}=>{pred} ({judge})'
    plt.title(title, color='black' if judge == 'V' else 'red')
    plt.axis('off')
    # FIX: cv.imread loads channels in BGR order, but matplotlib expects RGB;
    # reverse the channel axis so colours display correctly.
    plt.imshow(x_ori_sample[i][..., ::-1])
plt.show()
