"""
1.	利用Tensorflow或Pytorch深度学习框架，建立由适当卷积层、池化层和全连接构成的CNN模型，进行“验证码”识别。
按下面要求完成CNN模型训练，并评估“验证码”识别的准确率。（65分）
"""
import tensorflow as tf
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers, activations, optimizers, losses, metrics, callbacks
from sklearn.model_selection import train_test_split
import os
import re
import cv2 as cv
import matplotlib.pyplot as plt

print('Program started.')
VER = 'v2.4'  # Version tag for saved weights: after training, weights are saved under this tag and reloaded on later runs. Bump the version to force retraining.
RANDOM_SEED = 777
RANDOM_SEED4SPLIT = 777  # Seed for the train/test split. Do NOT change it — otherwise previously trained-on samples would end up in the test set!
IMG_DIR = 'data/vcode'
ALPHA = 0.0001  # Adam learning rate
BATCH_SIZE = 64
N_EPOCHS = 40

# Seed both TF and NumPy for reproducibility.
tf.random.set_seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)
FILE_NAME = os.path.basename(__file__)
LOG_DIR = os.path.join('_log', FILE_NAME, VER)  # TensorBoard log directory
SAVE_DIR = os.path.join('_save', FILE_NAME, VER)  # weight checkpoint directory
os.makedirs(SAVE_DIR, exist_ok=True)
SAVE_PREFIX = os.path.join(SAVE_DIR, 'weights')  # checkpoint path prefix

# ① read_data(): load the data set and split it into training and test sets
# Regular expression matching a valid vcode file stem: exactly four digits.
vcode_file_name_regex = re.compile(r'^\d{4}$')


def read_data(dir):
    """Scan *dir* for valid vcode images and split them into train/test sets.

    A valid file is named ``DDDD.jpg`` (a four-digit stem, case-insensitive
    extension); everything else is ignored.

    Returns:
        (train_file_names, test_file_names): a reproducible 70/30 split of
        the matching file names, seeded with RANDOM_SEED4SPLIT.
    """
    file_names = os.listdir(dir)

    # Keep only files with a .jpg extension and a four-digit stem.
    vcode_file_names = []
    for file_name in file_names:
        main, ext = os.path.splitext(file_name)
        if ext.lower() != '.jpg':
            continue
        if vcode_file_name_regex.match(main) is None:
            continue
        vcode_file_names.append(file_name)

    # BUG FIX: the original called sorted(...) and discarded the result, so the
    # list kept os.listdir()'s arbitrary, platform-dependent order. Sort in
    # place so the subsequent split is actually consistent across runs.
    vcode_file_names.sort()

    # Fixed-seed 70/30 split so train/test membership never changes between runs.
    train_file_names, test_file_names = train_test_split(vcode_file_names, train_size=0.7, random_state=RANDOM_SEED4SPLIT)

    return train_file_names, test_file_names


def str2vec(str):
    """Convert a digit string such as '1234' into a list of ints [1, 2, 3, 4]."""
    return [int(digit) for digit in str]


def vec2str(vec):
    """Convert a sequence of digit ints back into its string form, e.g. [1, 2] -> '12'."""
    return ''.join(str(digit) for digit in vec)


def load_dir(dir, file_names):
    """Load the listed image files from *dir* along with their digit labels.

    Args:
        dir: directory containing the images.
        file_names: file names to load; each stem must be the 4-digit label.

    Returns:
        (x, y): x is a uint8 array of the raw images (BGR channel order, as
        produced by cv.imread), y is an int32 array of shape (m, 4) with the
        per-position digit labels parsed from each file stem.

    Raises:
        IOError: if an image file cannot be read.
    """
    x = []
    y = []
    for file_name in file_names:
        path = os.path.join(dir, file_name)

        # image data — cv.imread silently returns None on failure, which
        # would only surface later as a confusing numpy dtype error, so
        # fail fast with the offending path instead
        img = cv.imread(path, cv.IMREAD_COLOR)
        if img is None:
            raise IOError(f'Failed to read image: {path}')
        x.append(img)

        # label: four-digit file stem, e.g. '1234' -> [1, 2, 3, 4]
        main = os.path.splitext(file_name)[0]
        vec = str2vec(main)
        y.append(vec)

    x = np.uint8(x)
    y = np.int32(y)
    return x, y


print('Loading data ...')
# Split the file names, then materialize each split as (images, labels).
train_file_names, test_file_names = read_data(IMG_DIR)
x_train_uint8, y_train = load_dir(IMG_DIR, train_file_names)
x_test_uint8, y_test = load_dir(IMG_DIR, test_file_names)
# Record the data dimensions for model building and plotting below.
M_TRAIN, PIC_H, PIC_W, PIC_CH = x_train_uint8.shape
M_TEST = len(x_test_uint8)
# Scale pixel values into [0, 1] floats for training.
x_train = np.float32(x_train_uint8) / 255.0
x_test = np.float32(x_test_uint8) / 255.0
print('Loaded')

# ② Data visualization: 20 random training samples in a 5-row x 4-column grid.
spr = 5
spc = 4
spn = 0
plt.figure(figsize=[10, 6])
rand_idx_train = np.random.permutation(M_TRAIN)
for idx in rand_idx_train[:spr * spc]:
    img = x_train_uint8[idx]
    vec = y_train[idx]
    label = vec2str(vec)
    spn += 1
    plt.subplot(spr, spc, spn)
    plt.title(label)
    plt.axis('off')
    # FIX: cv.imread loads images in BGR channel order, but matplotlib
    # expects RGB — reverse the channel axis so colors display correctly.
    plt.imshow(img[..., ::-1])
print('Please check and close plotting window to go on.')
plt.show()

# ③ Build the network: four Conv-BN-ReLU-MaxPool stages, then dense layers
# producing 4 independent 10-way softmax outputs (one per captcha digit).
print('Start to build model.')


def _conv_pool_stage(n_filters):
    """One Conv(3x3, stride 1) -> BatchNorm -> ReLU -> MaxPool(2x2) stage."""
    return [
        layers.Conv2D(n_filters, (3, 3), (1, 1), 'same'),
        layers.BatchNormalization(),
        layers.ReLU(),
        layers.MaxPool2D((2, 2), (2, 2), 'same'),
    ]


# Feature extractor: filter count doubles at each stage (8 -> 16 -> 32 -> 64).
_feature_layers = [layer for n in (8, 16, 32, 64) for layer in _conv_pool_stage(n)]
# Classifier head: 40 logits reshaped into 4 digit positions x 10 classes.
_head_layers = [
    layers.Flatten(),
    layers.Dense(1024, activation=activations.relu),
    layers.Dense(512, activation=activations.relu),
    layers.Dense(40, activation=None),
    layers.Reshape([4, 10]),
    layers.Softmax(),
]
model = keras.Sequential(_feature_layers + _head_layers)
model.build(input_shape=[None, PIC_H, PIC_W, PIC_CH])
model.summary()

# ④ Compile: per-digit sparse cross-entropy over the (4, 10) softmax output,
# with per-digit accuracy as the reported metric.
model.compile(
    loss=losses.sparse_categorical_crossentropy,
    optimizer=optimizers.Adam(learning_rate=ALPHA),
    # FIX: the Keras API documents `metrics` as a list of metrics; passing a
    # bare function relies on undocumented leniency.
    metrics=[metrics.sparse_categorical_accuracy]
)

# ⑦ Load the model weights if this version was trained before ...
if os.listdir(SAVE_DIR):
    model.load_weights(SAVE_PREFIX)
else:
    # ⑤ ... otherwise train from scratch, logging to TensorBoard.
    tensorboard_cb = callbacks.TensorBoard(log_dir=LOG_DIR, update_freq='batch', profile_batch=0)
    model.fit(x_train, y_train,
              batch_size=BATCH_SIZE,
              epochs=N_EPOCHS,
              validation_data=(x_test, y_test),
              validation_batch_size=BATCH_SIZE,
              callbacks=[tensorboard_cb])

    # ⑦ Save the trained weights for future runs.
    model.save_weights(SAVE_PREFIX)

# ⑥ Model evaluation on the held-out test set.
print('Evaluating ...')
model.evaluate(x_test, y_test, batch_size=BATCH_SIZE)
print('Evaluated.')

# ⑧ Prediction: softmax probabilities (m, 4, 10) -> digit index per position (m, 4).
pred = model.predict(x_test, batch_size=BATCH_SIZE)
pred = np.argmax(pred, axis=2)

# ⑧ Show 10 random test samples (5 rows x 2 columns) with true label,
# predicted label, and the per-digit accuracy of the prediction.
spr = 5
spc = 2
spn = 0
plt.figure(figsize=[6, 6])
rand_idx_test = np.random.permutation(M_TEST)
for idx in rand_idx_test[:spr * spc]:
    img = x_test_uint8[idx]

    vec = y_test[idx]
    label = vec2str(vec)

    pred_vec = pred[idx]
    pred_label = vec2str(pred_vec)

    # percentage of the 4 digit positions predicted correctly
    acc_digits = vec == pred_vec
    acc_digits = np.float32(acc_digits).mean() * 100

    spn += 1
    plt.subplot(spr, spc, spn)
    plt.title(label + ': ' + pred_label + f' {acc_digits:.0f}%')
    plt.axis('off')
    # FIX: cv.imread loads images in BGR channel order, but matplotlib
    # expects RGB — reverse the channel axis so colors display correctly.
    plt.imshow(img[..., ::-1])
plt.show()
