import os
import sys
import re
import tensorflow.compat.v1 as tf
import tensorflow as tsf
import numpy as np
import matplotlib.pyplot as plt


np.random.seed(777)  # make NumPy shuffling/permutation reproducible
tf.set_random_seed(777)  # make TF weight initialisation reproducible

# 1. Assignment on classification:
# (1) Build a CNN for the verification-code (vcode) dataset.
# (i)   Import the required packages.
# (iii) Set the parameters used before the convolution layers.
alpha = 0.01  # learning rate
batch_size = 50  # size of mini-batches
n_epoch = 10  # how many epochs to train
data_path = r'./data/weekly03'  # path to data


# (ii)  Load the vcode data (data files provided by the teacher).
# (iv)  Convert the data properly: read the labels of the data and turn the
#       labels into vector (one-hot) form.
# (vi)  Create a get_all_files function that collects the image paths and
#       converts the raw files into model-ready data, yielding a list of
#       image samples and a list of labels.
# (v)   Create a function converting the loaded images from colour to grey.
#       NOTE(review): no greyscale conversion is actually implemented below —
#       n_channel stays 3 and the images are used in colour; confirm intent.
n_height = 32  # picture height
n_width = 32  # picture width
n_channel = 3  # picture channels


def get_all_files(data_path):
    """
    Load all labelled images from a directory.

    Filenames are expected to start with the class number followed by an
    underscore (e.g. ``3_0017.jpg``); files that do not match are skipped.

    :param data_path: path to the directory containing the image files
    :return: tuple(x, y): the data x as float32 in shape
        (batch, n_height, n_width, n_channel) and the labels y as an int
        array in shape (batch,)
    """
    x, y = [], []
    # leading digits up to the first underscore encode the class label
    label_regexp = re.compile(r'^(\d+)_')
    for filename in os.listdir(data_path):
        # get label
        match = label_regexp.match(filename)
        if match is None:
            continue  # not a data file
        cls_no = int(match.group(1))

        # get data: plt.imread already returns a uint8 ndarray for JPEGs, so
        # the former np.frombuffer round-trip was unnecessary (the original
        # comment said as much) and fragile for non-byte-backed inputs.
        img = plt.imread(os.path.join(data_path, filename), 'jpg')
        x.append(np.asarray(img, dtype=np.uint8))
        y.append(cls_no)

    # tidy data: one flat float32 array reshaped to NHWC
    x = np.array(x, dtype=np.float32).reshape([-1, n_height, n_width, n_channel])
    y = np.array(y)
    return x, y


# (vii) Load the data by calling the get_all_files function above.
x, y = get_all_files(data_path)

# min-max scale the pixel values into [0, 1]
xmin = x.min()
xmax = x.max()
x -= xmin
x /= xmax - xmin

# shuffle samples and labels with the same random permutation
m = len(y)  # number of samples
n_cls = len(np.unique(y))  # number of distinct classes
a = np.random.permutation(m)
x = x[a]
y = y[a]

# one-hot encode the labels
# NOTE(review): np.eye(n_cls)[y] assumes the labels are exactly 0..n_cls-1;
# if the filename labels are not contiguous and zero-based this mis-indexes
# or raises — verify against the data files.
y = np.eye(n_cls)[y]

# split into train/test sets: the last 5% of the shuffled data is held out
test_rate = 0.05
m_test = int(round(m * test_rate))
m_train = m - m_test
x_train, x_test = np.split(x, [m_train])
y_train, y_test = np.split(y, [m_train])

# (viii) Hand-rolled mini-batch function: walks through the training set in
# order and restarts from the beginning once it runs past the end.
global_idx = 0


def next_batch(batch_size):
    """Return the next (x, y) mini-batch of at most `batch_size` samples."""
    global global_idx
    start, stop = global_idx, global_idx + batch_size
    batch_x = x_train[start:stop]
    batch_y = y_train[start:stop]
    # wrap the cursor once the training set is exhausted
    global_idx = 0 if stop >= m_train else stop
    return batch_x, batch_y


# (ix) Convolution: two conv layers, each followed by max-pooling.
#      Kernels are 3x3 with stride 1 and zero ('SAME') padding; pooling is
#      2x2 with stride 2.
ph_x = tf.placeholder(tf.float32, [None, n_height, n_width, n_channel], 'ph_x')
# Fixed: this placeholder was mistakenly also named 'ph_x' (TF silently
# unique-ifies the duplicate to 'ph_x_1'); it holds the one-hot labels.
ph_y = tf.placeholder(tf.int32, [None, n_cls], 'ph_y')
with tf.variable_scope('C1'):
    # 32 filters over the n_channel input channels
    filter1 = tf.Variable(tf.random.normal([3, 3, n_channel, 32]), dtype=tf.float32, name='filter1')
    conv1 = tf.nn.conv2d(ph_x, filter1, strides=[1, 1, 1, 1], padding='SAME')
    relu1 = tf.nn.relu(conv1)
    pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # 32x32 -> 16x16
with tf.variable_scope('C2'):
    # 64 filters over the 32 feature maps from C1
    filter2 = tf.Variable(tf.random.normal([3, 3, 32, 64]), dtype=tf.float32, name='filter2')
    conv2 = tf.nn.conv2d(pool1, filter2, strides=[1, 1, 1, 1], padding='SAME')
    relu2 = tf.nn.relu(conv2)
    pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # 16x16 -> 8x8

# (x) Flatten the conv output and add one hidden fully-connected layer with
#     100 units, then the output layer producing the class logits.
with tf.variable_scope('FC1'):
    pool2_shape = pool2.get_shape().as_list()
    # The spatial dims are statically known here, so compute the flattened
    # width as a plain Python int for tf.reshape.
    n_flatten_width = pool2_shape[1] * pool2_shape[2] * pool2_shape[3]
    flatten = tf.reshape(pool2, [-1, n_flatten_width])
    # Fixed: tf.contrib was removed in TF 2.x, so tsf.contrib.layers crashes
    # under the tensorflow.compat.v1 setup this file imports; tf.layers.dense
    # from the v1 compat API is the drop-in replacement.
    fc1 = tf.layers.dense(flatten, 100, activation=tf.nn.sigmoid, name='fc1')
    logits = tf.layers.dense(fc1, n_cls, activation=None, name='logits')

# 11. Loss (softmax cross-entropy over the class logits) and an Adam
#     optimizer step for gradient updates.
# Fixed: tf.nn.softmax_cross_entropy_with_logits needs float labels, but
# ph_y is declared int32 above — cast the labels to the logits dtype.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=logits, labels=tf.cast(ph_y, tf.float32)))
train = tf.train.AdamOptimizer(learning_rate=alpha).minimize(cost)

# 12. Evaluation ops: predicted class index and mean accuracy against the
#     one-hot labels.
predict = tf.cast(tf.argmax(logits, axis=1), dtype=tf.int32)
label_idx = tf.cast(tf.argmax(ph_y, axis=1), dtype=tf.int32)
correct = tf.equal(predict, label_idx)
acc = tf.reduce_mean(tf.cast(correct, tf.float32))

# (2) Show the results
# (i) Print the loss value and the corresponding accuracy per mini-batch.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # number of mini-batches per epoch is constant, compute it once
    batches_per_epoch = int(np.ceil(m_train / batch_size))
    for epoch_no in range(n_epoch):
        for batch_no in range(batches_per_epoch):
            batch_x, batch_y = next_batch(batch_size)
            _, cost_value, acc_value = sess.run(
                [train, cost, acc], feed_dict={ph_x: batch_x, ph_y: batch_y})
            print(f'epoch#{epoch_no + 1}: batch#{batch_no + 1}: cost = {cost_value}, acc = {acc_value}')

    # (ii) Reasonable validation: evaluate on the held-out test split.
    accv_test = sess.run(acc, feed_dict={ph_x: x_test, ph_y: y_test})
    print(f'测试集准确率：{accv_test}')
