import tensorflow.compat.v1 as tf
import tensorflow as tsf
import numpy as np
import os

# Reproducibility: fix both the NumPy RNG and the TF graph-level seed.
np.random.seed(777)
tf.set_random_seed(777)
# This script's file name; used below as part of the checkpoint/log paths.
file_name = os.path.basename(__file__)

ver = 'v1.0'  # code version (used as part of the saved-session file name)
train_data_rate = 0.8  # fraction of samples used as the training set (the first 80%)
learning_rate = 0.01  # learning rate for the Adam optimizer
batch_size = 100  # 100 training samples per mini-batch
n_epoch = 15  # total number of training epochs

# Read image data and labels from file: 10000 samples, each a 16x16 grey-scale
# image of a handwritten digit 0-9. A CNN with 2 conv layers and 1 fully
# connected layer is built below to classify them.
# 1. Load the data file img_16_10k.txt (one sample per row, comma separated).
data = np.loadtxt('./_data/img_16_10k.txt', delimiter=',')
np.random.shuffle(data)

# 2. Split pixel data from labels: every column but the last holds pixels,
# the last column holds the digit class.
x = data[:, :-1]
labels = data[:, -1].astype(np.int32)
m, n = x.shape
n_cls = len(np.unique(labels))
# One-hot encode the integer labels.
y = np.eye(n_cls)[labels]
print('x', x.shape)  # (10000, 256)
print('y', y.shape)  # (10000, 10)

# Min-max scale the pixel values into [0, 1] (in place).
xmin, xmax = x.min(), x.max()
x -= xmin
x /= xmax - xmin

# Partition into train (first 80%) and test (last 20%) sets.
m_train = int(np.ceil(m * train_data_rate))
m_test = m - m_train
x_train, x_test = x[:m_train], x[m_train:]
y_train, y_test = y[:m_train], y[m_train:]
for tag, arr in (('x_train', x_train), ('y_train', y_train),
                 ('x_test', x_test), ('y_test', y_test)):
    print(tag, arr.shape)

# mini-batch cursor (reset to 0 at the start of every epoch by the training loop)
g_batch_i = 0


def next_batch(batch_size):
    """Return the next (features, one-hot labels) slice of the training set.

    Advances the module-level cursor ``g_batch_i`` by ``batch_size``; the last
    batch of an epoch may be shorter than ``batch_size``.
    """
    global g_batch_i
    start, stop = g_batch_i, g_batch_i + batch_size
    g_batch_i = stop
    return x_train[start:stop], y_train[start:stop]


# placeholders
with tf.variable_scope('Input'):
    # Flattened 16x16 grey image: shape (batch, 256).
    ph_x = tf.placeholder(tf.float32, [None, n], 'ph_x')
    # One-hot digit label: shape (batch, 10).
    ph_y = tf.placeholder(tf.float32, [None, n_cls], 'ph_y')

# 3. First convolutional layer + ReLU: 16x16x1 ==> 16x16x32.
with tf.variable_scope('C1'):
    img = tf.reshape(ph_x, [-1, 16, 16, 1], 'pic')
    kernel = tf.Variable(tf.random.normal([3, 3, 1, 32]), dtype=tf.float32, name='filter1')
    act = tf.nn.relu(
        tf.nn.conv2d(img, kernel, strides=[1, 1, 1, 1], padding='SAME', name='conv1'),
        'relu1')
    # 4. Max-pool the first layer's output, halving the resolution: ==> 8x8x32.
    pool1 = tf.nn.max_pool(act, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')

# 5. Second convolutional layer + ReLU: 8x8x32 ==> 8x8x64 (1x1 kernel mixes channels).
with tf.variable_scope('C2'):
    filter2 = tf.Variable(tf.random.normal([1, 1, 32, 64]), dtype=tf.float32, name='filter2')
    conv2 = tf.nn.conv2d(pool1, filter2, strides=[1, 1, 1, 1], padding='SAME', name='conv2')
    # BUG FIX: the ReLU op was named 'relu1' (copy-paste from C1), which clashes
    # with C1's node naming in the TensorBoard graph; name it 'relu2' instead.
    # (Only graph-node naming changes; no trainable variable is renamed, so
    # checkpoint save/restore is unaffected.)
    relu2 = tf.nn.relu(conv2, 'relu2')
    # 6. Max-pool the second layer's output, halving the resolution: ==> 4x4x64.
    pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')

# 7. Fully connected output layer: 4*4*64 flattened features ==> 10 class logits.
with tf.variable_scope('FC'):
    n_flat = pool2.shape[1] * pool2.shape[2] * pool2.shape[3]
    flat = tf.reshape(pool2, [-1, n_flat], name='fc_in')
    logits = tsf.contrib.layers.fully_connected(flat, n_cls, activation_fn=None)

# 8. Loss (softmax cross-entropy), accuracy metric and TensorBoard summaries.
with tf.variable_scope('Cost_acc_summary'):
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=ph_y))
    # Predicted class = index of the largest logit.
    predict = tf.argmax(logits, axis=1)
    correct = tf.equal(predict, tf.argmax(ph_y, axis=1))
    acc = tf.reduce_mean(tf.cast(correct, dtype=tf.float32))
    tf.summary.scalar('cost', cost)
    tf.summary.scalar('acc', acc)
    summary = tf.summary.merge_all()

# 9. Adam optimizer minimizing the cross-entropy loss.
with tf.variable_scope('Train'):
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    train = optimizer.minimize(cost)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    save_path = './_save/' + file_name + '_' + ver
    saver = tf.train.Saver()
    if os.path.exists(save_path + '.meta'):
        # A checkpoint already exists: restore it and skip training.
        saver.restore(sess, save_path)
        print('SESSION LOADED!')
    else:
        with tf.summary.FileWriter('./_log/' + file_name, sess.graph) as fw:
            print('TRAINING STARTED!')
            g_step = -1
            # Loop-invariant: batches per epoch and the logging stride
            # (log roughly 10 times per epoch) are hoisted out of the loop.
            total_batch = int(np.ceil(m_train / batch_size))
            group = int(np.ceil(total_batch / 10))
            # 10. Train for n_epoch epochs over the first 80% of the samples.
            for epoch in range(n_epoch):
                g_batch_i = 0  # rewind the mini-batch cursor for this epoch
                for i in range(total_batch):
                    g_step += 1
                    # 11. Train in mini-batches of batch_size samples.
                    bx, by = next_batch(batch_size)
                    # BUG FIX: previously this fed the FULL training set
                    # (x_train/y_train) instead of the mini-batch, so every
                    # step was an expensive full-batch update and the
                    # next_batch() slices were never used.
                    _, costv, accv, sv = sess.run([train, cost, acc, summary], feed_dict={ph_x: bx, ph_y: by})
                    fw.add_summary(sv, g_step)
                    if i % group == 0:
                        print(f'g_step#{g_step+1}: epoch#{epoch+1}: batch#{i+1}: cost = {costv}, acc= {accv}')
                        fw.flush()
                    if np.isclose(1.0, accv):
                        break
                if i % group != 0:
                    # Ensure the epoch's final batch is also logged.
                    print(f'g_step#{g_step + 1}: epoch#{epoch + 1}: batch#{i + 1}: cost = {costv}, acc= {accv}')
                    fw.flush()
                if np.isclose(1.0, accv):
                    print('TRAINING CONVERGED!')
                    break
            print('TRAINING OVER!')
            saver.save(sess, save_path)
            print('SESSION SAVED!')

    # 12. Evaluate accuracy on the held-out last 20% of the data.
    accv = sess.run(acc, feed_dict={ph_x: x_test, ph_y: y_test})
    print('选取最后的20%数据做测试集，计算准确率')
    print(accv)

    # 13. Spot-check a single randomly chosen test sample.
    print('抽一个测试样本进行验证')
    # BUG FIX: np.random.randint's upper bound is exclusive, so the original
    # randint(0, m_test - 1) could never select the last test sample.
    rand_idx = np.random.randint(0, m_test)
    x_verif = x_test[rand_idx:rand_idx + 1]
    y_verif = y_test[rand_idx:rand_idx + 1]
    print(f'Target: {np.argmax(y_verif, axis=1)}')
    h_verif = sess.run(predict, feed_dict={ph_x: x_verif})
    print(f'Hypothesis: {h_verif}')
