import tensorflow.compat.v1 as tf
import numpy as np
import pickle
import os
from tensorflow.contrib.layers import fully_connected

# Fix random seeds for reproducibility (NumPy ops and the TF1 graph-level seed).
np.random.seed(777)
tf.set_random_seed(777)

# 3. CNN exercise: CIFAR-100 classification.
# (1) Data preparation for the cifar-100-python archive:
#   1) import the required packages (done at the top of the file);
#   2) read the pickled CIFAR-100 files and convert the raw image bytes
#      into float arrays.
print('Loading data ...')
N_CH = 3  # colour channels (RGB)
N_SIDE = 32  # image height/width in pixels
with open('./cifar-100-python/train', 'br') as f:
    dict_train = pickle.load(f, encoding='bytes')  # keys: [b'filenames', b'batch_label', b'fine_labels', b'coarse_labels', b'data']
with open('./cifar-100-python/test', 'br') as f:
    dict_test = pickle.load(f, encoding='bytes')
# Training images: rows of 3*32*32 = 3072 uint8 values, stored channel-first.
# Scale to [0, 1], reshape to NCHW, then transpose to NHWC because
# tf.nn.conv2d below uses its default 'NHWC' data format.
x_train = np.array(dict_train[b'data'], dtype=np.float32) / 255.0  # (50000, 3072)
x_train = x_train.reshape([-1, N_CH, N_SIDE, N_SIDE])
x_train = np.transpose(x_train, [0, 2, 3, 1])
m_train = len(x_train)
# Training labels: fine-grained (100-way) labels as a column vector.
y_train = np.array(dict_train[b'fine_labels']).reshape(-1, 1)  # (50000, 1)
n_cls = len(np.unique(y_train))  # 100 fine-grained classes
# Test images: same preprocessing as the training set.
x_test = np.array(dict_test[b'data'], dtype=np.float32) / 255.0  # (10000, 3072)
x_test = x_test.reshape([-1, N_CH, N_SIDE, N_SIDE])
x_test = np.transpose(x_test, [0, 2, 3, 1])
m_test = len(x_test)
# Test labels.
y_test = np.array(dict_test[b'fine_labels']).reshape(-1, 1)  # (10000, 1)
print('Data loaded.')

# 3) Hand-rolled mini-batch bookkeeping plus the learning rate, epoch count
#    and batch-size hyper-parameters.
g_b = 0  # global cursor into the training set, advanced by next_batch()
n_epoch = 3  # kept small so the demo runs quickly; use ~100 for real training
batch_size = 100
alpha = 0.001  # Adam learning rate
ver = 'v1.0'


def next_batch(batch_size):
    """Return the next mini-batch of training images and labels.

    Advances the module-level cursor ``g_b`` by ``batch_size``. The caller
    resets ``g_b`` to 0 at the start of every epoch, so the last batch of an
    epoch may be shorter than ``batch_size`` (NumPy slicing clips the range).
    """
    global g_b
    start, stop = g_b, g_b + batch_size
    g_b = stop
    return x_train[start:stop], y_train[start:stop]

# 4) Placeholders for images/labels, plus one-hot encoding of the labels.
ph_x = tf.placeholder(tf.float32, [None, N_SIDE, N_SIDE, N_CH], name='ph_x')
ph_y = tf.placeholder(tf.int32, [None, 1], name='ph_y')
# one_hot on a [None, 1] tensor produces [None, 1, n_cls]; the reshape
# flattens it back to [None, n_cls] for the cross-entropy loss.
y_oh = tf.reshape(tf.one_hot(ph_y, n_cls), [-1, n_cls])

# 5) CNN model: two convolution blocks followed by a fully connected head.
# 6) 3x3 kernels with 'SAME' padding and stride 1 keep the spatial size while
#    each block doubles the channel count.
# 7) Each 2x2 max-pool (stride 2) halves the spatial size.
# First convolution block: 32x32x3 -> conv 32x32x32 -> pool 16x16x32.
filter1 = tf.Variable(tf.random.normal([3, 3, N_CH, 32]), dtype=tf.float32, name='filter1')
conv1 = tf.nn.conv2d(ph_x, filter1, strides=[1, 1, 1, 1], padding='SAME')
relu1 = tf.nn.relu(conv1)
pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Second convolution block: 16x16x32 -> conv 16x16x64 -> pool 8x8x64.
filter2 = tf.Variable(tf.random.normal([3, 3, 32, 64]), dtype=tf.float32, name='filter2')
conv2 = tf.nn.conv2d(pool1, filter2, strides=[1, 1, 1, 1], padding='SAME')
relu2 = tf.nn.relu(conv2)
pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# 8) Flatten the conv output and run it through the fully connected head.
# pool2 has static shape [None, 8, 8, 64], so the flattened width
# (8*8*64 = 4096) is known at graph-construction time.
flatten_dim = pool2.shape[1] * pool2.shape[2] * pool2.shape[3]
fc_in = tf.reshape(pool2, [-1, flatten_dim])
fc1 = fully_connected(fc_in, 1024, activation_fn=tf.nn.relu)
logits = fully_connected(fc1, n_cls, activation_fn=None)

# Loss, optimizer, accuracy metric and TensorBoard summaries.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_oh))
train = tf.train.AdamOptimizer(learning_rate=alpha).minimize(cost)
# BUG FIX: tf.argmax(logits, axis=1) has shape [None] while ph_y is
# [None, 1]; tf.equal would broadcast the pair to a [None, None] matrix,
# comparing every prediction against every label and producing a badly
# deflated "accuracy". Flatten the labels so the comparison is element-wise.
pred_cls = tf.cast(tf.argmax(logits, axis=1), dtype=tf.int32)
true_cls = tf.reshape(ph_y, [-1])
acc = tf.reduce_mean(tf.cast(tf.equal(pred_cls, true_cls), tf.float32))
tf.summary.scalar('cost', cost)
tf.summary.scalar('acc', acc)
summary = tf.summary.merge_all()

# 9) Session: restore a previously saved model if a checkpoint exists,
#    otherwise train from scratch and save.
filename = os.path.basename(__file__)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    with tf.summary.FileWriter('./_log/' + filename, sess.graph) as fw:
        saver = tf.train.Saver()
        save_path = './_save/' + filename
        # The checkpoint's .meta file is the marker that a prior run saved.
        if os.path.exists(save_path + '.meta'):
            saver.restore(sess, save_path)
            print('Session restored.')
        else:
            print('Training started')
            g_step = 0  # global step counter across epochs, used for summaries
            for epoch in range(n_epoch):
                g_b = 0  # rewind the mini-batch cursor at the start of each epoch
                total_batch = int(np.ceil(m_train / batch_size))
                group = int(np.ceil(total_batch / 10))  # ~10 progress prints per epoch
                for i in range(total_batch):
                    bx, by = next_batch(batch_size)
                    _, costv, accv, sv = sess.run([train, cost, acc, summary], feed_dict={ph_x: bx, ph_y: by})
                    fw.add_summary(sv, g_step)
                    g_step += 1
                    if i % group == 0:
                        print(f'g_step#{g_step + 1}: epoch#{epoch + 1}: batch#{i + 1}: cost = {costv}, acc = {accv}')
                        fw.flush()
                    # Early exit once a mini-batch reaches 100% accuracy.
                    if np.isclose(1.0, accv):
                        break
                # Report the epoch's final batch if the modulo check above skipped it.
                if i % group != 0:
                    print(f'g_step#{g_step + 1}: epoch#{epoch + 1}: batch#{i + 1}: cost = {costv}, acc = {accv}')
                    fw.flush()
                if np.isclose(1.0, accv):
                    print('Training converged.')
                    break
            print('Training over')
            saver.save(sess, save_path)
            print('Session saved.')

    # 10) Finally report the model's accuracy on the held-out test set.
    print('输出测试模型的准确率:')
    accv = sess.run(acc, feed_dict={ph_x: x_test, ph_y: y_test})
    print(accv)