# TF 1.x graph-mode API (placeholders / Session) accessed through the v1 compat shim.
import tensorflow.compat.v1 as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np

# Seed both NumPy and TensorFlow RNGs so shuffling and weight init are reproducible.
np.random.seed(777)
tf.set_random_seed(777)

# 1. Exercise statement (translated):
# (1) Problem analysis:
#   1) Import the required packages.
#   2) Automatically download/load the MNIST data set.
mnist = input_data.read_data_sets('./data', one_hot=False)

#   3) Convert the data into arrays TensorFlow can consume.
x_train = mnist.train.images      # (m, n) float feature matrix, one flattened image per row
y_train = mnist.train.labels      # integer class ids (NOT one-hot, per one_hot=False above)
m, n = x_train.shape              # m = number of training examples, n = features per example
n_cls = len(np.unique(y_train))   # number of distinct classes (10 for MNIST)
x_test = mnist.test.images
y_test = mnist.test.labels
m_test, _ = x_test.shape

#   4) Feature-scale and shuffle the loaded data.
def _shuffle_and_scale(features, labels):
    """Shuffle rows of features/labels in unison, then min-max scale features to [0, 1].

    Fancy indexing copies the arrays, so the in-place scaling below never
    touches the original mnist buffers.
    """
    order = np.random.permutation(features.shape[0])
    features = features[order]
    labels = labels[order]
    lo = features.min()
    hi = features.max()
    features -= lo
    features /= hi - lo
    return features, labels


# Train first, then test — this ordering keeps the NumPy RNG stream identical
# to consuming two permutations back to back.
x_train, y_train = _shuffle_and_scale(x_train, y_train)
x_test, y_test = _shuffle_and_scale(x_test, y_test)
# Labels become column vectors so they can be fed to a [None, 1] placeholder.
y_train = y_train.reshape(-1, 1)
y_test = y_test.reshape(-1, 1)

#   5) Define tf.placeholder inputs and one-hot encode the labels in-graph.
ph_x = tf.placeholder(tf.float32, [None, n], 'placeholder_x')
ph_y = tf.placeholder(tf.int32, [None, 1], 'placeholder_y')
# tf.one_hot on a (m, 1) int tensor produces (m, 1, n_cls); flatten to (m, n_cls).
y_oh = tf.reshape(tf.one_hot(ph_y, n_cls), [-1, n_cls])

#   6) Hidden layer widths required by the exercise: 100 and 200 units.
L1, L2 = 100, 200


#   7) Create weight/bias Variables for each layer of the network.
def _layer_params(fan_in, fan_out, idx):
    """Return (weight, bias) Variables for one dense layer, unit-normal initialized."""
    weight = tf.Variable(tf.random.normal([fan_in, fan_out]), dtype=tf.float32, name=f'w{idx}')
    bias = tf.Variable(tf.random.normal([1, fan_out]), dtype=tf.float32, name=f'b{idx}')
    return weight, bias


# Creation order (w1, b1, w2, b2, w3, b3) matches the original so the
# graph-level seed assigns the same op seeds.
w1, b1 = _layer_params(n, L1, 1)
w2, b2 = _layer_params(L1, L2, 2)
w3, b3 = _layer_params(L2, n_cls, 3)

#   8) Forward pass written with low-level ops (matmul + add), per the
#      exercise requirement of not calling prebuilt layer helpers.
z1 = tf.matmul(ph_x, w1) + b1        # (m, L1) pre-activation
a1 = tf.nn.sigmoid(z1, name='a1')    # hidden layer 1 output
# BUG FIX: layer 2 must consume the *activated* output a1, not the raw
# pre-activation z1. The hand-derived backprop below already assumes this
# (dw2 = a1^T·dz2 and dz1 uses a1 * (1 - a1)), so feeding z1 here left a1
# dead in the forward graph and made the manual gradients inconsistent
# with the computation they were supposed to differentiate.
z2 = tf.matmul(a1, w2) + b2          # (m, L2)
a2 = tf.nn.sigmoid(z2, name='a2')    # hidden layer 2 output
z3 = tf.matmul(a2, w3) + b3          # (m, n_cls) logits
a3 = tf.nn.softmax(z3, name='a3')    # class probabilities

#   9) Loss: mean categorical cross-entropy between one-hot labels and softmax output.
# ROBUSTNESS FIX: clip probabilities before the log — with unit-normal
# weights over hundreds of inputs the softmax can saturate to exact 0,
# making log(a3) = -inf and the printed loss NaN. The parameter updates
# are unaffected: the gradients below are hand-written starting from
# dz3 = a3 - y_oh and never differentiate this expression.
cost = tf.reduce_mean(
    -tf.reduce_sum(y_oh * tf.math.log(tf.clip_by_value(a3, 1e-10, 1.0)), axis=1))

#   10) Hand-derived backpropagation (no autodiff / optimizer packages), per
#       the exercise requirement. Formulas assume the architecture
#       ph_x -> sigmoid(L1) -> sigmoid(L2) -> softmax(n_cls) trained with
#       mean cross-entropy, hence the compact delta dz3 = softmax - onehot.
# NOTE(review): these gradients take a1 as the input of layer 2
# (dw2 = a1^T·dz2; dz1 uses a1*(1-a1)), but the forward pass above feeds z1
# into layer 2 — forward and backward graphs disagree; confirm intent.
dz3 = a3 - y_oh  # (m, n_cls): combined softmax + cross-entropy delta
dz2 = tf.matmul(dz3, tf.transpose(w3)) * a2 * (1 - a2)  # (m, L2): chain rule through sigmoid'
dz1 = tf.matmul(dz2, tf.transpose(w2)) * a1 * (1 - a1)  # (m, L1)
dw3 = tf.matmul(tf.transpose(a2), dz3) / tf.cast(tf.shape(a2)[0], dtype=tf.float32)  # (L2, n_cls), batch-averaged
db3 = tf.reduce_mean(dz3, axis=0)  # (n_cls,) — broadcasts against b3 of shape (1, n_cls)
dw2 = tf.matmul(tf.transpose(a1), dz2) / tf.cast(tf.shape(a1)[0], dtype=tf.float32)  # (L1, L2)
db2 = tf.reduce_mean(dz2, axis=0)  # (L2,)
dw1 = tf.matmul(tf.transpose(ph_x), dz1) / tf.cast(tf.shape(ph_x)[0], dtype=tf.float32)  # (n, L1)
db1 = tf.reduce_mean(dz1, axis=0)  # (L1,) — original comment said "1 x n", which was wrong

# 11) Apply one vanilla gradient-descent step per parameter via tf.assign
#     (no Optimizer classes allowed by the exercise).
alpha = 0.1  # learning rate
# Pair each Variable with its hand-computed gradient; running the resulting
# list of assign ops performs a full SGD update.
_grad_pairs = [(w3, dw3), (b3, db3), (w2, dw2), (b2, db2), (w1, dw1), (b1, db1)]
train = [tf.assign(var, var - alpha * grad) for var, grad in _grad_pairs]

# (2) Session phase:
#   1) Train for 2000 iterations, printing the loss every 200; finally
#      report accuracy.
predict = tf.argmax(a3, axis=1)
# A prediction is correct when the argmax class matches the one-hot label.
_correct = tf.equal(predict, tf.argmax(y_oh, axis=1))
acc = tf.reduce_mean(tf.cast(_correct, tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Full-batch gradient descent: every step feeds the whole training set.
    iters = 2000
    group = 200
    feed_train = {ph_x: x_train, ph_y: y_train}
    feed_test = {ph_x: x_test, ph_y: y_test}
    for step in range(iters):
        _, cost_val, acc_val = sess.run([train, cost, acc], feed_dict=feed_train)
        if step % group == 0:
            print(f'#{step + 1}: cost = {cost_val}, acc = {acc_val}')
    # Echo the final iteration's metrics when the loop above didn't just print them.
    if step % group != 0:
        print(f'#{step + 1}: cost = {cost_val}, acc = {acc_val}')
    print(f'最后输出acurrancy（准确率）:训练集: {sess.run(acc, feed_dict=feed_train)}')
    print(f'最后输出acurrancy（准确率）:测试集: {sess.run(acc, feed_dict=feed_test)}')

    #   2) Validate on a slice of the (already shuffled) test set and show the results.
    print('随机抽取一组对应数据进行验证。输出验证结果:')
    k = 10
    sample_pred = sess.run(predict, feed_dict={ph_x: x_test[:k]})
    print(f'Targets: {y_test[:k]}')
    print(f'Hypothesis: {sample_pred}')
