import tensorflow.compat.v1 as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt

# 1. Artificial Neural Network exercise — complete per the requirements below.
# (1) Data loading
# ① Load the MNIST handwritten-digit dataset correctly.
mnist = input_data.read_data_sets('./data/', one_hot=True)
# m: number of training examples; n: features per flattened image (28*28=784)
m = mnist.train.images.shape[0]
n = mnist.train.images.shape[1]
# n_cls: number of output classes (one-hot label width)
n_cls = mnist.train.labels.shape[1]

# ② Build the training/testing datasets and set related parameters
#    (min-max normalization, hyper-parameter setup).
def _min_max_scale(images):
    """Return a float copy of `images` rescaled to [0, 1] by min-max scaling.

    `np.array` makes a copy, so the dataset-owned arrays are never mutated.
    A zero range (constant input) is guarded so the scaling never divides
    by zero; such input is returned as all zeros.
    Note: this also avoids the original code's rebinding of the builtins
    `min`/`max` at module scope.
    """
    scaled = np.array(images)
    lo = scaled.min()
    rng = scaled.max() - lo
    scaled -= lo
    if rng > 0:  # avoid 0/0 -> NaN on degenerate input
        scaled /= rng
    return scaled

# training data
x_train = _min_max_scale(mnist.train.images)
y_train = mnist.train.labels
# testing data
x_test = _min_max_scale(mnist.test.images)
y_test = mnist.test.labels
# hyper params
alpha = 0.1  # learning rate for the manual gradient-descent updates below

# ③ Define placeholders matching the structure of the loaded data:
#    x holds flattened images (batch x n), y holds one-hot labels (batch x n_cls).
x = tf.placeholder(dtype=tf.float32, shape=[None, n], name='placeholder_x')
y = tf.placeholder(dtype=tf.float32, shape=[None, n_cls], name='placeholder_y')

# ④ The network must contain two hidden layers with 20 neurons each.
# ⑤ Initialize the weights and biases matching that model structure.
n_hidden = 20  # units per hidden layer (fixed by the exercise statement)

def _dense_params(fan_in, fan_out, idx):
    """Create the (weight, bias) variable pair for one fully connected layer.

    Weights are (fan_in, fan_out); biases are a (1, fan_out) row that
    broadcasts over the batch dimension. Variable names 'w<idx>'/'b<idx>'
    match the original graph names.
    """
    # NOTE(review): std-normal init is rather large for sigmoid layers; a
    # scaled init (e.g. Xavier) would likely converge faster, but it is
    # left unchanged to preserve the exercise's behavior.
    w = tf.Variable(tf.random.normal([fan_in, fan_out]), dtype=tf.float32, name='w%d' % idx)
    b = tf.Variable(tf.random.normal([1, fan_out]), dtype=tf.float32, name='b%d' % idx)
    return w, b

w1, b1 = _dense_params(n, n_hidden, 1)          # input -> hidden 1
w2, b2 = _dense_params(n_hidden, n_hidden, 2)   # hidden 1 -> hidden 2
w3, b3 = _dense_params(n_hidden, n_cls, 3)      # hidden 2 -> output (n_cls generalizes the hard-coded 10)

# ⑥ Forward propagation written from scratch: sigmoid activations on both
#    hidden layers, softmax on the output layer.
a1 = tf.sigmoid(tf.matmul(x, w1) + b1)   # hidden layer 1 activations
a2 = tf.sigmoid(tf.matmul(a1, w2) + b2)  # hidden layer 2 activations
a3 = tf.nn.softmax(tf.matmul(a2, w3) + b3, name='a3')  # output class probabilities

# ⑦ Multi-class cross-entropy loss, written from scratch.
# The softmax output is clipped away from 0 before the log so that a
# saturated softmax (probability exactly 0 for the true class) can never
# produce -inf/NaN in the loss — a realistic failure mode with the
# std-normal weight initialization used above.
cost = tf.reduce_mean(
    - tf.reduce_sum(y * tf.math.log(tf.clip_by_value(a3, 1e-10, 1.0)), axis=1)
)

# ⑧ Backward propagation written from scratch, layer by layer.
# For softmax + cross-entropy, the gradient w.r.t. the output
# pre-activation simplifies to (prediction - label).
dz3 = a3 - y  # mx10
# Propagate the error through w3: dJ/da2 = dz3 . w3^T
da2 = tf.matmul(dz3, tf.transpose(w3))  # mx10,10x20=>mx20
# Sigmoid derivative is a*(1-a), applied elementwise.
dz2 = da2 * a2 * (1 - a2)  # mx20
# Same pattern one layer down: through w2, then sigmoid derivative.
da1 = tf.matmul(dz2, tf.transpose(w2))  # mx20,20x20=>mx20
dz1 = da1 * a1 * (1 - a1)  # mx20

# Weight gradient = (layer input)^T . dz, averaged over the batch size m.
# 20xm,mx10=>20x10
dw3 = tf.matmul(tf.transpose(a2), dz3) / tf.cast(tf.shape(a2)[0], dtype=tf.float32)
# Bias gradient = batch mean of dz (reduce_mean over axis 0 handles the 1/m).
db3 = tf.reduce_mean(dz3, axis=0)  # 1x10

# 20xm,mx20=>20x20
dw2 = tf.matmul(tf.transpose(a1), dz2) / tf.cast(tf.shape(a1)[0], dtype=tf.float32)
db2 = tf.reduce_mean(dz2, axis=0)  # 1x20

# nxm,mx20=>nx20
dw1 = tf.matmul(tf.transpose(x), dz1) / tf.cast(tf.shape(x)[0], dtype=tf.float32)
db1 = tf.reduce_mean(dz1, axis=0)  # 1x20

# ⑨ Apply the parameter updates with tf.assign.
# IMPORTANT: in TF1 graph execution there is no implicit ordering between
# tf.assign(w3, ...) and the read of w3 inside da2 = matmul(dz3, w3^T), so
# some gradients could be computed from already-updated (stale-mixed)
# weights within a single sess.run. The control dependency forces every
# gradient tensor to be fully evaluated before any variable is overwritten.
_grads = [dw1, db1, dw2, db2, dw3, db3]
with tf.control_dependencies(_grads):
    update = [
        tf.assign(w3, w3 - alpha * dw3),
        tf.assign(w2, w2 - alpha * dw2),
        tf.assign(w1, w1 - alpha * dw1),
        tf.assign(b3, b3 - alpha * db3),
        tf.assign(b2, b2 - alpha * db2),
        tf.assign(b1, b1 - alpha * db1),
    ]

# ⑩ Accuracy: fraction of examples whose predicted class (argmax of the
#    softmax output) matches the true class (argmax of the one-hot label).
predicted_cls = tf.argmax(a3, axis=1)
actual_cls = tf.argmax(y, axis=1)
hits = tf.cast(tf.equal(predicted_cls, actual_cls), tf.float32)
acc = tf.reduce_mean(hits)

# (2) Running the tensor flow graph
# ① Create a session, train for 2000 iterations, print the cross-entropy
#    every 100 iterations, and report the final accuracy.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    iters, group = 2000, 100
    cost_arr = np.zeros(iters)
    # Full-batch training: the whole training set is fed on every step.
    # NOTE(review): the reported accuracy is therefore TRAINING accuracy;
    # x_test/y_test are prepared above but never used — confirm whether
    # test-set accuracy was intended.
    feed = {x: x_train, y: y_train}
    for i in range(iters):
        _, costv, accv = sess.run([update, cost, acc], feed_dict=feed)
        cost_arr[i] = costv
        if i % group == 0:
            print(f'#{i + 1}, cost = {costv}, acc = {accv}')
    # Report the very last iteration too when it does not land on a
    # multiple of `group`.
    if i % group != 0:
        print(f'#{i + 1}, cost = {costv}, acc = {accv}')

    # ② Plot how the cross-entropy evolved over training.
    plt.plot(cost_arr)
    plt.show()

    # ③ Write the graph to a log directory so the run can be inspected
    #    with TensorBoard (tf.summary.FileWriter).
    fw = tf.summary.FileWriter('./log', sess.graph)
    fw.close()
    # To view: tensorboard --logdir "./log" --host 127.0.0.1
