import tensorflow.compat.v1 as tf
import tensorflow as tsf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
import os
import sys

# 1. Brief TensorFlow intro; tasks:
# (1) Build a convolutional neural network for MNIST.
# ① Load the dataset automatically.
path = r'../../../../large_data/DL1/mnist'
if not os.path.exists(path):
    print('[[[ DATA DIR WRONG ! ]]] ', file=sys.stderr)
    # BUGFIX: exit with a non-zero status on failure — exit(0) signals success
    # to the caller/shell even though the data directory is missing.
    sys.exit(1)
# one_hot=False: labels arrive as integer class ids; one-hot encoding is
# done later in the graph via tf.one_hot.
mnist = input_data.read_data_sets(path, one_hot=False)
_, n_feature = mnist.train.images.shape           # 784 pixels per flattened image
n_cls = len(np.unique(mnist.train.labels))        # 10 digit classes
n_height = int(np.sqrt(n_feature))                # 28
n_width = n_feature // n_height                   # 28
print(n_feature, n_cls, n_height, n_width)

# ② Hyper-parameters: learning rate, batch size, number of training
#    epochs, and the dropout keep probability.
n_epoch = 3          # full passes over the training set
batch_size = 1000    # examples per gradient step
alpha = 0.01         # Adam learning rate
keep_prob = 0.9      # dropout keep probability (used during training)

# ③ Define placeholders and reshape the inputs; labels become one-hot.
ph_x = tf.placeholder(tf.float32, [None, n_feature], name='ph_x')
ph_y = tf.placeholder(tf.int32, [None, 1], name='ph_y')
ph_keep_prob = tf.placeholder(tf.float32, name='ph_kb')
# Flat pixel vectors -> NHWC image tensor with one channel.
x_reshaped = tf.reshape(ph_x, [-1, n_height, n_width, 1])
# tf.one_hot on an (m, 1) tensor yields (m, 1, n_cls); flatten to (m, n_cls).
y_onehot_3d = tf.one_hot(ph_y, n_cls)
y_oh = tf.reshape(y_onehot_3d, [-1, n_cls])

# ④ LeNet C1 layer: 5x5 convolution, 6 feature maps, SAME padding.
with tf.variable_scope('C1'):
    filter1 = tf.Variable(tf.random.normal([5, 5, 1, 6]), dtype=tf.float32, name='filter1')
    conv1 = tf.nn.conv2d(x_reshaped, filter1,
                         strides=[1, 1, 1, 1], padding='SAME', name='conv1')
    print(conv1)  # expect shape (?, 28, 28, 6)
    activated1 = tf.nn.relu(conv1, name='relu1')
    # Dropout after the activation; rate is fed through ph_keep_prob.
    relu1 = tf.nn.dropout(activated1, keep_prob=ph_keep_prob)

# ⑤ LeNet S2 layer: 2x2 max pooling with stride 2 — halves each spatial dim.
with tf.variable_scope('S2'):
    window2 = [1, 2, 2, 1]
    pool2 = tf.nn.max_pool(relu1, ksize=window2, strides=window2,
                           padding='SAME', name='pool2')
    print(pool2)  # expect shape (?, 14, 14, 6)

# ⑥ LeNet C3 layer: 5x5 convolution, 16 feature maps, VALID padding.
with tf.variable_scope('C3'):
    filter3 = tf.Variable(tf.random.normal([5, 5, 6, 16]), dtype=tf.float32, name='filter3')
    conv3 = tf.nn.conv2d(pool2, filter3,
                         strides=[1, 1, 1, 1], padding='VALID', name='conv3')
    print(conv3)  # expect shape (?, 10, 10, 16)
    activated3 = tf.nn.relu(conv3, name='relu3')
    relu3 = tf.nn.dropout(activated3, keep_prob=ph_keep_prob)

# ⑦ LeNet S4 layer: 2x2 max pooling with stride 2.
with tf.variable_scope('S4'):
    window4 = [1, 2, 2, 1]
    pool4 = tf.nn.max_pool(relu3, ksize=window4, strides=window4,
                           padding='SAME', name='pool4')
    print(pool4)  # expect shape (?, 5, 5, 16)

# ⑧ LeNet C5: a 5x5 VALID convolution over the 5x5 input — effectively a
#    fully connected layer producing 120 features per example.
with tf.variable_scope('C5'):
    filter5 = tf.Variable(tf.random.normal([5, 5, 16, 120]), dtype=tf.float32, name='filter5')
    conv5 = tf.nn.conv2d(pool4, filter5,
                         strides=[1, 1, 1, 1], padding='VALID', name='conv5')
    print(conv5)  # expect shape (?, 1, 1, 120)
    activated5 = tf.nn.relu(conv5, name='relu5')
    relu5 = tf.nn.dropout(activated5, keep_prob=ph_keep_prob)

# ⑨ F6 is a fully connected layer; F7 is the output layer.
with tf.variable_scope('F6'):
    shape5 = relu5.shape
    dim = shape5[1] * shape5[2] * shape5[3]  # 1 * 1 * 120
    flatten = tf.reshape(relu5, [-1, dim], name='flatten')
    print(flatten)  # expect shape (?, 120)
    # NOTE: ReLU is applied at this fully connected layer.
    f6 = tsf.contrib.layers.fully_connected(flatten, 84, activation_fn=tf.nn.relu)
    print(f6)  # expect shape (?, 84)

with tf.variable_scope('F7'):
    # Raw logits — no activation here; softmax is folded into the loss op.
    logits = tsf.contrib.layers.fully_connected(f6, 10, activation_fn=None)
    print(logits)  # expect shape (?, 10)

# ⑩ Dropout requirement: already applied after each ReLU above.

# 11. Loss, optimizer, prediction, and accuracy ops.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_oh))
train = tf.train.AdamOptimizer(learning_rate=alpha).minimize(cost)
# argmax produces shape (m,); reshape to an (m, 1) column so it can be
# compared element-wise with ph_y.
pred_ids = tf.cast(tf.argmax(logits, axis=1), dtype=tf.int32)
predict = tf.reshape(pred_ids, [-1, 1], name='predict')
print(predict)
hits = tf.cast(tf.equal(predict, ph_y), tf.float32)
acc = tf.reduce_mean(hits)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for epoch in range(n_epoch):
        n_batch = mnist.train.num_examples // batch_size
        # Print ~20 progress lines per epoch; max(1, ...) avoids a
        # ZeroDivisionError when n_batch < 20.
        group = max(1, n_batch // 20)
        for i in range(n_batch):
            x_batch, y_batch = mnist.train.next_batch(batch_size)
            # BUGFIX: read_data_sets (default float32 dtype) already scales
            # pixels to [0, 1]; dividing by 255 again shrank inputs ~255x.
            y_batch = y_batch.reshape([-1, 1])  # (m,) -> (m, 1) to match ph_y
            _, costv, accv = sess.run(
                [train, cost, acc],
                feed_dict={ph_x: x_batch, ph_y: y_batch, ph_keep_prob: keep_prob})
            if i % group == 0:
                print(f'epoch#{epoch + 1}: batch#{i + 1}: cost = {costv}, acc = {accv}')
        # Always report the final batch of the epoch if the loop above didn't.
        if i % group != 0:
            print(f'epoch#{epoch + 1}: batch#{i + 1}: cost = {costv}, acc = {accv}')

    # 12. Interpretation / result display.
    # (2) Results
    # ① Show loss and the corresponding accuracy (printed above).
    # ② Print the model's final accuracy on the test set.
    # BUGFIX: dropout must be disabled at evaluation time (keep_prob = 1.0),
    # and test images are already in [0, 1] — no extra /255.
    test_acc = sess.run(acc, feed_dict={
        ph_x: mnist.test.images,
        ph_y: mnist.test.labels.reshape([-1, 1]),
        ph_keep_prob: 1.0,
    })
    print(f'最终的accurancy: {test_acc}')

    # ③ Test the model / spot-check accuracy with random samples.
    #