from python_ai.common.xcommon import sep
import tensorflow as tf
import numpy as np

# Silence TF deprecation/info chatter so only this script's own prints show.
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

# tensorflow.examples.tutorials is now deprecated and it is recommended to use tensorflow.keras.datasets as follows:
from tensorflow.examples.tutorials.mnist import input_data
# Downloads (if not cached) MNIST into MNIST_data_bak/.  one_hot=True yields
# 10-dim one-hot label vectors, matching the softmax/argmax usage below.
# NOTE(review): this loader only exists in TF 1.x; the commented keras line
# below is the TF 2.x replacement, but it returns plain arrays, not an object
# with .train.next_batch()/.test — the training loop would need adapting too.
mnist = input_data.read_data_sets('MNIST_data_bak/', one_hot=True)
# (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

sep('my load data')
print(type(mnist))

# Hyperparameters / network dimensions.
batch_size = 100       # SGD mini-batch size
hidden1_nodes = 200    # width of the single hidden layer
N_IN = 784             # input features: 28x28 MNIST image, flattened
N_OUT = 10             # output classes: digits 0-9
ITERS = 10000          # total training steps

sep('calc graph')
with tf.name_scope('Input'):
    # Graph-mode feed points: a batch of flattened images and their
    # one-hot labels (batch dimension left open as None).
    x = tf.compat.v1.placeholder(tf.float32, shape=(None, N_IN))
    y = tf.compat.v1.placeholder(tf.float32, shape=(None, N_OUT))
    for tag, tensor in (('x', x), ('y', y)):
        print(f'{tag}: {tensor}')
with tf.name_scope('Inference'):
    # Two-layer MLP: 784 -> 200 (ReLU) -> 10 (linear logits).
    w1 = tf.Variable(tf.random.normal([N_IN, hidden1_nodes], stddev=0.1))
    w2 = tf.Variable(tf.random.normal([hidden1_nodes, N_OUT], stddev=0.1))
    b1 = tf.Variable(tf.random.normal([hidden1_nodes], stddev=0.1))
    b2 = tf.Variable(tf.random.normal([N_OUT], stddev=0.1))
    hidden = tf.nn.relu(tf.matmul(x, w1) + b1)
    # BUG FIX: the output layer must emit raw (linear) logits.
    # softmax_cross_entropy_with_logits applies softmax internally; wrapping
    # the logits in tf.nn.relu clamped every negative score to 0, distorting
    # the loss and zeroing gradients for those units.
    y_predict = tf.matmul(hidden, w2) + b2
    print(f'y_predict: {y_predict}')

with tf.name_scope('Loss_func'):
    # Per-example softmax cross-entropy, averaged over the batch.
    # (The op internally applies softmax to the logits.)
    per_example_loss = tf.nn.softmax_cross_entropy_with_logits(
        labels=y, logits=y_predict
    )
    cross_entropy = tf.reduce_mean(per_example_loss)
    print(f'cross_entropy: {cross_entropy}')

with tf.name_scope('Train'):
    # FIX: use the tf.compat.v1 namespace consistently with the rest of the
    # file — tf.train.GradientDescentOptimizer is removed in TF 2.x, while
    # the compat alias works in both TF 1.13+ and TF 2.x graph mode.
    # Plain SGD with a fixed learning rate of 0.1.
    train_step = tf.compat.v1.train.GradientDescentOptimizer(0.1)\
        .minimize(cross_entropy)
    print(f'train_step: {train_step}')

with tf.name_scope('Accuracy'):
    # A sample counts as correct when the argmax of the logits matches the
    # argmax of the one-hot label; accuracy is the mean hit rate.
    hits = tf.equal(tf.argmax(y, 1), tf.argmax(y_predict, 1))
    accuracy = tf.reduce_mean(tf.cast(hits, tf.float32))
    print(f'correct_prediction: {hits}')
    print(f'accuracy: {accuracy}')

exit(0)

sep('calc')
with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    # The full-test-set feed is loop-invariant: build it once, not per phase.
    test_feed = {x: mnist.test.images, y: mnist.test.labels}
    for i in range(ITERS):
        # One SGD step on a fresh mini-batch.
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        sess.run(train_step,
                 feed_dict={x: batch_xs, y: batch_ys})
        if i % 1000 == 0:
            # FIX: integer division — `i / 1000 + 1` printed "Phase 1.0"
            # instead of "Phase 1".
            print(f'Phase {i // 1000 + 1}: ', sess.run(accuracy,
                                                       feed_dict=test_feed))

sep('write graph')
# Dump the graph definition for TensorBoard.  NOTE(review): `sess` is used
# here after its `with` block has closed the session — reading `sess.graph`
# still works, but consider moving this inside the session block.
writer = tf.compat.v1.summary.FileWriter("./logs/mnist.my", sess.graph)
writer.close()
