# XOR_BP
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Seed the graph-level RNG so the random weight draws are reproducible.
tf.set_random_seed(777)

# XOR truth table: four 2-bit inputs with their single-bit targets.
x_data = [[0, 0], [0, 1], [1, 0], [1, 1]]
y_data = [[0], [1], [1], [0]]

# Graph inputs: a batch of 2-feature rows and the matching 1-value labels.
X = tf.placeholder(tf.float32, shape=[None, 2])
Y = tf.placeholder(tf.float32, shape=[None, 1])

# model
W1 = tf.Variable(tf.random_normal([2, 3]), name="Weight1")
b1 = tf.Variable(tf.random_normal([3]), name="bias1")
W2 = tf.Variable(tf.random_normal([3, 1]), name="Weight2")
b2 = tf.Variable(tf.random_normal([1]), name="bias2")

# forward pro
z1 = tf.matmul(X, W1) + b1
a1 = tf.sigmoid(z1)
z2 = tf.matmul(a1, W2) + b2
a2 = tf.sigmoid(z2)

# Binary cross-entropy cost. The sigmoid output is clipped away from
# exactly 0/1 before the logs so a saturated unit cannot produce
# log(0) = -inf / NaN in the reported cost. The clip affects only the
# cost readout — the manual gradients below keep the exact (a2 - Y)
# form, so the parameter updates are unchanged.
_eps = 1e-7
_a2_safe = tf.clip_by_value(a2, _eps, 1.0 - _eps)
cost = - tf.reduce_mean(Y * tf.log(_a2_safe) + (1 - Y) * tf.log(1 - _a2_safe))
cost_history = []

# Manual backpropagation for sigmoid + cross-entropy.
# Output layer: d(cost)/d(z2) simplifies to (a2 - Y) for this loss;
# gradients are averaged over the batch dimension.
dz2 = a2 - Y
dW2 = tf.matmul(tf.transpose(a1), dz2) / tf.cast(tf.shape(a1)[0], tf.float32)
db2 = tf.reduce_mean(dz2, axis=[0])

# Hidden layer: chain rule through W2, times the sigmoid derivative
# a1 * (1 - a1).
da1 = tf.matmul(dz2, tf.transpose(W2))
dz1 = da1 * a1 * (1 - a1)
dW1 = tf.matmul(tf.transpose(X), dz1) / tf.cast(tf.shape(X)[0], tf.float32)
db1 = tf.reduce_mean(dz1, axis=[0])

# Gradient-descent step size. NOTE(review): 10e-2 is 0.1 — if 1e-2
# (0.01) was intended this is a typo; value kept to preserve behavior.
learning_rate = 10e-2

# Force all four gradients to be computed BEFORE any assign runs.
# Without this, a single sess.run(update) gives TF1 no ordering between
# e.g. tf.assign(W2, ...) and the read of W2 inside da1, so dW1 could be
# computed from an already-updated W2 (nondeterministic updates).
with tf.control_dependencies([dW2, db2, dW1, db1]):
    update = [
        tf.assign(W2, W2 - learning_rate * dW2),
        tf.assign(b2, b2 - learning_rate * db2),
        tf.assign(W1, W1 - learning_rate * dW1),
        tf.assign(b1, b1 - learning_rate * db1),
    ]

# Hard-threshold the sigmoid output at 0.5 and take the mean 0/1 match
# rate against the labels.
predicted = tf.cast(a2 > 0.5, tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), tf.float32))

# Launch a session; the `with` block guarantees the session (and its
# resources) are released even if training raises.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Train: one full-batch gradient step per iteration, logging the
    # cost every 500 steps.
    for step in range(10001):
        cost_val, _ = sess.run([cost, update], feed_dict={X: x_data, Y: y_data})
        if step % 500 == 0:
            print("Step: ", step, "Cost: ", cost_val)
            cost_history.append(cost_val)

    # Learning curve (the first pre-convergence sample is skipped, as in
    # the original plot).
    plt.plot(cost_history[1:])
    plt.show()

    # Raw sigmoid outputs next to the true labels.
    predicted_y = sess.run(a2, feed_dict={X: x_data})
    print("predicted_y: \n", predicted_y)
    print("true_y: \n", y_data)

    # Final 0/1 accuracy on the training set (label typo "Accurcy" fixed).
    acc_val = sess.run(accuracy, feed_dict={X: x_data, Y: y_data})
    print("Accuracy: ", acc_val)