from __future__ import print_function
import tensorflow as tf

# Two constant nodes; a bare float literal is also tf.float32 by default.
node1 = tf.constant(3.0, dtype=tf.float32)
node2 = tf.constant(4.0)  # also tf.float32 implicitly
# Printing shows the symbolic tensors, NOT their values:
# Tensor("Const:0", shape=(), dtype=float32) Tensor("Const_1:0", shape=(), dtype=float32)
print(node1, node2)

# To evaluate nodes, run the computational graph with a session.
sess = tf.Session()
print(sess.run([node1, node2]))

# Add the two nodes; node3 is itself a lazy graph node until sess.run().
node3 = tf.add(node1, node2)
print("node3: ", node3)
print("sess.run(node3): ", sess.run(node3))

# A graph that accepts external inputs via placeholders; values are
# supplied at run time through a feed dict.
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
adder_node = a + b # provides short cut for tf.add(a,b)
scalar_feed = {a: 3, b: 4.5}
print(sess.run(adder_node, scalar_feed))
vector_feed = {a: [1, 3], b: [2, 4]}
print(sess.run(adder_node, vector_feed))
# Chain another operation on top of the adder.
add_and_triple = adder_node * 3
print(sess.run(add_and_triple, scalar_feed))

# A trainable linear model: predictions = W * x + b.
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)  # note: rebinds `b` (was a placeholder above)
x = tf.placeholder(tf.float32)
linear_model = W * x + b

# Variables hold state across sess.run calls but must be initialized first.
init = tf.global_variables_initializer()
sess.run(init)

# Evaluate the untrained model on a batch of four inputs.
sample_inputs = [1, 2, 3, 4]
print(sess.run(linear_model, {x: sample_inputs}))

# Measure the model: sum of squared errors against the target placeholder y.
y = tf.placeholder(tf.float32)
squared_deltas = tf.square(linear_model - y)  # element-wise, like x.*x in Matlab
loss = tf.reduce_sum(squared_deltas)
print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))

# Manually overwrite the variables with the known-optimal values
# for this dataset; tf.assign returns ops that take effect when run.
fix_w = tf.assign(W, [-1])
fix_b = tf.assign(b, [1.0])
sess.run([fix_w, fix_b])
# With the perfect parameters the loss evaluates to 0.
print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))

### train API and optimizer
# Gradient descent with a fixed learning rate of 0.01; `train` is the op
# that applies one update step to every trainable variable (W and b).
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)

# Re-initialize the variables (undoing the manual tf.assign above), then
# run 1000 gradient-descent steps on the same 4-point dataset.
sess.run(init)
for _ in range(1000):  # loop index unused
    sess.run(train, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]})

# Expected to converge near W = [-1.], b = [1.]
print(sess.run([W, b]))

# Release session resources; the session was opened without a `with` block
# and was never closed.
sess.close()