import tensorflow as tf


def fun1():
    """Fit a linear model hypothesis = W*x + b to fixed data (TF1 graph mode).

    Trains with plain gradient descent for 2000 steps on the points
    (1,1), (2,2), (3,3), printing step, cost, W, and b every 20 steps.
    With this data W should approach 1 and b should approach 0.
    """
    x_train = [1, 2, 3]
    y_train = [1, 2, 3]

    # Trainable parameters, randomly initialized.
    W = tf.Variable(tf.random_normal([1]), name='weight')
    b = tf.Variable(tf.random_normal([1]), name='bias')

    hypothesis = x_train * W + b
    # Mean squared error over the training points.
    cost = tf.reduce_mean(tf.square(hypothesis - y_train))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
    train = optimizer.minimize(cost)

    # Context-manage the session so it is always closed (the original
    # leaked it), matching the style used in placeholder_fun.
    with tf.Session() as ss:
        ss.run(tf.global_variables_initializer())

        for step in range(2001):
            # Fetch train/cost/W/b in a single run() call instead of
            # executing the graph four separate times per logging step.
            _, cost_val, W_val, b_val = ss.run([train, cost, W, b])
            if step % 20 == 0:
                print(step, cost_val, W_val, b_val)

def placeholder_fun():
    """Train a linear model on placeholder-fed data, then predict (TF1).

    Feeds the points (1,1), (2,2), (3,3) through placeholders for 2000
    gradient-descent steps, logging every 20 steps, then prints the
    model's predictions for a few unseen inputs.
    """
    # Graph inputs: 1-D float tensors of arbitrary length.
    X = tf.placeholder(tf.float32, shape=[None])
    Y = tf.placeholder(tf.float32, shape=[None])

    # Model parameters, randomly initialized.
    weight = tf.Variable(tf.random_normal([1]), name="weight")
    bias = tf.Variable(tf.random_normal([1]), name="bias")

    hypothesis = X * weight + bias

    # Mean squared error, minimized by plain gradient descent.
    loss = tf.reduce_mean(tf.square(hypothesis - Y))
    train_op = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for step in range(2001):
            step_loss, step_w, step_b, _ = sess.run(
                [loss, weight, bias, train_op],
                feed_dict={X: [1, 2, 3], Y: [1, 2, 3]})
            if step % 20 == 0:
                print(step, step_loss, step_w, step_b)

        # Predictions for inputs not seen during training.
        print(sess.run(hypothesis, feed_dict={X: [5]}))
        print(sess.run(hypothesis, feed_dict={X: [2.5]}))
        print(sess.run(hypothesis, feed_dict={X: [1.5, 2.5]}))


# Guard the entry point so importing this module does not kick off
# a full training run as a side effect.
if __name__ == "__main__":
    placeholder_fun()