import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

if __name__ == '__main__':
    # Demo: fit a 1-D linear model with gradient descent (TensorFlow 1.x
    # graph-mode API) on synthetic noisy data, plotting before and after.
    points_num = 100
    vectors = []

    # Synthesize samples around the line y = 0.1*x + 0.2 with Gaussian noise.
    # FIX: the original slope literal was `01.` (which Python parses as 1.0);
    # given the 0.2 intercept this is almost certainly a typo for 0.1.
    for _ in range(points_num):
        x = np.random.normal(0.0, 0.66)
        y = 0.1 * x + 0.2 + np.random.normal(0.0, 0.4)
        vectors.append((x, y))

    x_datas = [v[0] for v in vectors]
    y_datas = [v[1] for v in vectors]

    # Show the raw scatter before fitting.
    plt.plot(x_datas, y_datas, 'r*', label="original data")
    plt.title("Linear Regression using Gradient Descent")
    plt.legend()
    plt.show()

    # Linear model: y_hat = W * x + b, with W drawn uniformly from [-1, 1).
    W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
    b = tf.Variable(tf.zeros([1]))
    y = W * x_datas + b

    # Mean-squared-error loss over all points.
    loss = tf.reduce_mean(tf.square(y - y_datas))

    optimizer = tf.train.GradientDescentOptimizer(0.5)
    train = optimizer.minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for step in range(20):
            sess.run(train)
            # Fetch loss, W and b in a single run so all three reflect the
            # same post-update state (the original issued three runs).
            loss_val, w_val, b_val = sess.run([loss, W, b])
            print("Step = {0}, Loss = {1}, [Weight = {2} Bias= {3}]".format(
                step, loss_val, w_val, b_val))

        # Overlay the fitted line on the original scatter.
        plt.plot(x_datas, y_datas, 'r*', label="original data")
        plt.title("Linear Regression using Gradient Descent")
        plt.plot(x_datas, w_val * np.array(x_datas) + b_val, label="Fitted Line")
        plt.legend()
        plt.xlabel('x')
        # FIX: original called plt.xlabel('y') here, overwriting the x-axis
        # label set on the previous line; the y-axis label was intended.
        plt.ylabel('y')
        plt.show()