# Mean-squared-error loss of the line y = w*x + b over a point set
def square_error_function(w, b, points):
    """Return the mean squared error of the line (w, b) over *points*.

    *points* is an Nx2 array-like indexed as points[i, 0] (x) and
    points[i, 1] (y), e.g. a numpy array.
    """
    squared_residuals = [
        (points[i, 1] - (w * points[i, 0] + b)) ** 2
        for i in range(len(points))
    ]
    # Average over all points to get the mean squared error.
    return sum(squared_residuals) / float(len(points))


# Single gradient-descent step on the MSE loss
def step_gradient(w_current, b_current, points, learning_rate):
    """Perform one gradient-descent step and return [new_w, new_b].

    Gradients are those of the mean squared error over *points*,
    an Nx2 array-like indexed as points[i, 0] (x) and points[i, 1] (y).
    """
    n = float(len(points))
    grad_w = 0
    grad_b = 0
    for i in range(len(points)):
        x, y = points[i, 0], points[i, 1]
        # Residual of the current line at this point.
        error = y - (w_current * x + b_current)
        # d/dw and d/db of the mean squared error (hence the 2/n factor).
        grad_w += -(2 / n) * x * error
        grad_b += -(2 / n) * error
    # Step against the gradient, scaled by the learning rate.
    return [w_current - learning_rate * grad_w,
            b_current - learning_rate * grad_b]


# Run gradient descent for a fixed number of iterations
def gradient_descent_iteration(points, w_iniitail, b_initial, learning_rate, iteration_num):
    """Apply step_gradient *iteration_num* times and return the final [w, b].

    NOTE(review): the parameter name ``w_iniitail`` looks like a typo for
    ``w_initial``; it is kept unchanged here for backward compatibility
    with any keyword-argument callers.
    """
    w, b = w_iniitail, b_initial
    for _ in range(iteration_num):
        w, b = step_gradient(w, b, points, learning_rate)
    return [w, b]
