

import numpy as np
import matplotlib.pyplot as plt
# Draw 20 input samples uniformly at random from the interval [0, 1).
x = np.random.uniform(low=0.0, high=1.0, size=20)
def f(x):
    """Ground-truth target function: a line through the origin with slope 3.5."""
    return x * 3.5
# Corrupt the true targets with Gaussian noise to simulate real measurements.
# NOTE: despite the name, this value multiplies the standard-normal draw,
# so it acts as a standard deviation, not a variance.
noise_variance = 0.2
noise = noise_variance * np.random.randn(x.shape[0])

# Observed (noisy) targets for each input sample.
t = f(x) + noise
print(t)

def nn(x, w):
    """Output of the 'network': a single linear unit, y = w * x (no bias)."""
    return w * x

# Define the cost function
def cost(y, t):
    """Sum-of-squared-errors between predictions y and targets t."""
    residual = t - y
    return (residual ** 2).sum()

def gradient(w, x, t):
    """Per-sample derivative of the squared error (w*x - t)^2 w.r.t. w.

    The network output nn(x, w) is simply x * w, inlined here.
    """
    return 2 * x * (x * w - t)

# define the update function delta w
def delta_w(w_k, x, t, learning_rate):
    """Gradient-descent step: learning rate times the gradient summed over all samples."""
    # gradient(w_k, x, t) is 2 * x * (x * w_k - t); inlined and summed here.
    summed_gradient = (2 * x * (x * w_k - t)).sum()
    return learning_rate * summed_gradient

# Set the initial weight parameter
w = 0.1
# Set the learning rate
learning_rate = 0.1

# Perform the gradient descent updates, recording (weight, cost) at every step.
nb_of_iterations = 4  # number of gradient descent updates
w_cost = [(w, cost(nn(x, w), t))]  # (weight, cost) history; starting point included
for i in range(nb_of_iterations):
    dw = delta_w(w, x, t, learning_rate)  # Get the delta w update
    w = w - dw  # Update the current weight parameter
    w_cost.append((w, cost(nn(x, w), t)))  # Add weight, cost to list

# Print the weight and cost at each iteration.
# Fixed: iterate pairs with enumerate instead of the range(len(...)) index loop.
for i, (w_i, c_i) in enumerate(w_cost):
    print('w({}): {:.4f} \t cost: {:.4f}'.format(i, w_i, c_i))


plt.plot(x, t, 'o', label='t')
# Plot the initial line
plt.plot([0, 1], [f(0), f(1)], 'b-', label='f(x)')
plt.xlabel('$x$', fontsize=15)
plt.ylabel('$t$', fontsize=15)
#plt.ylim([0,2])
plt.title('inputs (x) vs targets (t)')
plt.grid()
plt.legend(loc=2)
plt.show()

