import numpy as np
import matplotlib.pyplot as plt
import math

# Training set: 3 examples, 4 features each
# (size sqft, # bedrooms, # floors, age in years) -> price (1000s of dollars)
x_train = np.array(
    [
        [2104, 5, 1, 45],
        [1416, 3, 2, 40],
        [852, 2, 1, 35],
    ]
)
y_train = np.array([460, 232, 178])

# Initial model parameters: one zero weight per feature, zero bias.
w_init = np.zeros(x_train.shape[1])
b_init = 0

def compute_model_output(x, y, w, b):
    """Predict targets for all examples with the linear model f(x) = w.x + b.

    Args:
        x (ndarray (m, n)): feature matrix, m training examples with n features.
        y: unused; kept so the signature matches the other helpers' (x, y, w, b) shape.
        w (ndarray (n,)): model weights.
        b (scalar): model bias.

    Returns:
        ndarray (m,): prediction for each of the m examples.
    """
    # One matrix-vector product replaces the per-example Python loop:
    # row i of the result is np.dot(w, x[i]) + b, exactly as before.
    return np.matmul(x, w) + b

def compute_cost(x, y, w, b):
    """Squared-error cost J(w,b) = 1/(2m) * sum_i (w.x_i + b - y_i)^2.

    Args:
        x (ndarray (m, n)): feature matrix.
        y (ndarray (m,)): target values.
        w (ndarray (n,)): model weights.
        b (scalar): model bias.

    Returns:
        scalar: the mean squared-error cost over all m examples.
    """
    m = x.shape[0]
    # Vectorized residuals: err[i] = np.dot(w, x[i]) + b - y[i]
    err = np.matmul(x, w) + b - y
    # dot(err, err) is the sum of squared residuals in one C-level pass.
    return np.dot(err, err) / (2 * m)

def compute_gradient(x, y, w, b):
    """Gradient of the squared-error cost with respect to w and b.

    Args:
        x (ndarray (m, n)): feature matrix.
        y (ndarray (m,)): target values.
        w (ndarray (n,)): model weights.
        b (scalar): model bias.

    Returns:
        tuple: (dj_dw (ndarray (n,)), dj_db (scalar)) — the partial
        derivatives of the cost with respect to w and b.
    """
    m = x.shape[0]
    # Residuals for every example at once: err[i] = w.x[i] + b - y[i]
    err = np.matmul(x, w) + b - y
    # dj_dw[j] = (1/m) * sum_i err[i] * x[i, j]  ==  (x^T err) / m
    dj_dw = np.matmul(x.T, err) / m
    # dj_db = (1/m) * sum_i err[i]
    dj_db = np.sum(err) / m
    return dj_dw, dj_db

def gradient_descent(x, y, w, b, alpha, num_inters, cost_function, gradient_function):
    """Run batch gradient descent for `num_inters` iterations.

    Args:
        x, y: training features/targets, forwarded unchanged to the callbacks.
        w, b: initial parameters (never mutated in place; updated copies returned).
        alpha (float): learning rate.
        num_inters (int): number of iterations to run.
        cost_function: callable (x, y, w, b) -> scalar cost.
        gradient_function: callable (x, y, w, b) -> (dj_dw, dj_db).

    Returns:
        tuple: (final w, final b, list of the cost after each iteration).
    """
    J_history = []
    # Report progress roughly 10 times over the run; guard against a zero
    # interval when num_inters is 0 (loop body never runs in that case anyway).
    print_interval = math.ceil(num_inters / 10) if num_inters else 1

    for it in range(num_inters):
        dj_dw, dj_db = gradient_function(x, y, w, b)
        # `w - ...` rebinds to a new array, so the caller's initial w is untouched.
        w = w - alpha * dj_dw
        b = b - alpha * dj_db

        J_history.append(cost_function(x, y, w, b))

        if it % print_interval == 0:
            # Fixed typo ("Interation") and added a separator so the two
            # fields no longer run together in the output.
            print(f'Iteration:{it:4} '
                  f'Cost:{J_history[-1]:0.2e}')
    return w, b, J_history

# Fit the model on the training set with a small learning rate.
w_final, b_final, J_history = gradient_descent(
    x_train, y_train, w_init, b_init,
    5.0e-7, 1000,
    compute_cost, compute_gradient,
)

# Visualize how the cost falls over the iterations, then report the fit.
plt.plot(J_history)
plt.show()
print(f'final w,b are: {w_final,b_final}')
