import numpy as np
import matplotlib.pyplot as plt
import math

def compute_model_output(x, w, b, times):
    """Predict f(x) = X_poly @ w + b for a degree-`times` polynomial model.

    Parameters
    ----------
    x : ndarray of shape (m, 1)
        Raw input feature column.
    w : array of shape (times, 1)
        Weight vector, one weight per polynomial power (x^1 .. x^times).
    b : float
        Bias term.
    times : int
        Highest polynomial degree; columns x^2 .. x^times are appended
        to x as extra features.

    Returns
    -------
    Array of shape (m, 1) with the model predictions.
    """
    xx = x
    # Feature expansion: append columns x^2, x^3, ..., x^times.
    for power in range(2, times + 1):
        xx = np.c_[xx, np.power(x[:, 0], power)]
    # Use explicit matrix multiplication (@). The original `xx * w` only
    # worked because callers passed an np.matrix, whose `*` is matmul;
    # with a plain ndarray weight it would broadcast elementwise and give
    # a wrong (m, times) result. `@` is correct for both.
    return xx @ w + b

def compute_cost(x, y, w, b, times):
    """Mean squared error cost J(w, b) = sum((f_wb - y)^2) / (2m).

    Parameters
    ----------
    x : ndarray of shape (m, 1)
        Raw input feature column.
    y : ndarray of shape (m, 1)
        Target values.
    w : array of shape (times, 1)
        Weight vector, one weight per polynomial power.
    b : float
        Bias term.
    times : int
        Highest polynomial degree used for feature expansion.

    Returns
    -------
    Scalar cost value.
    """
    xx = x
    # Feature expansion: append columns x^2, x^3, ..., x^times.
    for power in range(2, times + 1):
        xx = np.c_[xx, np.power(x[:, 0], power)]
    m = xx.shape[0]
    # `@` (true matmul) instead of `*`, which only behaved as matmul
    # because callers passed np.matrix weights.
    f_wb = xx @ w + b
    return np.sum(np.power(f_wb - y, 2)) / (2 * m)

def compute_gradient(x, y, w, b, times):
    """Gradient of the MSE cost w.r.t. w and b.

    dj_dw = X_poly.T @ (f_wb - y) / m   (shape (times, 1))
    dj_db = sum(f_wb - y) / m           (scalar)

    Parameters
    ----------
    x : ndarray of shape (m, 1)
        Raw input feature column.
    y : ndarray of shape (m, 1)
        Target values.
    w : array of shape (times, 1)
        Current weight vector.
    b : float
        Current bias.
    times : int
        Highest polynomial degree used for feature expansion.

    Returns
    -------
    (dj_dw, dj_db) tuple.
    """
    xx = x
    # Feature expansion: append columns x^2, x^3, ..., x^times.
    for power in range(2, times + 1):
        xx = np.c_[xx, np.power(x[:, 0], power)]
    m = xx.shape[0]
    # `@` (true matmul) instead of `*`; the original relied on np.matrix
    # operator overloading for both products below.
    f_wb = xx @ w + b
    err = f_wb - y
    dj_dw = (xx.T @ err) / m
    dj_db = np.sum(err) / m
    return dj_dw, dj_db

def gradient_descent(x, y, w, b, alpha, num_iters, cost_function, gradient_function, times):
    """Run batch gradient descent for `num_iters` iterations.

    Parameters
    ----------
    x, y : training inputs and targets, forwarded to the callbacks.
    w, b : initial weights and bias (w is NOT mutated in place).
    alpha : float
        Learning rate.
    num_iters : int
        Number of update steps.
    cost_function : callable(x, y, w, b, times) -> float
        Cost evaluated after each update (recorded in the history).
    gradient_function : callable(x, y, w, b, times) -> (dj_dw, dj_db)
        Gradient of the cost at the current parameters.
    times : int
        Polynomial degree, forwarded to the callbacks.

    Returns
    -------
    (w, b, J_history) — final parameters and per-iteration cost list.

    Fixes vs. the original: the `cost_function`/`gradient_function`
    arguments were ignored (hard-coded calls to compute_cost /
    compute_gradient), and `w -= ...` mutated the caller's initial
    weight array in place.
    """
    J_history = []
    for i in range(num_iters):
        dj_dw, dj_db = gradient_function(x, y, w, b, times)
        # Out-of-place updates so the caller's `w` object is untouched.
        w = w - alpha * dj_dw
        b = b - alpha * dj_db

        J_history.append(cost_function(x, y, w, b, times))
        # Progress log roughly 10 times over the whole run.
        if i % math.ceil(num_iters / 10) == 0:
            print(f'iterations={i:4},cost={J_history[-1]:0.2e}')
    return w, b, J_history



# --- Synthetic data: noisy samples of a cubic polynomial -------------------
np.random.seed(666)
samples = np.linspace(-3, 3, 100)
x = samples.reshape(-1, 1)
y = 1.5*x**2 + 3*x -2*x**3 + 2 + np.random.normal(0, 1, size=100).reshape(-1, 1)

x_train, y_train = x, y

# --- Fit -------------------------------------------------------------------
# Polynomial regression treats the higher powers of x as extra features, so
# the weight vector needs one row per power; fitting a cubic -> shape (3, 1).
w_init = np.matrix(np.zeros(3)).T
b_init = 0

w_final, b_final, J_history = gradient_descent(
    x_train, y_train, w_init, b_init, 1.0e-2, 1000,
    compute_cost, compute_gradient, 3)

print(f'w={w_final},b={b_final}')

# --- Plots: convergence curve (left), fitted curve vs. data (right) --------
fig, (ax_cost, ax_fit) = plt.subplots(1, 2, constrained_layout=True, figsize=(12, 4))
ax_cost.plot(J_history)
ax_cost.set_title('Convergence')
ax_cost.set_xlabel('Iterations')
ax_cost.set_ylabel('Cost')
ax_fit.plot(x_train, np.array(compute_model_output(x_train, w_final, b_final, 3)),
            c='r', label='Prediction')
ax_fit.scatter(np.array(x_train), np.array(y_train))
plt.show()

