from cProfile import label
from turtle import write_docstringdict
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math

# Training samples: three (x, y) pairs stored as column matrices so that
# `*` below acts as matrix multiplication.
x_train = np.matrix([[1],
                     [2],
                     [3]])
y_train = np.matrix([[ 5],
                     [26],
                     [19]])

# Design matrix for a quadratic fit: append an x^2 feature column so the
# problem becomes plain multivariate linear regression. (Adding the feature
# here or inside the helpers is equivalent; the helpers rebuild it anyway.)
X = np.c_[x_train, np.multiply(x_train, x_train)]
Y = y_train

def compute_model_output(x, w, b):
    """Predict targets for the quadratic model.

    Builds the design matrix [x, x^2] from the first column of `x`,
    then returns the matrix product with `w` plus the scalar bias `b`.
    """
    design = np.c_[x, np.power(x[:, 0], 2)]
    return design * w + b

def compute_cost(x, y, w, b):
    """Halved mean-squared-error cost of the quadratic model on (x, y).

    Rebuilds the [x, x^2] design matrix, computes the residuals of the
    predictions against `y`, and returns sum(residual^2) / (2 * m).
    """
    design = np.c_[x, np.power(x[:, 0], 2)]
    n_samples = design.shape[0]
    residuals = design * w + b - y
    return np.sum(np.power(residuals, 2)) / (2 * n_samples)

def compute_gradient(x, y, w, b):
    """Gradient of the halved-MSE cost for the quadratic model.

    Parameters
    ----------
    x : column matrix of raw inputs; the [x, x^2] design matrix is built here.
    y : column matrix of targets.
    w : (2, 1) weight matrix.
    b : scalar bias.

    Returns
    -------
    dj_dw : (2, 1) matrix, gradient of the cost w.r.t. w.
    dj_db : float, gradient of the cost w.r.t. b.
    """
    xx = np.c_[x, np.power(x[:, 0], 2)]
    m = xx.shape[0]          # number of samples (column count was unused)
    f_wb = xx * w + b        # predictions; `*` is matmul on np.matrix
    dj_dw = (xx.T * (f_wb - y)) / m
    dj_db = np.sum(f_wb - y) / m
    return dj_dw, dj_db

def gradient_descent(x, y, w, b, alpha, num_iters, cost_function, gradient_function):
    """Run batch gradient descent and return the fitted parameters.

    Parameters
    ----------
    x, y : training inputs/targets, forwarded to the callbacks unchanged.
    w, b : initial parameters (w is NOT mutated in place; see below).
    alpha : learning rate.
    num_iters : number of iterations.
    cost_function : callable (x, y, w, b) -> scalar cost.
    gradient_function : callable (x, y, w, b) -> (dj_dw, dj_db).

    Returns
    -------
    (w, b, J_history) where J_history holds the cost after each update.

    Fixes vs. original: the `cost_function`/`gradient_function` arguments
    were accepted but ignored (the body hard-coded the module-level
    helpers), and `w -= ...` mutated the caller's initial weight matrix
    in place.
    """
    J_history = []
    # Hoist the loop-invariant progress interval; >= 1 for any num_iters >= 1.
    print_interval = max(1, math.ceil(num_iters / 10))
    for i in range(num_iters):
        dj_dw, dj_db = gradient_function(x, y, w, b)
        # Rebind instead of `-=` so the caller's initial w is untouched.
        w = w - alpha * dj_dw
        b = b - alpha * dj_db

        J_history.append(cost_function(x, y, w, b))
        if i % print_interval == 0:
            print(f'iterations={i:4},cost={J_history[-1]:0.2e}')
    return w, b, J_history

# Initial parameters: zero weights, one per feature column of X.
w_init = np.matrix(np.zeros(X.shape[1])).T
#w_init = np.matrix([[1.0],[1.0]])# different initial values can yield different
# final results — most likely because the iteration count is too low / alpha too small.
b_init = 0

# Fit the quadratic model; alpha=3e-2 with 50k iterations chosen empirically.
w_final,b_final,J_history = gradient_descent(x_train,y_train,w_init,b_init,3.0e-2,50000,compute_cost,compute_gradient)

print(f'w={w_final},b={b_final}')
# Left panel: cost convergence; right panel: fitted curve vs. training points.
fig,(ax1,ax2) = plt.subplots(1,2,constrained_layout = True,figsize = (12,4))
ax1.plot(J_history)
ax1.set_title('Convergence')
ax1.set_xlabel('Iterations')
ax1.set_ylabel('Cost')
# Dense grid over [0, 5] for a smooth prediction curve.
a = np.matrix(np.linspace(0,5,20)).T
print(a.shape)
# np.array(...) unwraps the matrix so matplotlib plots plain values.
ax2.plot(a,np.array(compute_model_output(a,w_final,b_final)),c='r',label='Prediction')
ax2.scatter(np.array(x_train),np.array(y_train))
plt.show()

