import numpy as np
import matplotlib.pyplot as plt

# Training data: four (x, y) sample points, one row per point.
points=np.array([[1,1],[2,3],[3,2],[4,3]])
# Extract the two columns: x coordinates and y coordinates
x=points[:,0]
y=points[:,1]
# Loss function: mean squared error of the linear fit
def compute_cost(w,b,points):
    """Return the mean squared error of the line y = w*x + b over `points`.

    Args:
        w: slope of the fitted line.
        b: intercept of the fitted line.
        points: array of shape (M, 2); column 0 holds x values,
            column 1 holds y values.

    Returns:
        Average of (y - w*x - b)**2 over all M points.
    """
    xs = points[:, 0]
    ys = points[:, 1]
    # Vectorized MSE: one NumPy expression instead of a Python-level loop,
    # and no shadowing of the module-level x/y names.
    return np.mean((ys - w * xs - b) ** 2)
# Model hyperparameters:
# learning rate (step size), initial values, number of iterations
alpha=0.1
initial_w=0
initial_b=0
num_iter=10
# Batch gradient descent driver
def grad_desc(points,alpha,initial_w,initial_b,num_iter):
    """Run `num_iter` gradient-descent updates starting from the given w and b.

    The loss is recorded *before* each update so the list shows the
    descent trajectory; returns [final_w, final_b, cost_list].
    """
    w, b = initial_w, initial_b
    cost_list = []
    step = 0
    while step < num_iter:
        cost_list.append(compute_cost(w, b, points))
        w, b = step_grad_desc(w, b, alpha, points)
        step += 1
    return [w, b, cost_list]
def step_grad_desc(current_w,current_b,alpha,points):
    """Perform one gradient-descent update of (w, b) for the MSE loss.

    Args:
        current_w: current slope.
        current_b: current intercept.
        alpha: learning rate (step size).
        points: array of shape (M, 2); column 0 is x, column 1 is y.

    Returns:
        (updated_w, updated_b) after one step of steepest descent.
    """
    M = len(points)
    xs = points[:, 0]
    ys = points[:, 1]
    # Per-point prediction error of the current line.
    residual = current_w * xs + current_b - ys
    # Gradients of mean((w*x + b - y)^2) w.r.t. w and b, vectorized
    # instead of a Python-level accumulation loop.
    grad_w = 2 / M * np.sum(residual * xs)
    grad_b = 2 / M * np.sum(residual)
    # Move against the gradient to decrease the loss.
    updated_w = current_w - alpha * grad_w
    updated_b = current_b - alpha * grad_b
    return updated_w, updated_b
# Test: run gradient descent to compute the optimal w and b
w,b,cost_list=grad_desc(points,alpha,initial_w,initial_b,num_iter)
print(w)
print(b)
cost=compute_cost(w,b,points)
print(cost)
# Show how the loss decreases across iterations
plt.plot(cost_list)
plt.show()
# Plot the training points together with the fitted line (in red)
plt.scatter(x,y)
pred_y=w*x+b
plt.plot(x,pred_y,c='r')
plt.show()
