import numpy as np

# Synthesize a training set of 100 (x, y) samples drawn from the line
# y = 1.477 * x + 0.089, corrupted by Gaussian observation noise.
data = []
for _ in range(100):
    sample_x = np.random.uniform(-10., 10.)
    # Zero-mean Gaussian noise with standard deviation 0.01.
    noise = np.random.normal(0., 0.01)
    # Model output for this sample, plus noise.
    data.append([sample_x, 1.477 * sample_x + 0.089 + noise])
data = np.array(data)  # shape (100, 2): column 0 is x, column 1 is y


def mse(b, w, points):
    """Mean squared error of the linear model y = w*x + b over all points.

    Args:
        b: bias (intercept) of the model.
        w: weight (slope) of the model.
        points: NumPy array of shape (N, 2); column 0 is x, column 1 is y.

    Returns:
        float: mean of (y - (w*x + b))**2 over the N points.
    """
    x = points[:, 0]
    y = points[:, 1]
    # Vectorized: a single NumPy pass instead of a Python-level loop.
    return float(np.mean((y - (w * x + b)) ** 2))


def step_gradient(b_current, w_current, points, lr):
    """Perform one gradient-descent update of (b, w) on the MSE loss.

    Args:
        b_current: current bias.
        w_current: current weight.
        points: NumPy array of shape (N, 2); column 0 is x, column 1 is y.
        lr: learning rate.

    Returns:
        list: [new_b, new_w] after one update step.
    """
    x = points[:, 0]
    y = points[:, 1]
    # Residual of the current model at every point: (w*x + b) - y.
    error = w_current * x + b_current - y
    # dMSE/db = (2/N) * sum(error); dMSE/dw = (2/N) * sum(error * x).
    # Vectorized means replace the per-element Python loop, which also
    # avoided recomputing the 2/N factor on every iteration.
    b_gradient = 2.0 * np.mean(error)
    w_gradient = 2.0 * np.mean(x * error)
    # Step against the gradient.
    new_b = b_current - (lr * b_gradient)
    new_w = w_current - (lr * w_gradient)

    return [new_b, new_w]


def gradient_descent(points, starting_b, starting_w, lr, num_iterations):
    """Iteratively refine (b, w) with num_iterations gradient-descent steps.

    Starts from (starting_b, starting_w) and returns [b, w] after the
    final update, printing progress every 50 steps.
    """
    b, w = starting_b, starting_w
    for step in range(num_iterations):
        # One parameter update, then recompute the loss to monitor progress.
        b, w = step_gradient(b, w, points, lr)
        loss = mse(b, w, points)
        # Periodic progress report with the current loss and parameters.
        if step % 50 == 0:
            print(f"iteration:{step},loss:{loss},w:{w},b:{b}")
    return [b, w]

def main():
    """Fit the linear model y = w*x + b to the synthetic data set and report the result."""
    lr = 0.01            # learning rate
    initial_b = 0        # initial bias
    initial_w = 0        # initial weight
    num_iterations = 1000
    # Train, then evaluate the mean squared error at the learned parameters.
    [b, w] = gradient_descent(data, initial_b, initial_w, lr, num_iterations)
    loss = mse(b, w, data)
    print(f'Final loss:{loss},w:{w},b:{b}')


# Guard the entry point so importing this module does not trigger training.
if __name__ == "__main__":
    main()