"""
author：fc
date：  2021/6/10
"""
########################################################
#
# 线性回归的实现：最小化 loss = Σ(w·x + b − y)^2
#
########################################################
import numpy as np


def linear_error(w, b, poins):
    """
    Compute the mean squared error of the line y = w*x + b over a point set.

    :param w: weight (slope), a scalar
    :param b: bias (intercept), a scalar
    :param poins: 2-D array-like of shape (n, 2): [[x0, y0], [x1, y1], ..., [xn, yn]]
                  (accepts plain nested lists as well as numpy arrays)
    :return: mean of (y_i - (w*x_i + b))**2 over all n points, as a float
    """
    # np.asarray generalizes the input: the original poins[i, 0] indexing
    # raised TypeError for plain Python lists despite the documented contract.
    pts = np.asarray(poins, dtype=float)
    x = pts[:, 0]
    y = pts[:, 1]
    # Vectorized replacement of the per-point Python loop; same average.
    return float(np.mean((y - (w * x + b)) ** 2))


def step_gradient(w_current,b_current,points,lr):
    """
    手动计算计算每一步的梯度:loss=Σ(WX+b-y)^2, αloss/αW=2(WX+b-y)*x,αloss/αb=2(WX+b-y)
    :param w_current: 当前权重
    :param b_current: 当前偏置
    :param points:    点集
    :param lr:        学习率
    :return:     返回新的偏置和权重
    """
    b_grad=0    #随机定义一个梯度值
    w_grad=0
    N=float(len(points))
    for i in range(0,len(points)):
        x=points[i,0]
        y=points[i,1]
        b_grad += 2*(((w_current*x)+b_current)-y)     # 将所有点的倒数累加，
        w_grad += 2*(((w_current*x)+b_current)-y)*x
    b_grad=b_grad/N       # 计算所有点的平均倒数
    w_grad=w_grad/N
    new_b=b_current-(lr*b_grad)
    new_w=w_current-(lr*w_grad)
    return [new_w,new_b]


def gradient_descent_runner(points, start_b, start_w, lr, num_iterations):
    """
    Run gradient descent for a fixed number of iterations to fit w and b.

    :param points: 2-D array-like of shape (n, 2): [[x0, y0], ..., [xn, yn]]
    :param start_b: initial bias
    :param start_w: initial weight
    :param lr: learning rate passed to each step
    :param num_iterations: number of gradient-descent steps to perform
    :return: [w, b] — the fitted weight and bias
    """
    # Convert once, not on every iteration as before.
    pts = np.array(points)
    w = start_w
    b = start_b
    # BUG FIX: the loop previously ran range(len(points)) iterations,
    # silently ignoring the num_iterations parameter.
    for _ in range(num_iterations):
        w, b = step_gradient(w, b, pts, lr)
    return [w, b]

if __name__ == '__main__':
    # Entry-point placeholder: no demo/training run is wired up yet.
    pass