import numpy as np
import matplotlib.pyplot as plt

'''
梯度下降算法
* 1 只可以求出局部最优解，不能求出全局最优解
* 2 全局最优需要随机梯度下降算法求解 （见2）
'''


# Training samples for the linear target y = 2x: inputs and matching labels.
x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]

# Initial guess for the model weight; updated in the training loop below.
w = 1.0


def forward(x):
    """Predict with the linear model: y_hat = w * x (reads the global weight ``w``)."""
    return x * w

def cost(xs, ys):
    """Mean squared error of the current model over the training set.

    MSE = (1/N) * sum_i (y_i - w * x_i) ** 2

    :param xs: input values
    :param ys: target values paired with ``xs``
    :return: mean squared error (float); raises ZeroDivisionError on empty input
    """
    # Note: the original used a local variable named `cost`, shadowing this
    # function's own name — renamed and collapsed into a single sum().
    total = sum((y - forward(x)) ** 2 for x, y in zip(xs, ys))
    return total / len(xs)

def gradient(xs, ys):
    """Analytic gradient of the MSE cost with respect to ``w``.

    dL/dw = (2/N) * sum_i x_i * (w * x_i - y_i)

    Used by the update rule w = w - alpha * dL/dw.

    :param xs: input values
    :param ys: target values paired with ``xs``
    :return: averaged gradient (float)
    """
    return sum(2 * x * (forward(x) - y) for x, y in zip(xs, ys)) / len(xs)


i_list = []     # epoch indices (x-axis of the loss curve)
loss_list = []  # MSE recorded at each epoch (pre-update value)
print("Predict (before training):", 5, forward(5))
for i in range(100):
    # Batch gradient descent: cost() and gradient() already average over the
    # whole data set (the 1/N factor), so one weight update per epoch is
    # sufficient — no inner per-sample loop is needed.
    cost_val = cost(x_data, y_data)
    grad_val = gradient(x_data, y_data)
    w -= 0.01 * grad_val  # learning rate alpha = 0.01
    i_list.append(i)
    loss_list.append(cost_val)
    print("Epoch：" ,i," w = ",w," predict = ",forward(4)," loss = ",cost_val)
print("Predict (after training):", 5,forward(5))

# Plot the training loss against the epoch index.
plt.plot(i_list, loss_list)
plt.xlabel('i')
plt.ylabel('Loss')  # fixed typo: label was 'Los'
plt.show()





