"""
对简单的仿射模型进行梯度下降算法,求出最佳权重、最小损失cost
f(x)=x*w
loss=(f(xn)-y)**2
"""

import numpy as np
from matplotlib import pyplot as plt

# 定义已知的数据集,training的dataset
x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]

# 定义初始的权重w
w = 1.0


# Forward pass of the linear model
def forward(x):
    """Predict the output for input x using the current global weight w."""
    return w * x


# Cost function
def cost(xs, ys):
    """
    Compute the mean squared error of the current weight w over the dataset.
    :param xs: collection of inputs
    :param ys: collection of targets
    :return: mean squared error
    """
    # Squared prediction error for every sample under the current weight.
    squared_errors = [(forward(x) - y) ** 2 for x, y in zip(xs, ys)]
    # Average over the number of samples to get the MSE.
    return sum(squared_errors) / len(xs)


# Gradient function (derivative of the cost with respect to w)
def gradient(xs, ys):
    """
    Return d(cost)/dw averaged over the whole dataset:
    grad = (2 / N) * sum((x*w - y) * x).
    """
    total = sum((forward(x) - y) * x for x, y in zip(xs, ys))
    return 2 * total / len(xs)


cost_history = []
epoch_history = []
print("predict(before training): ", forward(4))
# Training loop: plain batch gradient descent for 100 epochs.
for epoch in range(100):
    # Step opposite the gradient; 0.01 is the learning rate.
    w -= 0.01 * gradient(x_data, y_data)

    # Record the cost after the update so we can plot convergence.
    current_cost = cost(x_data, y_data)
    epoch_history.append(epoch)
    cost_history.append(current_cost)
    print(f"Epoch: {epoch}, w: {w}, Cost: {current_cost}")
print("predict(after training): ", forward(4))

# Plot cost versus epoch.
plt.plot(epoch_history, cost_history)
plt.ylabel("cost")
plt.xlabel("epoch")
plt.grid()
plt.show()
