"""
Linear regression implemented from scratch with gradient descent.
"""
import matplotlib.pyplot as plt
import numpy as np

x = np.array([0.5,0.6,0.8,1.1,1.4])  # training inputs
y = np.array([5.0,5.5,6.0,6.8,7.2])  # training targets

# plt.scatter(x,y)
# plt.grid(linestyle=":")
# plt.show()

# Implement gradient descent
# Model: y = w1*x + w0
w1 = 1 # commonly used initial value
w0 = 1 # commonly used initial values are 0 or 1
# Hyperparameters
learn_rate = 0.01 # learning rate; must not be set too large or training diverges
epoch = 500 # number of training epochs

# Histories recorded each epoch, used below to visualize training progress.
w0s = []
w1s = []
losses = []
epoches = []

# Gradient-descent training loop: one full-batch update per epoch.
for i in range(epoch):
    # Residual of the current model on every sample: (w1*x + w0) - y.
    err = w1 * x + w0 - y
    # Half the sum of squared residuals (the 1/2 cancels the derivative's 2).
    loss = (err ** 2).sum() / 2
    print("epoch:{:3},w1:{:.8f},w0:{:.8f},loss:{:.8f}".format(i + 1, w1, w0, loss))

    # Record the parameter/loss trajectory for the plots below.
    w0s.append(w0)
    w1s.append(w1)
    losses.append(loss)
    epoches.append(i + 1)

    # Partial derivatives of the loss w.r.t. each parameter,
    # followed by one descent step.
    grad_w0 = err.sum()
    grad_w1 = (err * x).sum()
    w0 -= learn_rate * grad_w0
    w1 -= learn_rate * grad_w1

# print(w0,w1)
# pred_y = w1 * x + w0 # predicted values
# plt.plot(x,pred_y,c='orangered')
# plt.grid(linestyle=':')
# plt.show()

# Visualize how the model parameters and the loss evolved during training:
# one stacked subplot per tracked series.
plt.figure('Training params')

panels = [
    (w0s, "dodgerblue", 'w0'),
    (w1s, "dodgerblue", 'w1'),
    (losses, "orangered", 'loss'),
]
for row, (values, color, label) in enumerate(panels, start=1):
    plt.subplot(3, 1, row)
    plt.plot(epoches, values, color=color, label=label)
    plt.grid(linestyle=':')
    plt.legend()

plt.show()