import math
import torch
import matplotlib.pyplot as plt

"""
RMSProp算法，解决AdaGrad算法的缺点（后期学习率衰减过大，可能会早停）
算法的改进添加一个系数
St = bate * St-1 + (1-beta) * g * g
"""

# 1. Define the loss surface: L(w1, w2) = w1^2 + 2 * w2^2
def loss_fn(w1, w2):
    """Quadratic bowl used to demo RMSProp; works on scalars or tensors."""
    return w1 * w1 + 2 * w2 * w2

# 2. Hyperparameters and optimizer state
lr = 0.5        # base learning rate (scaled per-parameter by 1/sqrt(S))
Epochs = 5      # number of gradient steps to take
w1= -1          # initial value of parameter w1
w2 = 1          # initial value of parameter w2
S1 = 0  # exponentially-weighted moving average of g1^2 (RMSProp state for w1)
S2 = 0  # exponentially-weighted moving average of g2^2 (RMSProp state for w2)
epsilon = 1e-7  # small constant to avoid division by zero in the update
beta = 0.3      # decay coefficient for the moving average (unusually low; chosen for demo)

''' Draw the contour plot of the loss surface '''
x1 = torch.linspace(-1, 1, 100)
x2 = torch.linspace(-1, 1, 100)
# "ij" indexing so xx1 varies along rows and xx2 along columns,
# matching how loss_fn(xx1, xx2) is evaluated on the grid below.
xx1, xx2 = torch.meshgrid(x1, x2, indexing="ij")
loss = loss_fn(xx1, xx2)
fig = plt.figure("RMSProp")
ax = fig.add_subplot()
ax.contour(xx1, xx2, loss)
# List that will accumulate the (w1, w2) points visited by the optimizer
points = []

# 3. Training loop: run RMSProp for `Epochs` steps on the quadratic loss
for epoch in range(Epochs):
    points.append([w1, w2])     # record the current parameter point for plotting
    loss = loss_fn(w1, w2)
    print(loss)
    # ==== RMSProp update ====
    # Analytic gradients of L(w1, w2) = w1^2 + 2*w2^2
    g1 = 2 * w1
    g2 = 4 * w2
    # Exponentially-decayed moving average of the squared gradients
    # (this is the key difference from AdaGrad, which sums them unboundedly)
    S1 = beta * S1 + (1-beta) * g1*g1
    S2 = beta * S2 + (1-beta) * g2*g2
    # Parameter update: per-parameter step size lr / sqrt(S + eps)
    w1 -= lr * g1 / math.sqrt(S1 + epsilon)
    w2 -= lr * g2 / math.sqrt(S2 + epsilon)
    # Print the effective per-parameter learning rates for this step
    print(f"w1_lr = {lr/math.sqrt(S1 + epsilon):.4f} "
          f"w2_lr = {lr/math.sqrt(S2 + epsilon):.4f}")

# Overlay the optimizer's path (black line with circle markers) on the contours
points = torch.tensor(points)
ax.plot(points[:,0], points[:,1], "ko-")
plt.show()
