import math
import torch
import matplotlib.pyplot as plt

"""
Adam算法：是一种广泛用于深度学习的自适应学习率优化算法。它结合了动量（Momentum）和 RMSprop算法的优点，
通过计算梯度的一阶矩估计（均值）和二阶矩估计（未中心化的方差）来动态调整每个参数的学习率。
"""

# 1. Define the loss function: an elliptical quadratic bowl, minimized at the origin.
def loss_fn(w1, w2):
    """Return the quadratic loss w1^2 + 2*w2^2 (minimum 0 at (0, 0))."""
    return w1 * w1 + 2 * (w2 * w2)

# 2. Hyperparameters and Adam state.
lr = 0.05        # learning rate (base step size)
Epochs = 20      # number of optimization steps
w1 = -1          # initial value of parameter w1
w2 = 1           # initial value of parameter w2
m1 = 0           # first-moment (mean) estimate for w1's gradient
m2 = 0           # first-moment (mean) estimate for w2's gradient
v1 = 0           # second-moment (uncentered variance) estimate for w1's gradient
v2 = 0           # second-moment (uncentered variance) estimate for w2's gradient
beta1 = 0.9      # exponential decay rate for the first-moment estimates
beta2 = 0.999    # exponential decay rate for the second-moment estimates
epsilon = 1e-8   # small constant to avoid division by zero in the update
t = 0            # time-step counter, used for bias correction

''' 绘制等高线图 '''
# Draw the contour plot of the loss surface over [-1, 1] x [-1, 1].
x1 = torch.linspace(-1, 1, 100)
x2 = torch.linspace(-1, 1, 100)
# "ij" indexing keeps x1 varying along rows, matching loss_fn(w1, w2)'s argument order.
xx1, xx2 = torch.meshgrid(x1, x2, indexing="ij")
loss = loss_fn(xx1, xx2)
fig = plt.figure("Adam")
ax = fig.add_subplot()
ax.contour(xx1, xx2, loss)
# List used to record the (w1, w2) points visited by the optimizer.
points = []

# 3. Training loop: hand-rolled Adam updates on (w1, w2).
for epoch in range(Epochs):
    points.append([w1, w2])     # record the current parameter point
    loss = loss_fn(w1, w2)
    print(loss)

    # ==== Adam update ====
    t += 1  # advance the time step; used below for bias correction

    # Analytic gradients of w1^2 + 2*w2^2 with respect to w1 and w2.
    g1, g2 = 2 * w1, 4 * w2

    # Exponential moving average of the gradients (first moments) ...
    m1, m2 = beta1 * m1 + (1 - beta1) * g1, beta1 * m2 + (1 - beta1) * g2
    # ... and of the squared gradients (second moments).
    v1, v2 = beta2 * v1 + (1 - beta2) * g1 ** 2, beta2 * v2 + (1 - beta2) * g2 ** 2

    # Bias-corrected estimates: the moments start at 0, so early averages
    # underestimate the true values; dividing by (1 - beta^t) compensates.
    m1_hat = m1 / (1 - beta1 ** t)
    m2_hat = m2 / (1 - beta1 ** t)
    v1_hat = v1 / (1 - beta2 ** t)
    v2_hat = v2 / (1 - beta2 ** t)

    # Parameter step: each coordinate gets its own adaptive step size.
    w1 -= lr * m1_hat / (math.sqrt(v1_hat) + epsilon)
    w2 -= lr * m2_hat / (math.sqrt(v2_hat) + epsilon)

# Convert the recorded optimization path to a tensor and overlay it
# on the contour plot as black markers connected by lines, then display.
points = torch.tensor(points)
ax.plot(points[:,0], points[:,1], "ko-")
plt.show()
