import random
import torch
import math
import matplotlib.pyplot as plt
import streamlit as st


def optimizer(opt_choice):
    """Run a 2-D optimization demo for the chosen algorithm and render it in Streamlit.

    Every algorithm minimizes the quadratic surface f(w1, w2) = w1**2 + 2 * w2**2
    (analytic gradient (2*w1, 4*w2)) starting from (w1, w2) = (-1, 1), records the
    parameter trajectory, and draws it over a contour plot via ``st.pyplot``.

    Args:
        opt_choice: One of "GD", "SGD", "MBGD", "Momentum", "Nesterov",
            "AdaGrad", "RMSProp", "Adam". Any other value is a no-op
            (matches the original if/elif chain falling through).
    """
    runners = {
        "GD": _run_gd,
        "SGD": _run_sgd,
        "MBGD": _run_mbgd,
        "Momentum": _run_momentum,
        "Nesterov": _run_nesterov,
        "AdaGrad": _run_adagrad,
        "RMSProp": _run_rmsprop,
        "Adam": _run_adam,
    }
    runner = runners.get(opt_choice)
    if runner is not None:
        runner()


def _demo_loss(w1, w2):
    """Shared demo loss f(w1, w2) = w1**2 + 2*w2**2; its gradient is (2*w1, 4*w2)."""
    return w1 ** 2 + 2 * w2 ** 2


def _make_contour(title=None, figsize=None, filled=False):
    """Create a figure/axes pair with a contour plot of the demo loss over [-1, 1]^2.

    Args:
        title: Optional matplotlib figure label (``num`` argument).
        figsize: Optional figure size tuple.
        filled: Use ``contourf`` with a rainbow colormap (GD demo) instead of line contours.

    Returns:
        (fig, ax) tuple for further plotting.
    """
    x1 = torch.linspace(-1, 1, 100)
    x2 = torch.linspace(-1, 1, 100)
    xx1, xx2 = torch.meshgrid(x1, x2, indexing="ij")
    fig = plt.figure(title, figsize=figsize)
    ax = fig.add_subplot()
    if filled:
        ax.contourf(xx1, xx2, _demo_loss(xx1, xx2), cmap="rainbow")
    else:
        ax.contour(xx1, xx2, _demo_loss(xx1, xx2))
    return fig, ax


def _draw_path(ax, points, style="ko-", **plot_kwargs):
    """Overlay the recorded (w1, w2) trajectory on the contour axes."""
    pts = torch.tensor(points)
    ax.plot(pts[:, 0], pts[:, 1], style, **plot_kwargs)


def _run_gd():
    """Vanilla (full-batch) gradient descent with hand-coded gradients."""
    lr = 0.1
    epochs = 20
    w1, w2 = -1, 1
    # BUG FIX: the original defined loss w1**2 + w2**2 in this branch but
    # descended the gradient (2*w1, 4*w2); use the shared w1**2 + 2*w2**2
    # surface so the gradient matches the plotted loss (and every other demo).
    fig, ax = _make_contour(figsize=(12, 6), filled=True)
    points = []
    for _ in range(epochs):
        points.append([w1, w2])
        print(_demo_loss(w1, w2))
        # Analytic gradient of the demo loss.
        g1 = 2 * w1
        g2 = 4 * w2
        w1 = w1 - lr * g1
        w2 = w2 - lr * g2
    _draw_path(ax, points, style="o-", color="r")
    st.pyplot(fig)


def _run_sgd():
    """Stochastic gradient descent over a randomly generated toy dataset."""
    # Synthetic samples: inputs in [0.5, 1.5]^2, target 0, so the per-sample
    # loss (w1*x1 + w2*x2 - 0)^2 is minimized at w = (0, 0).
    data = [([random.uniform(0.5, 1.5), random.uniform(0.5, 1.5)], 0)
            for _ in range(20)]
    lr = 0.05
    epochs = 20
    w1, w2 = -1, 1
    fig, ax = _make_contour()
    points = []
    for _ in range(epochs):
        points.append([w1, w2])
        random.shuffle(data)  # reshuffle each epoch: the "stochastic" part
        total_loss = 0
        for x, target in data:
            # Per-sample prediction error and squared loss.
            error = w1 * x[0] + w2 * x[1] - target
            total_loss += error ** 2
            # SGD updates with the gradient of this single sample.
            w1 -= lr * 2 * error * x[0]
            w2 -= lr * 2 * error * x[1]
        print(f"Loss:{total_loss / len(data):.4f}")
    _draw_path(ax, points)
    st.pyplot(fig)


def _run_mbgd():
    """Mini-batch gradient descent using torch autograd."""
    # Simulated data; fixed index set so batches can be drawn by permutation.
    n_samples = 100
    x1 = torch.rand(n_samples) * 2
    x2 = torch.rand(n_samples) * 2

    def loss_fn_batch(w1, w2, batch_indices):
        # Mean squared error of the linear model over one mini-batch
        # (all targets are zero).
        batch_loss = 0
        for idx in batch_indices:
            batch_loss += (w1 * x1[idx] + w2 * x2[idx]) ** 2
        return batch_loss / len(batch_indices)

    lr = 0.05
    epochs = 20
    batch_size = 16
    # requires_grad=True: let autograd compute the gradients for us.
    w1 = torch.tensor([-1.0], requires_grad=True)
    w2 = torch.tensor([1.0], requires_grad=True)
    fig, ax = _make_contour()
    points = []
    for epoch in range(epochs):
        points.append([w1.item(), w2.item()])
        total_loss = 0
        # torch.randperm(n) returns a random permutation of 0..n-1,
        # i.e. a shuffled index sequence.
        indices = torch.randperm(n_samples)
        for bs, i in enumerate(range(0, n_samples, batch_size)):
            batch_indices = indices[i:i + batch_size]
            loss = loss_fn_batch(w1, w2, batch_indices)
            total_loss += loss
            loss.backward()
            # Parameter updates must not be tracked by autograd.
            with torch.no_grad():
                w1 -= lr * w1.grad
                w2 -= lr * w2.grad
            # zero_() clears the accumulated gradients in place.
            w1.grad.zero_()
            w2.grad.zero_()
        # BUG FIX: average over the batch COUNT (bs + 1), not the last
        # 0-based batch index `bs` (off-by-one; also divides by zero when
        # there is a single batch).
        print(f"[{epoch + 1}/{epochs}] Loss: {(total_loss / (bs + 1)).item():.4f}")
    _draw_path(ax, points)
    st.pyplot(fig)


def _run_momentum():
    """Gradient descent with momentum (EMA of past gradients)."""
    lr = 0.05
    epochs = 20
    beta = 0.9  # momentum decay factor
    w1, w2 = -1, 1
    v1 = v2 = 0  # velocity terms
    fig, ax = _make_contour(title="GD with Momentum")
    points = []
    for _ in range(epochs):
        points.append([w1, w2])
        print(_demo_loss(w1, w2))
        g1 = 2 * w1
        g2 = 4 * w2
        # Exponential moving average of the gradient.
        v1 = beta * v1 + (1 - beta) * g1
        v2 = beta * v2 + (1 - beta) * g2
        w1 -= lr * v1
        w2 -= lr * v2
    _draw_path(ax, points)
    st.pyplot(fig)


def _run_nesterov():
    """Nesterov accelerated gradient: evaluate the gradient at a look-ahead point."""
    lr = 0.05
    epochs = 20
    beta = 0.9
    w1, w2 = -1, 1
    v1 = v2 = 0
    fig, ax = _make_contour(title="NAG")
    points = []
    for _ in range(epochs):
        points.append([w1, w2])
        print(_demo_loss(w1, w2))
        # Predict the next position from the accumulated momentum ...
        w1_pre = w1 + beta * v1
        w2_pre = w2 + beta * v2
        # ... and take the gradient THERE instead of at the current point.
        g1_pre = 2 * w1_pre
        g2_pre = 4 * w2_pre
        v1 = beta * v1 - lr * g1_pre
        v2 = beta * v2 - lr * g2_pre
        w1 += v1
        w2 += v2
    _draw_path(ax, points)
    st.pyplot(fig)


def _run_adagrad():
    """AdaGrad: per-parameter learning rate scaled by accumulated squared gradients."""
    lr = 0.5
    epochs = 20
    w1, w2 = -1, 1
    s1 = s2 = 0  # running sums of squared gradients
    epsilon = 1e-7  # avoids division by zero
    fig, ax = _make_contour(title="AdaGrad")
    points = []
    for _ in range(epochs):
        points.append([w1, w2])
        print(_demo_loss(w1, w2))
        g1 = 2 * w1
        g2 = 4 * w2
        s1 = s1 + g1 * g1
        s2 = s2 + g2 * g2
        # Effective learning rate shrinks monotonically as s grows.
        w1 -= lr * g1 / math.sqrt(s1 + epsilon)
        w2 -= lr * g2 / math.sqrt(s2 + epsilon)
        print(f"lr = {lr / math.sqrt(s1 + epsilon):.4f}")
    _draw_path(ax, points)
    st.pyplot(fig)


def _run_rmsprop():
    """RMSProp: AdaGrad with an exponential moving average instead of a raw sum."""
    lr = 0.5
    epochs = 5
    w1, w2 = -1, 1
    s1 = s2 = 0  # EMA of squared gradients
    epsilon = 1e-7
    beta = 0.3  # EMA decay factor
    fig, ax = _make_contour(title="RMSProp")
    points = []
    for _ in range(epochs):
        points.append([w1, w2])
        print(_demo_loss(w1, w2))
        g1 = 2 * w1
        g2 = 4 * w2
        # Decaying average lets the effective lr recover, unlike AdaGrad.
        s1 = beta * s1 + (1 - beta) * g1 * g1
        s2 = beta * s2 + (1 - beta) * g2 * g2
        w1 -= lr * g1 / math.sqrt(s1 + epsilon)
        w2 -= lr * g2 / math.sqrt(s2 + epsilon)
        print(f"w1_lr = {lr / math.sqrt(s1 + epsilon):.4f} "
              f"w2_lr = {lr / math.sqrt(s2 + epsilon):.4f}")
    _draw_path(ax, points)
    st.pyplot(fig)


def _run_adam():
    """Adam: bias-corrected first/second moment estimates of the gradient."""
    lr = 0.05
    epochs = 20
    w1, w2 = -1, 1
    m1 = m2 = 0  # first-moment (mean) estimates
    v1 = v2 = 0  # second-moment (uncentered variance) estimates
    beta1 = 0.9
    beta2 = 0.999
    epsilon = 1e-8
    t = 0  # timestep for bias correction
    fig, ax = _make_contour(title="Adam")
    points = []
    for _ in range(epochs):
        points.append([w1, w2])
        print(_demo_loss(w1, w2))
        g1 = 2 * w1
        g2 = 4 * w2
        # Update first-moment estimates.
        m1 = beta1 * m1 + (1 - beta1) * g1
        m2 = beta1 * m2 + (1 - beta1) * g2
        # Update second-moment estimates.
        v1 = beta2 * v1 + (1 - beta2) * g1 ** 2
        v2 = beta2 * v2 + (1 - beta2) * g2 ** 2
        # Bias correction compensates for the zero initialization of m and v.
        t += 1
        m1_hat = m1 / (1 - beta1 ** t)
        m2_hat = m2 / (1 - beta1 ** t)
        v1_hat = v1 / (1 - beta2 ** t)
        v2_hat = v2 / (1 - beta2 ** t)
        w1 -= lr * m1_hat / (math.sqrt(v1_hat) + epsilon)
        w2 -= lr * m2_hat / (math.sqrt(v2_hat) + epsilon)
    _draw_path(ax, points)
    st.pyplot(fig)


def optimizer_code(opt_choice):
    """Return the display-only source snippet for the chosen optimizer.

    Args:
        opt_choice: One of "GD", "SGD", "MBGD", "Momentum", "Nesterov",
            "AdaGrad", "RMSProp", "Adam".

    Returns:
        The snippet as a string, or "" for an unknown choice (the original
        raised UnboundLocalError at the final return in that case).
    """
    # BUG FIX: default value so an unrecognized choice returns "" instead of
    # crashing with UnboundLocalError at `return sample_code`.
    sample_code = ""
    if opt_choice == "GD":
        # BUG FIX inside the snippet: the displayed loss was w1**2 + w2**2
        # while the displayed gradient step used g2 = 4 * w2; the loss is
        # now w1**2 + 2 * w2**2 so the shown math is self-consistent.
        sample_code = """
        # 定义损失函数
        def loss_fn(w1, w2):
            return w1 ** 2 + 2 * w2 ** 2

        # 超参数
        lr = 0.1
        Epochs = 20
        w1 = -1
        w2 = 1
        '''绘制等高线图'''
        x1 = torch.linspace(-1, 1, 100)
        x2 = torch.linspace(-1, 1, 100)
        xx1, xx2 = torch.meshgrid(x1, x2, indexing='ij')
        fig = plt.figure(figsize=(12, 6))
        ax = fig.add_subplot()
        ax.contourf(xx1, xx2, loss_fn(xx1, xx2), cmap='rainbow')
        points = []
        # 循环训练
        for epoch in range(Epochs):
            points.append([w1, w2])
            loss = loss_fn(w1, w2)
            print(loss)
            # gd算法
            g1 = 2 * w1
            g2 = 4 * w2
            w1 = w1 - lr * g1
            w2 = w2 - lr * g2
        points = torch.tensor(points)
        ax.plot(points[:, 0], points[:, 1], 'o-', color='r')
        plt.show()
        """

    elif opt_choice == "SGD":
        sample_code = """
        # 1. 生成数据   样本点
        data = []
        for i in range(20):
            x = [random.uniform(0.5, 1.5), random.uniform(0.5, 1.5)]
            target = 0
            data.append((x, target))
        # 2. 超参数
        lr = 0.05
        Epochs = 20
        w1 = -1
        w2 = 1
        ''' 绘制等高线图 '''
        x1 = torch.linspace(-1, 1, 100)
        x2 = torch.linspace(-1, 1, 100)
        xx1, xx2 = torch.meshgrid(x1, x2, indexing="ij")
        loss = xx1 ** 2 + 2 * xx2 ** 2
        fig = plt.figure()
        ax = fig.add_subplot()
        ax.contour(xx1, xx2, loss)
        # 定义一个列表，用于存储梯度下降的路径点
        points = []
        # 3. SGD算法的循环训练（带有数据随机化）
        for epoch in range(Epochs):
            points.append([w1, w2])
            random.shuffle(data)
            total_loss = 0
            for data_point in data:
                x, target = data_point
                # 计算当前 单个数据点的梯度
                pre = w1 * x[0] + w2 * x[1]
                error = pre - target
                loss = error ** 2
                total_loss += loss
                g1 = 2 * error * x[0]
                g2 = 2 * error * x[1]
                # 注意：SGD中使用当前数据点的梯度
                w1 -= lr * g1
                w2 -= lr * g2
            print(f"Loss:{total_loss / len(data):.4f}")
        points = torch.tensor(points)
        ax.plot(points[:, 0], points[:, 1], "ko-")
        plt.show()
        """

    elif opt_choice == "MBGD":
        # BUG FIX inside the snippet: the per-epoch average divides by the
        # batch count (bs + 1), not the last 0-based batch index `bs`.
        sample_code = """
        def loss_fn_batch(w1, w2, batch_indices):
            batch_loss = 0
            for idx in batch_indices:
                batch_loss += (w1 * x1[idx] + w2 * x2[idx] - 0) ** 2
            # print(f"第{bs+1}批次的损失为：{batch_loss / len(batch_indices)}")
            return batch_loss / len(batch_indices)

        # 1. 模拟数据   为了保证随机抽取 一批次样本 所以需要是定下标索引
        n_samples = 100
        x1 = torch.rand(n_samples) * 2
        x2 = torch.rand(n_samples) * 2
        # 2. 超参数
        lr = 0.05
        Epochs = 20
        batch_size = 16
        w1 = torch.tensor([-1.0], requires_grad=True)  # 开启自动求导，不用手动的计算了
        w2 = torch.tensor([1.0], requires_grad=True)
        ''' 绘制等高线图 '''
        X1 = torch.linspace(-1, 1, 100)
        X2 = torch.linspace(-1, 1, 100)
        xx1, xx2 = torch.meshgrid(X1, X2, indexing="ij")
        loss = xx1 ** 2 + 2 * xx2 ** 2
        fig = plt.figure()
        ax = fig.add_subplot()
        ax.contour(xx1, xx2, loss)
        # 定义一个列表，用于存储梯度下降的路径点
        points = []
        # 3. MBGD算法的循环训练
        for epoch in range(Epochs):
            points.append([w1.item(), w2.item()])
            total_loss = 0
            # 随机打乱数据索引
            # torch.randperm(n) 返回从0到n - 1的整数的随机排列  也就是随机下标索引序列
            indices = torch.randperm(n_samples)
            # 按批次处理
            for bs, i in enumerate(range(0, n_samples, batch_size)):
                batch_indices = indices[i:i + batch_size]
                loss = loss_fn_batch(w1, w2, batch_indices)
                total_loss += loss
                # 反向传播
                loss.backward()
                # 更新参数
                with torch.no_grad():
                    w1 -= lr * w1.grad
                    w2 -= lr * w2.grad
                # 清空梯度 zero_() 就地置零
                w1.grad.zero_()
                w2.grad.zero_()
            print(f"[{epoch + 1}/{Epochs}] Loss: {(total_loss / (bs + 1)).item():.4f}")
        points = torch.tensor(points)
        ax.plot(points[:, 0], points[:, 1], "ko-")
        plt.show()
        """

    elif opt_choice == "Momentum":
        sample_code = """
        # 1. 定义损失函数
        def loss_fn(w1, w2):
            return w1 ** 2 + 2 * w2 ** 2

        # 2. 超参数
        lr = 0.05
        Epochs = 20
        beta = 0.9
        w1 = -1
        w2 = 1
        v1 = 0
        v2 = 0
        ''' 绘制等高线图 '''
        x1 = torch.linspace(-1, 1, 100)
        x2 = torch.linspace(-1, 1, 100)
        xx1, xx2 = torch.meshgrid(x1, x2, indexing="ij")
        loss = loss_fn(xx1, xx2)
        fig = plt.figure("GD with Momentum")
        ax = fig.add_subplot()
        ax.contour(xx1, xx2, loss)
        # 定义一个列表，用于存储梯度下降的路径点
        points = []
        # 3. 循环训练
        for epoch in range(Epochs):
            points.append([w1, w2])  # 保存参数点
            loss = loss_fn(w1, w2)
            print(loss)
            # ==== GD with Momentum算法 ====
            # 计算梯度
            g1 = 2 * w1
            g2 = 4 * w2
            # 计算动量
            v1 = beta * v1 + (1 - beta) * g1
            v2 = beta * v2 + (1 - beta) * g2
            # 更新参数
            w1 -= lr * v1
            w2 -= lr * v2
        points = torch.tensor(points)
        ax.plot(points[:, 0], points[:, 1], "ko-")
        plt.show()
        """

    elif opt_choice == "Nesterov":
        sample_code = """
        # 1. 定义损失函数
        def loss_fn(w1, w2):
            return w1 ** 2 + 2 * w2 ** 2

        # 2. 超参数
        lr = 0.05
        Epochs = 20
        beta = 0.9
        w1 = -1
        w2 = 1
        v1 = 0
        v2 = 0
        ''' 绘制等高线图 '''
        x1 = torch.linspace(-1, 1, 100)
        x2 = torch.linspace(-1, 1, 100)
        xx1, xx2 = torch.meshgrid(x1, x2, indexing="ij")
        loss = loss_fn(xx1, xx2)
        fig = plt.figure("NAG")
        ax = fig.add_subplot()
        ax.contour(xx1, xx2, loss)
        # 定义一个列表，用于存储梯度下降的路径点
        points = []
        # 3. 循环训练
        for epoch in range(Epochs):
            points.append([w1, w2])  # 保存参数点
            loss = loss_fn(w1, w2)
            print(loss)
            # ==== NAG算法 ====
            # 根据累积的动量信息，预测下一个可能的参数位置
            w1_pre = w1 + beta * v1
            w2_pre = w2 + beta * v2
            # 计算 【预测到的下一个可能的参数位置】 的梯度
            g1_pre = 2 * w1_pre
            g2_pre = 4 * w2_pre
            # 更新动量
            v1 = beta * v1 - lr * g1_pre
            v2 = beta * v2 - lr * g2_pre
            # 更新参数
            w1 += v1
            w2 += v2
        points = torch.tensor(points)
        ax.plot(points[:, 0], points[:, 1], "ko-")
        plt.show()
        """

    elif opt_choice == "AdaGrad":
        sample_code = """
        # 1. 定义损失函数
        def loss_fn(w1, w2):
            return w1 ** 2 + 2 * w2 ** 2

        # 2. 超参数
        lr = 0.5
        Epochs = 20
        w1 = -1
        w2 = 1
        S1 = 0  # 梯度平方累积和
        S2 = 0
        epsilon = 1e-7
        ''' 绘制等高线图 '''
        x1 = torch.linspace(-1, 1, 100)
        x2 = torch.linspace(-1, 1, 100)
        xx1, xx2 = torch.meshgrid(x1, x2, indexing="ij")
        loss = loss_fn(xx1, xx2)
        fig = plt.figure("AdaGrad")
        ax = fig.add_subplot()
        ax.contour(xx1, xx2, loss)
        # 定义一个列表，用于存储梯度下降的路径点
        points = []
        # 3. 循环训练
        for epoch in range(Epochs):
            points.append([w1, w2])  # 保存参数点
            loss = loss_fn(w1, w2)
            print(loss)
            # ==== AdaGrad算法 ====
            # 计算梯度
            g1 = 2 * w1
            g2 = 4 * w2
            # 累计梯度平方和
            S1 = S1 + g1 * g1
            S2 = S2 + g2 * g2
            # 更新参数
            w1 -= lr * g1 / math.sqrt(S1 + epsilon)
            w2 -= lr * g2 / math.sqrt(S2 + epsilon)
            print(f"lr = {lr / math.sqrt(S1 + epsilon):.4f}")
        points = torch.tensor(points)
        ax.plot(points[:, 0], points[:, 1], "ko-")
        plt.show()
        """

    elif opt_choice == "RMSProp":
        sample_code = """
        # 1. 定义损失函数
        def loss_fn(w1, w2):
            return w1 ** 2 + 2 * w2 ** 2

        # 2. 超参数
        lr = 0.5
        Epochs = 5
        w1 = -1
        w2 = 1
        S1 = 0  # 梯度平方累积和
        S2 = 0
        epsilon = 1e-7
        beta = 0.3
        ''' 绘制等高线图 '''
        x1 = torch.linspace(-1, 1, 100)
        x2 = torch.linspace(-1, 1, 100)
        xx1, xx2 = torch.meshgrid(x1, x2, indexing="ij")
        loss = loss_fn(xx1, xx2)
        fig = plt.figure("RMSProp")
        ax = fig.add_subplot()
        ax.contour(xx1, xx2, loss)
        # 定义一个列表，用于存储梯度下降的路径点
        points = []
        # 3. 循环训练
        for epoch in range(Epochs):
            points.append([w1, w2])  # 保存参数点
            loss = loss_fn(w1, w2)
            print(loss)
            # ==== RMSProp算法 ====
            # 计算梯度
            g1 = 2 * w1
            g2 = 4 * w2
            # 累计梯度平方和
            S1 = beta * S1 + (1 - beta) * g1 * g1
            S2 = beta * S2 + (1 - beta) * g2 * g2
            # 更新参数
            w1 -= lr * g1 / math.sqrt(S1 + epsilon)
            w2 -= lr * g2 / math.sqrt(S2 + epsilon)
            print(f"w1_lr = {lr / math.sqrt(S1 + epsilon):.4f} "
                  f"w2_lr = {lr / math.sqrt(S2 + epsilon):.4f}")
        points = torch.tensor(points)
        ax.plot(points[:, 0], points[:, 1], "ko-")
        plt.show()
        """

    elif opt_choice == "Adam":
        sample_code = """
        # 1. 定义损失函数
        def loss_fn(w1, w2):
            return w1 ** 2 + 2 * w2 ** 2
        # 2. 超参数
        lr = 0.05
        Epochs = 20
        w1 = -1
        w2 = 1
        m1 = 0
        m2 = 0
        v1 = 0
        v2 = 0
        beta1 = 0.9
        beta2 = 0.999
        epsilon = 1e-8
        t = 0
        ''' 绘制等高线图 '''
        x1 = torch.linspace(-1, 1, 100)
        x2 = torch.linspace(-1, 1, 100)
        xx1, xx2 = torch.meshgrid(x1, x2, indexing="ij")
        loss = loss_fn(xx1, xx2)
        fig = plt.figure("Adam")
        ax = fig.add_subplot()
        ax.contour(xx1, xx2, loss)
        # 定义一个列表，用于存储梯度下降的路径点
        points = []
        # 3. 循环训练
        for epoch in range(Epochs):
            points.append([w1, w2])  # 保存参数点
            loss = loss_fn(w1, w2)
            print(loss)
            # ==== Adam算法 ====
            # 计算梯度
            g1 = 2 * w1
            g2 = 4 * w2
            # 更新一阶矩估计
            m1 = beta1 * m1 + (1 - beta1) * g1
            m2 = beta1 * m2 + (1 - beta1) * g2
            # 更新二阶矩估计
            v1 = beta2 * v1 + (1 - beta2) * g1 ** 2
            v2 = beta2 * v2 + (1 - beta2) * g2 ** 2
            # 进行偏差修正
            t += 1
            m1_hat = m1 / (1 - beta1 ** t)
            m2_hat = m2 / (1 - beta1 ** t)
            v1_hat = v1 / (1 - beta2 ** t)
            v2_hat = v2 / (1 - beta2 ** t)
            # 更新参数
            w1 -= lr * m1_hat / (math.sqrt(v1_hat) + epsilon)
            w2 -= lr * m2_hat / (math.sqrt(v2_hat) + epsilon)
        points = torch.tensor(points)
        ax.plot(points[:, 0], points[:, 1], "ko-")
        plt.show()
        """
    return sample_code

