# -*- coding: utf-8 -*-
"""
# @file name    : optimizer_methods.py
# @author       : QuZhang
# date          : 2020-12-19
# @brief        : Five commonly used optimizer methods (step, zero_grad,
#                 add_param_group, state_dict, load_state_dict)
"""
import os
import torch
import torch.optim as optim
from tools.common_tools import  set_seed


BASE_DIR = os.path.dirname(os.path.abspath(__file__))
set_seed(1)  # fix the RNG seed so every run produces the same tensors

if __name__ == "__main__":
    # A 2x2 trainable weight with a hand-set all-ones gradient; every demo
    # section below operates on this same tensor.
    weight = torch.randn((2, 2), requires_grad=True)
    weight.grad = torch.ones((2, 2))

    optimizer = optim.SGD([weight], lr=0.1)  # one parameter group, learning rate (step size) 0.1

    # ------------ step ------------------
    # flag = True
    flag = False
    if flag:
        print("weight before step:{}".format(weight.data))
        optimizer.step()  # update: w <-- w - lr * w.grad
        print("weight after step:{}".format(weight.data))

    # ------------ zero_grad ---------------
    # flag = True
    flag = False
    if flag:
        # BUG FIX: this print ran before step() but its label claimed
        # "after step" — corrected to "before step".
        print("weight before step:{}".format(weight.data))
        optimizer.step()
        print("weight after step:{}".format(weight.data))

        # The optimizer holds a reference to the very same tensor object as
        # `weight` (identical id), not a copy.
        print("weight in optimizer:{}\n".format(id(optimizer.param_groups[0]['params'][0])))
        print("weight in weight:", id(weight))
        print("weight.grad is:\n {}\n".format(weight.grad))
        optimizer.zero_grad()  # clear the accumulated gradients
        print("after optimizer.zero_grad(), weight.grad is:\n{}".format(weight.grad))

    # ------------ add_param_group -------------
    # Add a new parameter group with its own hyper-parameters.
    # flag = True
    flag = False
    if flag:
        print("optimizer.param_group is:\n{}".format(optimizer.param_groups))

        # BUG FIX: the new parameter must require grad — without
        # requires_grad=True the optimizer could never update it, which
        # defeats the purpose of adding it to a param group.
        weight_2 = torch.randn((3, 3), requires_grad=True)
        # Register the new parameter group with its own learning rate.
        optimizer.add_param_group({"params": weight_2, "lr": 0.0001})

        print("optimizer.param_group is:", optimizer.param_groups)

    # ----------- state_dict ---------------
    # Save the optimizer's current state: param groups and per-parameter
    # buffers (e.g. momentum buffers for SGD with momentum).
    flag = True
    # flag = False
    if flag:
        optimizer = optim.SGD([weight], lr=0.1, momentum=0.9)
        opt_state_dict = optimizer.state_dict()

        print("state_dict before step:\n", opt_state_dict)

        # Run a few updates so the momentum buffers get populated.
        for _ in range(10):
            optimizer.step()

        print("state_dict after step:\n", optimizer.state_dict())
        # Persist the optimizer state to disk for later resumption.
        torch.save(optimizer.state_dict(), os.path.join(BASE_DIR, 'optimizer_state_dict.pkl'))

    # ------------- load state_dict ------------
    flag = True
    if flag:
        optimizer = optim.SGD([weight], lr=0.1, momentum=0.9)
        state_dict = torch.load(os.path.join(BASE_DIR, 'optimizer_state_dict.pkl'))

        print("state_dict before load state:\n", optimizer.state_dict())
        optimizer.load_state_dict(state_dict)  # restore the saved state so training can resume
        print("state_dict after load state:\n", optimizer.state_dict())