# -*- coding: utf-8 -*-
"""
@file name  : hook_methods.py
@author     : QuZhang
@date       : 2020-12-30 11:16
@brief      : PyTorch hook functions (tensor-level and module-level hooks)
"""
import torch
import torch.nn as nn
from tools.common_tools import set_seed


set_seed(1)  # presumably fixes RNG seeds for reproducibility — project helper; verify in tools.common_tools

if __name__ == "__main__":

    # ----------- 1 Tensor.register_hook 1 -------------
    # register_hook attaches a callback that fires during backward,
    # right after the gradient of the registered tensor is computed.
    # Example 1: capture the gradient of a non-leaf tensor, which is
    # otherwise freed once backward finishes.
    # flag = True
    flag = False
    if flag:
        w = torch.tensor([1.0], requires_grad=True)  # leaf node
        x = torch.tensor([2.0], requires_grad=True)  # leaf node
        a = x + w          # non-leaf: its .grad is not retained by default
        b = w + 1
        y = a * b

        a_grad = []  # filled by the hook during backward

        def grad_hook(grad):
            # Receives a's freshly computed gradient; stash it for later.
            a_grad.append(grad)

        handle = a.register_hook(grad_hook)  # attach the callback to tensor a

        y.backward()

        # Leaf grads (w, x) are populated; non-leaf grads (a, b, y) are None.
        print("gradient:", w.grad, x.grad, a.grad, b.grad, y.grad)
        print("a_grad[0]: ", a_grad[0])
        handle.remove()  # detach the hook once it is no longer needed

    # ------------- 2 Tensor.register_hook 2 --------------
    # Example 2: modify the gradient from inside the hook.
    # (Fixed: this section was indented 16 spaces, inconsistent with the
    # 8-space body style used everywhere else in this file.)
    # flag = True
    flag = False
    if flag:
        w = torch.tensor([1.], requires_grad=True)
        x = torch.tensor([2.], requires_grad=True)
        a = torch.add(w, x)
        b = torch.add(w, 1)
        y = torch.mul(a, b)

        def grad_hook(grad):
            # In-place doubling, then returning grad*3: a non-None return
            # value replaces the gradient, so the final w.grad is the
            # original gradient times 6.
            grad *= 2
            return grad*3

        handle = w.register_hook(grad_hook)

        y.backward()

        # y = (w+x)*(w+1) => dy/dw = (w+1)+(w+x) = 5; with the hook: 30.
        print("w.grad: ", w.grad)
        handle.remove()

    # ------------- 3 Module hook ----------
    flag = True
    if flag:
        class Net(nn.Module):
            """Tiny conv + max-pool network used to demonstrate module hooks."""

            def __init__(self):
                super().__init__()
                self.conv1 = nn.Conv2d(1, 2, 3, stride=1, padding=0)
                self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)

            def forward(self, inputs):
                inputs = self.conv1(inputs)
                inputs = self.pool1(inputs)
                return inputs

        def forward_hook(module, data_input, data_output):
            """Save the module's input and output feature maps.

            Called automatically after the module's forward pass.
            """
            fmap_block.append(data_output)
            input_block.append(data_input)

        def forward_pre_hook(module, data_input):
            """Inspect the input; called automatically before forward runs."""
            print("forward_pre_hook input:{}".format(data_input))

        def backward_hook(module, grad_input, grad_output):
            """Inspect gradients; called automatically during backward."""
            print("backward hook input:{}".format(grad_input))
            print("backward hook output:{}".format(grad_output))

        # Build the network with deterministic weights so the demo output
        # is easy to reason about.
        net = Net()
        net.conv1.weight.detach()[0, 0].fill_(1)  # first kernel, first channel -> ones
        net.conv1.weight.detach()[1].fill_(2)     # second kernel -> twos
        net.conv1.bias.detach().zero_()           # zero bias (dropped redundant .data before .detach())

        # Register the hooks on conv1.
        fmap_block = list()   # output feature maps collected by forward_hook
        input_block = list()  # input feature maps collected by forward_hook
        net.conv1.register_forward_hook(forward_hook)
        net.conv1.register_forward_pre_hook(forward_pre_hook)
        # NOTE(review): register_backward_hook is deprecated in favor of
        # register_full_backward_hook; kept here because the demo relies on
        # this API's printed grad_input/grad_output format.
        net.conv1.register_backward_hook(backward_hook)

        # inference
        fake_img = torch.ones((1, 1, 4, 4))
        output = net(fake_img)

        loss_fnc = nn.L1Loss()
        target = torch.randn_like(output)
        # Conventional (input, target) argument order; L1 is symmetric, so
        # the loss value and the gradient reaching `output` are unchanged.
        loss = loss_fnc(output, target)
        loss.backward()

        # Inspection helpers (uncomment to dump shapes/values)
        # print("output shape: {}\noutput value: {}\n".format(output.shape, output))
        # print("feature maps shape: {}\noutput value: {}\n".format(fmap_block[0].shape, fmap_block[0]))
        # print("input shape: {}\ninput value: {}".format(input_block[0][0].shape, input_block[0]))