# coding:utf-8
# __user__ = hiicy redldw
# __time__ = 2019/7/30
# __file__ = hook_
# __desc__ =

# [pytorch中autograd以及hook函数详解](https://cloud.tencent.com/developer/article/1122582)
# [Pytorch中autograd以及hook函数详解](https://oldpan.me/archives/pytorch-autograd-hook)

import torch
import torch.nn as nn

# Select the first CUDA GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


class MyMul(nn.Module):
    """Custom multiplication module: returns its input doubled element-wise."""

    def forward(self, input):
        return input * 2


class MyMean(nn.Module):
    """Custom division module: returns its input divided by 4 element-wise."""

    def forward(self, input):
        return input / 4


def tensor_hook(grad):
    """Tensor-level backward hook.

    Prints the gradient flowing into the tensor it is registered on
    (here: dz/dx) and returns it unchanged, so backprop is unaffected.
    """
    print('tensor hook')
    print('grad', grad)
    return grad


# y=w_{1} \times x_{1}+w_{2} \times x_{2}+w_{3} \times x_{3}+w_{4} \times x_{4}+b
# z = y/4


class MyNet(nn.Module):
    """Toy network: z = (w . x + b) / 4.

    Step 1 is a Linear(4, 1) layer, step 2 divides by 4 via MyMean.
    Weights are fixed (all 8.0, bias 2.0) so gradients can be checked by hand.
    """

    def __init__(self):
        super().__init__()  # zero-arg super() — file targets Python 3
        self.f1 = nn.Linear(4, 1, bias=True)
        self.f2 = MyMean()
        self.weight_init()

    def forward(self, input):
        # Keep a handle to the raw input so backward hooks can refer to it
        # (see the commented-out code in my_hook below).
        self.input = input
        output = self.f1(input)   # step 1: affine map w . x + b
        output = self.f2(output)  # step 2: divide by 4
        return output

    def weight_init(self):
        # Deterministic init: every weight 8.0, bias 2.0.
        # torch.no_grad() is the supported way to mutate parameters in place;
        # it replaces the discouraged `.data` accessor with identical effect.
        with torch.no_grad():
            self.f1.weight.fill_(8.0)
            self.f1.bias.fill_(2.0)

    def my_hook(self, module, grad_input, grad_output):
        """Module backward hook: print gradients and pass them through.

        grad_input:  gradient w.r.t. the hooked module's inputs (dz/dy here).
        grad_output: gradient w.r.t. the hooked module's outputs (dz/dz here).
        Returning grad_input unchanged leaves backpropagation unaffected.
        """
        print('doing my_hook')
        print('original grad:', grad_input)
        print('original outgrad:', grad_output)
        # Example of rewriting the gradient inside the hook (must return a
        # tuple if enabled):
        # grad_input = grad_input[0] * self.input
        # grad_input = tuple([grad_input])
        # print('now grad:', grad_input)
        return grad_input

if __name__ == '__main__':
    # Create the leaf tensor directly on the target device. The original
    # `torch.tensor(..., requires_grad=True).to(device)` returned a NON-leaf
    # copy whenever CUDA was available, so `input.grad` stayed None after
    # backward(); building it on the device keeps it a leaf.
    input = torch.tensor([1, 2, 3, 4], dtype=torch.float32,
                         requires_grad=True, device=device)
    # Move the network too, otherwise CPU parameters meet a CUDA input.
    net = MyNet().to(device)

    # Hooks must be registered BEFORE the forward pass, because they are
    # bound while autograd builds the graph.
    # NOTE(review): register_backward_hook is deprecated in recent PyTorch in
    # favour of register_full_backward_hook, whose grad_input semantics
    # differ slightly; the legacy call is kept to preserve printed output.
    net.register_backward_hook(net.my_hook)
    # Tensor hook: receives the gradient flowing back into `input` (dz/dx).
    input.register_hook(tensor_hook)

    result = net(input)
    print('result =', result)
    result.backward()
    print('input.grad:', input.grad)
    for param in net.parameters():
        print('{}:grad->{}'.format(param, param.grad))

    ################################################################
    dang = {}  # shape registry filled by the forward hooks below

    def my_hook(name):
        """Forward-hook factory: record input/output shapes under `name`."""
        def hook(module, input, output):
            if not hasattr(input[0], 'shape'):
                # input[0] is presumably a dict of tensors — record each
                # shape. The original `for k, v in input[0]` could not
                # unpack a mapping; .items() is required.
                for k, v in input[0].items():
                    dang[k] = v.shape
            else:
                dang[name] = [input[0].shape, output[0].shape]
        return hook

    # RetinaFace and cfg are not defined or imported anywhere in this file;
    # the original `rf = RetinaFace(cfg)` crashed with NameError. Kept here,
    # disabled, as a usage sketch for the hook factory above.
    # rf = RetinaFace(cfg)
    # img = torch.rand(1, 3, 320, 320)
    # rf.body.stage1.register_forward_hook(my_hook("stage1"))
    # rf.body.stage2.register_forward_hook(my_hook("stage2"))
