import torch
from torch.autograd.function import Function

class MulConstant(Function):
  """Custom autograd op computing ``tensor * constant``.

  ``ctx`` acts like ``self`` here: attributes stored on it in ``forward``
  are available again in ``backward``.
  """

  @staticmethod
  def forward(ctx, tensor, constant):
    # Save the constant for use in backward (no tensors involved, so a
    # plain attribute is fine; tensors would need ctx.save_for_backward).
    ctx.constant = constant
    return tensor * constant

  @staticmethod
  def backward(ctx, grad_output):
      # d(tensor * constant)/d(tensor) = constant, so the incoming gradient
      # must be scaled by the saved constant (the original code dropped it).
      # The second return value is the gradient w.r.t. `constant`, which is
      # a non-tensor input, hence None.
      return grad_output * ctx.constant, None

# Demo: run the custom op forward, then check the gradient it produces.
x = torch.rand(3, 3, requires_grad=True)
y = MulConstant.apply(x, 5)
print("a:" + str(x))
print("b:" + str(y))

# Seed the backward pass with a gradient of ones, then inspect x.grad.
y.backward(torch.ones_like(x))
print(x.grad)
