# coding:utf-8
# Author : hiicy redldw
# Date : 2019/05/15
import torch
import numpy as np

# Demo: PyTorch autograd — gradients accumulate in leaf tensors across
# multiple backward() calls when the graph is retained.
a = np.array([2, 4, 7], dtype=np.float64)  # np.float was removed in NumPy 1.24
# x = torch.ones(2, 2, requires_grad=True)
x = torch.from_numpy(a)
x.requires_grad = True  # x is a leaf tensor, so gradients accumulate in x.grad
y = x ** 2 + 2
# y = x+2
z = y * y * 3  # z = 3 * y^2
print(y.grad_fn)
# out = z.mean()
total = z.sum()  # renamed from `sum` to avoid shadowing the builtin
# print(out,out.grad_fn)
print(total, total.grad_fn)
# retain_graph=True keeps the computation graph alive so the second
# backward() below does not raise "graph has been freed".
total.backward(retain_graph=True)
# d(total)/dx = dz/dy * dy/dx = 6y * 2x = 12x(x^2 + 2)
print(x.grad)

# The tensor passed to backward() is the gradient of some scalar L w.r.t. y —
# i.e. the per-element weight applied when back-propagating through y.
# NOTE: the keyword is `retain_graph` (the original `keep_graph` is invalid).
y.backward(torch.ones_like(y), retain_graph=True)
# y.grad is None here: y is a non-leaf tensor, so its gradient is not retained.
print(y, y.grad)
# Gradients ACCUMULATE in the leaf: x.grad is now 12x(x^2 + 2) + 2x.
print(x.grad)

# [Meaning of the tensor argument to backward() in PyTorch autograd](https://www.cnblogs.com/JeasonIsCoding/p/10164948.html)
