# In [8]: a=torch.rand(3)

# In [9]: a.requires_grad_()
# Out[9]: tensor([0.0654, 0.4896, 0.1782], requires_grad=True)

# In [10]: from torch.nn import functional as F

# In [11]: p=F.softmax(a,dim=0)

# In [12]: p.backward()
# NOTE: as written this raises
#   RuntimeError: grad can be implicitly created only for scalar outputs
# because p is a length-3 vector, not a scalar. To backprop through a
# non-scalar, supply an explicit gradient, e.g. p.backward(torch.ones_like(p)),
# or reduce to a scalar first (p.sum().backward()).

# In [13]: torch.autograd.grad(p[1],[a],retain_graph=True)
# Out[13]: (tensor([-0.1149,  0.2434, -0.1286]),)
# retain_graph=True keeps the autograd graph alive after this call, which is
# what allows the second grad computation in In [14] to succeed.

# In [14]: torch.autograd.grad(p[2],[a])
# Out[14]: (tensor([-0.0841, -0.1286,  0.2127]),)
# No retain_graph here, so the graph is freed after this call; a further
# grad call on p would fail. Note each gradient (a row of the softmax
# Jacobian) sums to ~0: only the diagonal entry dp_i/da_i is positive.
