# Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved.

# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


# Demonstrates nn.NLLLoss paired with nn.LogSoftmax, and inspects the
# gradients on both the log-probabilities and the raw input scores.
import torch
import torch.nn as nn

# LogSoftmax over the class dimension (dim=1), followed by the negative
# log-likelihood loss averaged over the batch.
m = nn.LogSoftmax(dim=1)
loss = nn.NLLLoss(reduction="mean")
# A batch of 3 samples, each with raw (unnormalized) scores for 5 classes.
inp = torch.tensor(
    [
        [1.0, 2.0, 5.0, -1.0, 3.0],
        [-2.0, 5.0, -1.0, 3.0, 2.0],
        [2.0, -4.0, -1.0, 1.0, 1.0],
    ],
    requires_grad=True,
)
# Ground-truth class index for each sample.
target = torch.tensor([1, 0, 4])
t1 = m(inp)
# t1 is a non-leaf tensor; retain its gradient so it can be inspected
# after backward().
t1.retain_grad()
output = loss(t1, target)
output.backward()
print(t1)  # log-probabilities per sample
print(t1.grad)  # -1/3 at each (sample, target-class) entry, 0 elsewhere
print(output)  # scalar mean negative log-likelihood
print(inp.grad)  # (softmax(inp) - one_hot(target)) / 3
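
# --- Sanity checks (not part of the original snippet; a minimal sketch) ---
# nn.CrossEntropyLoss is documented to fuse LogSoftmax and NLLLoss, so it
# should reproduce `output` when applied to the raw scores directly.
ce = nn.CrossEntropyLoss(reduction="mean")
print(torch.allclose(ce(inp.detach(), target), output))  # expected: True

# For mean-reduced cross-entropy, the analytic gradient w.r.t. the raw
# scores is (softmax(inp) - one_hot(target)) / batch_size; compare it
# against the autograd result stored in inp.grad above.
with torch.no_grad():
    probs = torch.softmax(inp, dim=1)
    one_hot = torch.zeros_like(probs)
    one_hot[torch.arange(len(target)), target] = 1.0
    manual_grad = (probs - one_hot) / len(target)
print(torch.allclose(manual_grad, inp.grad))  # expected: True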
