import torch
import torch.nn as nn
from torch import autograd
import torch.nn.functional as F

# Demo: pixel-wise negative log-likelihood loss on a tiny 4-class, 2x2 map.
# Raw logits, layout (nClasses=4, height=2, width=2) before batching.
inputs_tensor = torch.FloatTensor([
	[[2, 4],
	 [1, 2]],
	[[5, 3],
	 [3, 0]],
	[[5, 3],
	 [5, 2]],
	[[4, 2],
	 [3, 2]],
])
print('--input size(nBatch x nClasses x height x width): ', inputs_tensor.shape)
# Add the batch dimension: shape becomes (1, 4, 2, 2).
inputs_tensor = torch.unsqueeze(inputs_tensor, 0)
print('--input size(nBatch x nClasses x height x width): ', inputs_tensor.shape)

# Per-pixel ground-truth class indices, shape (height, width);
# each value indexes into the nClasses axis of the input.
targets_tensor = torch.LongTensor([
	[0, 2],
	[2, 3]
])
# Add the batch dimension: shape becomes (1, 2, 2).
targets_tensor = torch.unsqueeze(targets_tensor, 0)
print('--target size(nBatch x height x width): ', targets_tensor.shape)


# autograd.Variable is deprecated since PyTorch 0.4 — tensors carry
# requires_grad directly. Clone so the raw logits above stay untouched.
inputs_variable = inputs_tensor.clone().requires_grad_(True)
# log_softmax must be taken over the class dimension (dim=1); calling it
# without `dim` is deprecated and errors on recent PyTorch versions.
inputs_variable = F.log_softmax(inputs_variable, dim=1)
print('--inputs_variable size(nBatch x nClasses x height x width): ', inputs_variable.shape)

# Targets are integer class indices and need no gradient.
targets_variable = targets_tensor
print('--targets_variable size(nBatch x height x width): ', targets_variable.shape)

# nn.NLLLoss2d is deprecated (removed in recent PyTorch); nn.NLLLoss
# accepts spatial (N, C, H, W) log-probabilities with (N, H, W) targets
# and averages the per-pixel losses by default.
loss = nn.NLLLoss()

output = loss(inputs_variable, targets_variable)
print('--NLLLoss: {}'.format(output))