from GIB.pytorch_net.util import eval_tuple
import torch
import numpy as np
import torch.nn.functional as F
from torch_geometric.nn.inits import glorot, zeros

# is_cuda = False
# x = "(\"Nsampling\",'Bernoulli',0.1,0.5,\"norm\")"
# print(eval_tuple(x)[-1])
# if not isinstance(False, bool):
#     print(is_cuda)
# print(is_cuda if isinstance(is_cuda, str) else "cuda" if is_cuda else "cpu")
# if torch.cuda.is_available():
#     print(True)
# device = torch.device(0)

# x = torch.FloatTensor([1, 2, 3, 4, 5, 6])
# print(x)

# print(F.dropout(x, 0.6, True, inplace=True))
#
# att = torch.Tensor(1, 1, 2 * 7)
# torch.FloatTensor([[1, 2, 3], [4, 5, 6]])
# a = torch.Tensor(1, 2 * 7)


# a = [torch.tensor([0.7, 0.3, 0.2, 0.8]),
#      torch.tensor([0.5, 0.9, 0.5, 0.5])]
# print(torch.stack(a, 1))
# print(torch.stack(a, 1).shape)
# print(torch.stack(a, 1).mean(0))
# print(torch.stack(a, 1).mean(0).sum())
#
# for i in range(1):
#     print(i)
# a = torch.FloatTensor([[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]])
# b = torch.FloatTensor([[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]])
#
# print(torch.cat(a, b, -1))
# print(a.view(-1, 2))
# w = torch.FloatTensor([[1, 0], [1, 1], [0, 1]])
# print(w)
# print(torch.matmul(a, w))





# Demo: build a 2x3 float tensor and inspect it, then apply a softmax.
rows = [[1.0, 2.0, 3.0], [4.0, 6.0, 8.0]]
data = torch.FloatTensor(rows)
for item in (data, data.shape, data.type()):
    print(item)

# dim=0 normalizes down each column; dim=1 normalizes across each row.
prob = F.softmax(data, dim=1)
for item in (prob, prob.shape):
    print(item)
