import torch
import torch.nn as nn

# Demo: BatchNorm over 3D / 4D / 5D inputs, then Dropout.

# BatchNorm1d(C) expects input of shape (N, C) or (N, C, L);
# here C=3, normalizing over the channel dimension (index 1).
a = torch.randn([13, 3, 5])
print(a)
bn1 = nn.BatchNorm1d(3)
print(bn1(a))
print("=====================================================")
# 4-dimensional input: BatchNorm2d(C) expects (N, C, H, W); here C=2.
b = torch.randn([13, 2, 3, 4])
bn2 = nn.BatchNorm2d(2)
print(bn2(b))
print("====================================================")
# 5-dimensional input: BatchNorm3d(C) expects (N, C, D, H, W); here C=3.
c = torch.randn([2, 3, 4, 5, 6])
bn3 = nn.BatchNorm3d(3)
print(bn3(c))

# Dropout zeroes each element with probability p=0.5 (module is in
# training mode by default, so dropout is active).
# BUG FIX: the original called `dropout()` with no input tensor, which
# raises TypeError — the module must be applied to a tensor.
dropout = nn.Dropout(p=0.5)
dropped = dropout(a)
print(dropped)

# p = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])
# train = datasets.MNIST('data', transform=p, train=True, download=True)
# train_loader = DataLoader(train, batch_size=16, shuffle=True)
# samples = iter(train_loader)
# images, labels = next(samples)
#
# # images.view(images.shape[0], -1).shape
# images.reshape(images.shape[0], -1)
# # torch.flatten(images, 2).shape
#
# net = MyNet()
# loss_fun = nn.CrossEntropyLoss()
# optim = torch.optim.SGD(net.parameters(), lr=0.01)
#
# losses = []
# acces = []
# num_epoch = 1
#
# # Take the index of the maximum value in each row
# a = torch.randn(3, 4)
# val, index = a.max(dim=1)
# idx = a.argmax(1)
#
# for epoch in range(num_epoch):
#     train_loss = 0.0
#     train_acc = 0.0
#     net.train()
#     for img, label in train_loader:
#         img = img.view(img.shape[0], -1)
#         out = net(img)
#         optim.zero_grad()
#         loss = loss_fun(out, label)
#         loss.backward()
#         optim.step()
#         pred = out.argmax(dim=1)
#         num_correct = (pred==label).sum().item()
#         acc = num_correct/img.shape[0]
#         train_acc += acc
#         train_loss += loss.item()
#     losses.append(train_loss/len(train_loader))
#     acces.append(train_acc/len(train_loader))
#     print(f'Epoch:{epoch + 1}, train_loss:{train_loss / len(train_loader):.4f}, train_acc:{train_acc / len(train_loader):.4f}')