import torchvision
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# Convert PIL images to float tensors in [0, 1] with shape (C, H, W).
trans_totensor = torchvision.transforms.ToTensor()

# CIFAR-10 test split, downloaded on first run into ./CIFAR10_dataset.
data_set = torchvision.datasets.CIFAR10(
    "./CIFAR10_dataset",
    train=False,
    transform=trans_totensor,
    download=True,
)

# batch_size: number of images grouped into one batch.
# shuffle=False: batches come out in the same order on every pass;
#   with True, each pass over the loader would yield a different order.
# drop_last=True: discard the final batch if it has fewer than
#   batch_size samples; False would keep the short batch.
data_loader = DataLoader(
    dataset=data_set,
    batch_size=64,
    shuffle=False,
    num_workers=0,
    drop_last=True,
)

# print(data_set.classes)

# Peek at the first sample: a (3, 32, 32) tensor and its class index.
img, target = data_set[0]
print(img.shape)
print(target)
# Log every batch of images to TensorBoard under ./dataloader,
# one image grid per step, tagged per epoch.
writer = SummaryWriter("dataloader")
for epoch in range(2):
    # enumerate supplies the step counter; targets are unpacked but unused here.
    for step, (imgs, targets) in enumerate(data_loader):
        writer.add_images("epoch:{}".format(epoch), imgs, step)

    # print(imgs.shape)
    # print(targets)
writer.close()