import torch
import torchvision
from torch.utils.data import DataLoader
from torchvision import datasets,transforms

class imagenet():
    """Build ImageNet-style train/val DataLoaders from ImageFolder directories.

    Args:
        path: root directory that contains ``train`` and ``val`` subfolders.
              Must end with a path separator (the dirs are built by string
              concatenation, preserving the original call convention).
        b_sz: training batch size.
        n_workers: number of DataLoader worker processes.
    """
    def __init__(self, path=None, b_sz=None, n_workers=None):
        # Standard ImageNet channel statistics (used by torchvision pretrained models).
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
        # Training augmentation. Bug fix: RandomSizedCrop was deprecated and
        # removed from torchvision; RandomResizedCrop is the same transform.
        self.tf_train = transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
        # Bug fix: validation previously reused the *random* training transform,
        # which makes evaluation non-deterministic. Use the conventional
        # deterministic Resize(256) + CenterCrop(224) evaluation pipeline.
        self.tf_val = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])
        self.train_dir = path + 'train'
        self.val_dir = path + 'val'
        self.train_set = datasets.ImageFolder(self.train_dir, self.tf_train)
        self.val_set = datasets.ImageFolder(self.val_dir, self.tf_val)
        self.train_loader = torch.utils.data.DataLoader(
                            self.train_set, batch_size=b_sz, shuffle=True, num_workers=n_workers)
        # Validation order is irrelevant; shuffle=False avoids wasted work.
        self.val_loader = torch.utils.data.DataLoader(
                            self.val_set, batch_size=100, shuffle=False, num_workers=n_workers)

    def random_split(self):
        # Placeholder kept for interface parity with the CIFAR10 class below.
        pass

class CIFAR10():
    """CIFAR-10 train/validation DataLoaders with a random 80/20 split.

    Args:
        path: root directory holding the (already downloaded) CIFAR-10 files.
        b_sz: batch size for both loaders.
        n_workers: number of DataLoader worker processes.
    """
    def __init__(self, path=None, b_sz=None, n_workers=None):
        self.tf_train = transforms.Compose([
            # Pad 4 pixels on every side, then randomly crop back to 32x32.
            transforms.RandomCrop(32, padding=4),
            # Flip horizontally with probability 0.5.
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            # Per-channel (R, G, B) mean / std of the CIFAR-10 training set.
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
        # download=False: the dataset files are expected to exist under `path`.
        self.train_db = torchvision.datasets.CIFAR10(root=path, train=True, download=False, transform=self.tf_train)
        self.b_sz = b_sz
        self.n_workers = n_workers
        self.random_split()

    def random_split(self):
        """Randomly split the training set 80/20 and build both loaders.

        Bug fix: the split sizes were hard-coded as [40000, 10000], which
        crashes for any dataset whose length is not exactly 50000. Deriving
        them from len(self.train_db) yields the same 40000/10000 split on
        the full set while also working on subsets.
        """
        n_total = len(self.train_db)
        n_val = n_total // 5  # 20% held out for validation (10000 of 50000)
        train_set, val_set = torch.utils.data.random_split(self.train_db, [n_total - n_val, n_val])

        self.train_loader = torch.utils.data.DataLoader(
            train_set, batch_size=self.b_sz, shuffle=True, num_workers=self.n_workers)
        # Validation order is irrelevant; shuffle=False avoids wasted work.
        self.val_loader = torch.utils.data.DataLoader(
            val_set, batch_size=self.b_sz, shuffle=False, num_workers=self.n_workers)
