from torch.utils.data import Dataset
import torch
from PIL import Image
import os
from torchvision import datasets, models, transforms
from torch.utils.data import DataLoader
# Training-time preprocessing pipeline: random-crop + horizontal-flip
# augmentation, then tensor conversion and per-channel normalization.
# The mean/std triples are the standard ImageNet statistics commonly
# used with torchvision's pretrained models.
data_transforms = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

class MyData(Dataset):
    """Image dataset rooted at ``root_dir/label_dir``.

    Each item is a ``(image, label)`` pair where ``label`` is 1 when the
    class directory is named ``"real"`` and 0 otherwise (e.g. ``"fake"``).
    """

    def __init__(self, root_dir, label_dir, transform=None):
        """
        Args:
            root_dir: top-level data directory, e.g. ``"testdata"``.
            label_dir: class sub-directory name (``"real"`` or ``"fake"``).
            transform: optional callable applied to each loaded PIL image.
        """
        self.root_dir = root_dir
        self.label_dir = label_dir
        self.path = os.path.join(self.root_dir, self.label_dir)
        # Snapshot the directory listing once; indexing into this list
        # defines the dataset's item order.
        self.img_path = os.listdir(self.path)
        self.transform = transform

    def __getitem__(self, idx):
        """Load and return the ``idx``-th ``(image, label)`` pair."""
        img_name = self.img_path[idx]  # e.g. '0013035.jpg'
        # Reuse the directory path computed in __init__.
        img_item_path = os.path.join(self.path, img_name)
        # Force 3-channel RGB so grayscale or RGBA files do not break
        # downstream transforms that expect 3 channels (e.g. Normalize
        # with three mean/std values).
        img = Image.open(img_item_path).convert("RGB")
        if self.transform is not None:
            img = self.transform(img)
        # Binary label: 1 = "real" class directory, 0 = anything else.
        label = 1 if self.label_dir == "real" else 0
        return img, label

    def __len__(self):
        """Number of files found in the class directory."""
        return len(self.img_path)


def divideDataset(root_dir="testdata", transform=data_transforms):
    """Build the combined real+fake dataset and split it randomly.

    The combined data is first split 70/30 into (train+valid)/test, then
    the first part is split 70/30 again into train/valid, yielding
    roughly 49% / 21% / 30% of the total.

    Args:
        root_dir: data directory containing ``real`` and ``fake``
            sub-directories (default preserves the original behavior).
        transform: transform passed to each :class:`MyData` instance.

    Returns:
        Tuple ``(train_dataset, valid_dataset, test_dataset)``.
    """
    real_dataset = MyData(root_dir, "real", transform)
    fake_dataset = MyData(root_dir, "fake", transform)
    # Dataset.__add__ concatenates the two class datasets.
    custom_dataset = real_dataset + fake_dataset

    # First split: 70% train+valid, 30% test.
    train_size = int(len(custom_dataset) * 0.7)
    test_size = len(custom_dataset) - train_size
    train_dataset, test_dataset = torch.utils.data.random_split(
        custom_dataset, [train_size, test_size])

    # Second split: 70% of the remainder for training, rest for validation.
    train_size = int(len(train_dataset) * 0.7)
    valid_size = len(train_dataset) - train_size
    train_dataset, valid_dataset = torch.utils.data.random_split(
        train_dataset, [train_size, valid_size])

    return train_dataset, valid_dataset, test_dataset

if __name__ == '__main__':
    # Reuse divideDataset() instead of duplicating its split logic inline:
    # the original body repeated the same build-and-split steps verbatim.
    train_dataset, valid_dataset, test_dataset = divideDataset()

    # Sanity check: sizes of the three splits.
    print(len(train_dataset), len(valid_dataset), len(test_dataset))
    # Number of batches per training epoch at batch_size=16.
    print(len(DataLoader(train_dataset, batch_size=16, shuffle=True)))