from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import pathlib
from PIL import Image
import os
from cat_dog_torch.settings import *
import numpy as np

class DogCatData(Dataset):
    """Dataset of cat/dog JPEG images stored flat under a root directory.

    Labels:
        training mode  -> 1 for 'dog' filenames, 0 otherwise (cat);
        test mode      -> the integer image id parsed from the filename
                          (test files are assumed to be named '<id>.jpg').
    """

    def __init__(self, root, split_func=None, test=False):
        """
        Args:
            root: directory containing the *.jpg images.
            split_func: optional callable that slices the (sorted) path
                list into a subset, e.g. a train or validation split.
            test: if True, __getitem__ returns the numeric file id as the
                label instead of the dog/cat class.
        """
        self.test = test
        # Sort the glob results: pathlib.glob() order is
        # filesystem-dependent, and an unsorted list would make the
        # train/val slices produced by split_func non-deterministic
        # (they could overlap or miss files between runs).
        imgs = sorted(pathlib.Path(root).glob('*.jpg'))
        # split_func carves the file list into train / val / test subsets.
        self.imgs = split_func(imgs) if split_func else imgs
        # ImageNet channel statistics — the standard normalization for
        # torchvision pretrained backbones.
        normalize = transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]
        )

        self.transforms = transforms.Compose([
            transforms.Resize(224),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize
        ])

    def get_image(self, image_path):
        """Load image_path and return the transformed image tensor."""
        return self.transforms(Image.open(image_path))

    @staticmethod
    def _label_from_name(filename, test):
        """Map a bare filename (extension stripped) to its label.

        In test mode the filename is the numeric image id; otherwise the
        presence of 'dog' in the name decides the binary class.
        """
        if test:
            return int(filename)
        return 1 if 'dog' in filename else 0

    def __len__(self):
        return len(self.imgs)

    def __getitem__(self, index):
        img_path = self.imgs[index]
        filename = os.path.basename(img_path).replace('.jpg', '')
        label = self._label_from_name(filename, self.test)
        # Reuse get_image instead of duplicating the transform pipeline.
        image = self.get_image(img_path)

        return image, label

# Fraction of the labelled images used for training; the remainder
# becomes the validation set.
_TRAIN_FRACTION = 0.7

def _train_split(paths):
    """Keep the leading 70% of the path list for training."""
    return paths[:int(_TRAIN_FRACTION * len(paths))]

def _val_split(paths):
    """Keep the trailing 30% of the path list for validation."""
    return paths[int(_TRAIN_FRACTION * len(paths)):]

train_dataset = DogCatData(root=TRAIN_DATA_PATH, split_func=_train_split)
test_dataset = DogCatData(root=TEST_DATA_PATH, test=True)
val_dataset = DogCatData(root=TRAIN_DATA_PATH, split_func=_val_split)

# Training loader shuffles each epoch so batch composition varies.
train_dataloader = DataLoader(
    dataset=train_dataset,
    shuffle=True,
    batch_size=BATCH_SIZE,
    num_workers=NUM_WORKERS
)
# Evaluation loaders keep the natural (sorted) order: shuffling test or
# validation data adds no value and makes per-batch output
# non-reproducible across runs.
test_dataloader = DataLoader(
    dataset=test_dataset,
    shuffle=False,
    batch_size=BATCH_SIZE,
    num_workers=NUM_WORKERS
)
val_dataloader = DataLoader(
    dataset=val_dataset,
    shuffle=False,
    batch_size=BATCH_SIZE,
    num_workers=NUM_WORKERS
)
if __name__ == '__main__':
    # Sanity check: report the split sizes and the shape of one batch.
    print(len(train_dataset))
    print(len(test_dataset))
    print(len(val_dataset))

    for image, label in test_dataloader:
        print('image batch dimensions: ', image.size())
        # e.g. torch.Size([32, 3, 224, 224])
        # Fixed typo in the output label ('batchdimensions' -> 'batch dimensions').
        print('label batch dimensions: ', label.size())
        # e.g. torch.Size([32])
