import os

import numpy as np
from d2l import torch as d2l
import pandas as pd
import torch
import torchvision
from PIL import Image
# from torchvision import transforms


# train_ds, train_valid_ds = [
#     torchvision.datasets.ImageFolder(
#         os.path.join(data_dir, 'train_valid_test', folder),
#         transform=transform_train) for folder in ['train', 'train_valid']]
#
# valid_ds, test_ds = [
#     torchvision.datasets.ImageFolder(
#         os.path.join(data_dir, 'train_valid_test', folder),
#         transform=transform_test) for folder in ['valid', 'test']]

# train_iter, train_valid_iter = [
#     torch.utils.data.DataLoader(dataset, batch_size, shuffle=True,
#                                 drop_last=True)
#     for dataset in (train_ds, train_valid_ds)]
#
# valid_iter = torch.utils.data.DataLoader(valid_ds, batch_size, shuffle=False,
#                                          drop_last=True)
#
# test_iter = torch.utils.data.DataLoader(test_ds, batch_size, shuffle=False,
#                                         drop_last=False)

def read_data_bananas(is_train=True):
    """Read the banana detection dataset images and labels.

    Downloads and extracts the d2l 'banana-detection' dataset, then reads
    the chosen split's ``label.csv``.

    Args:
        is_train: read the 'bananas_train' split when True, else 'bananas_val'.

    Returns:
        A pair ``(image_paths, targets)``: ``image_paths`` is a list of paths
        to the image files (decoding is deferred to the Dataset), and
        ``targets`` is a float tensor of shape (num_images, 1, 5) holding
        (class, upper-left x, upper-left y, lower-right x, lower-right y)
        per image, divided by 256 — presumably to normalize coordinates to
        the 256x256 image size (TODO confirm against the dataset).
    """
    data_dir = d2l.download_extract('banana-detection')
    split = 'bananas_train' if is_train else 'bananas_val'
    csv_data = pd.read_csv(os.path.join(data_dir, split, 'label.csv'))
    csv_data = csv_data.set_index('img_name')
    # Collect file paths only; images are opened lazily in the Dataset.
    image_paths = [os.path.join(data_dir, split, 'images', f'{img_name}')
                   for img_name in csv_data.index]
    # Every row is (class, x1, y1, x2, y2); all images share the single
    # banana class (index 0).
    labels = [list(row) for _, row in csv_data.iterrows()]
    return image_paths, torch.tensor(labels).unsqueeze(1) / 256


# class Compose(object):
#     """Compose multiple transform functions."""
#     def __init__(self, transforms):
#         self.transforms = transforms
#
#     def __call__(self, image, target=None):
#         for trans in self.transforms:
#             image, target = trans(image, target)
#         return image, target


# Transform pipeline applied to every image in order: resize to the fixed
# 256x256 input size, then convert the PIL image to a CHW float tensor
# scaled to [0, 1].
#
# FIX: the original wrapped these in a Compose followed by a stray trailing
# comma, which silently made `train_transforms` a 1-tuple *containing* the
# Compose.  The consumer (`BananasDataset.__getitem__`) iterates and applies
# each element, which only worked by accident (the single element was the
# callable Compose).  A plain list of transforms behaves identically under
# that loop and says what is actually meant.
train_transforms = [
    torchvision.transforms.Resize(size=(256, 256)),
    torchvision.transforms.ToTensor(),
]


class BananasDataset(torch.utils.data.Dataset):
    """A customized dataset to load the banana detection dataset.

    Yields ``(image, target)`` pairs where ``image`` is a CHW float tensor
    in the 0-255 range and ``target`` is a (1, 5) tensor of
    (class, x1, y1, x2, y2) as produced by ``read_data_bananas``.
    """

    def __init__(self, is_train, transforms=None):
        """Load the split's image paths and labels into memory.

        Args:
            is_train: select the training split when True, validation
                otherwise.
            transforms: optional callable applied to each PIL image in
                ``__getitem__``.  When None (the default, preserving the
                original behavior), the module-level ``train_transforms``
                pipeline is applied instead.
        """
        self.features, self.labels = read_data_bananas(is_train)
        self.transforms = transforms
        print('read ' + str(len(self.features)) + (
            ' training examples' if is_train else ' validation examples'))

    def __getitem__(self, idx):
        # `features` holds file paths, not pixels: decode lazily from disk.
        image = Image.open(self.features[idx])
        target = self.labels[idx]
        if self.transforms is not None:
            # FIX: the `transforms` argument used to be stored but silently
            # ignored; honor it when the caller supplies one.
            image = self.transforms(image)
        else:
            # Apply each default transform in turn (Resize, then ToTensor).
            for t in train_transforms:
                image = t(image)
        # ToTensor() scales pixels to [0, 1]; multiply back to the 0-255
        # range — presumably to match torchvision.io.read_image's float
        # convention used elsewhere in the d2l pipeline (TODO confirm).
        return image * 255, target

    def __len__(self):
        return len(self.features)
