import os
from collections import Counter
from os.path import join

import scipy.io
from torch.utils.data.dataset import Dataset
from torchvision.datasets.folder import default_loader
from torchvision.datasets.utils import download_url, list_dir
from torchvision.transforms import transforms

# Training-time pipeline: upscale, random 448x448 crop (with padding) and a
# random horizontal flip for augmentation, then normalize each channel from
# [0, 1] to [-1, 1] (mean 0.5, std 0.5).
transform_train = transforms.Compose([
    transforms.Resize((550, 550)),
    transforms.RandomCrop(448, padding=8),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

# Evaluation-time pipeline: deterministic center crop at the same 448x448
# resolution, with the same normalization as training.
transform_test = transforms.Compose([
    transforms.Resize((550, 550)),
    transforms.CenterCrop(448),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

class DOGDateSet(Dataset):
    """Stanford Dogs dataset.

    Expects ``root`` to contain an ``Images/`` directory, an ``Annotation/``
    directory, and the split files ``train_list.mat`` / ``test_list.mat``
    (as distributed with the Stanford Dogs dataset).

    Each item is ``(image, target)`` where ``image`` is a transformed tensor
    (training augmentation when ``train=True``, deterministic test transform
    otherwise) and ``target`` is a 0-based class index.
    """

    def __init__(self, root, train=True):
        """
        Args:
            root (str): dataset root directory (see class docstring).
            train (bool): select the train split and augmentation pipeline
                when True, the test split otherwise.
        """
        self.root = root
        self.loader = default_loader
        self.train = train

        split = self.load_split()

        self.images_folder = join(self.root, 'Images')
        self.annotations_folder = join(self.root, 'Annotation')
        self._breeds = list_dir(self.images_folder)

        # Annotation names double as image names (relative paths under
        # Images/) once '.jpg' is appended.
        self._breed_images = [(annotation + '.jpg', idx) for annotation, idx in split]
        self._flat_breed_images = self._breed_images

    def __len__(self):
        """Number of samples in the selected split."""
        return len(self._flat_breed_images)

    def __getitem__(self, index):
        """Load, transform, and return the sample at ``index``.

        Returns:
            tuple: (image tensor, int class index).
        """
        image_name, target = self._flat_breed_images[index]
        image_path = join(self.images_folder, image_name)
        image = self.loader(image_path)

        image = transform_train(image) if self.train else transform_test(image)
        return image, target

    def load_split(self):
        """Read the split .mat file and return (annotation_name, label) pairs.

        The .mat file is loaded only once (the original implementation
        parsed the same file twice, once per key). Labels in the file are
        1-based and are converted to 0-based indices here.
        """
        mat_name = 'train_list.mat' if self.train else 'test_list.mat'
        mat = scipy.io.loadmat(join(self.root, mat_name))

        split = [item[0][0] for item in mat['annotation_list']]
        labels = [item[0] - 1 for item in mat['labels']]
        return list(zip(split, labels))

    def stats(self):
        """Print per-class sample statistics and return the class counts.

        Returns:
            dict-like: mapping of class index -> number of samples
            (a ``collections.Counter``, which compares equal to a dict).
        """
        counts = Counter(target for _, target in self._flat_breed_images)

        n_samples = len(self._flat_breed_images)
        n_classes = len(counts)
        # Guard against an empty dataset (the original raised
        # ZeroDivisionError here).
        avg = float(n_samples) / float(n_classes) if n_classes else 0.0
        print("%d samples spanning %d classes (avg %f per class)" % (n_samples, n_classes, avg))

        return counts


if __name__ == '__main__':
    from torch.utils.data.dataloader import DataLoader

    # Smoke test: for each split, build the dataset, fetch one mini-batch,
    # and print the dataset size, batch shape, and labels.
    for is_train in (True, False):
        dataset = DOGDateSet('../dogs', train=is_train)
        loader = DataLoader(dataset, batch_size=2, shuffle=False, num_workers=1)
        print(len(dataset))
        images, labels = next(iter(loader))
        print(images.shape)
        print(labels)