# -*- coding: utf-8 -*-
import os.path as osp
import pickle
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms as transforms

BASEDIR = osp.dirname(osp.abspath(__file__))


def load(filename):
    """Unpickle *filename* and return the deserialized object.

    The 'latin1' encoding is needed to read Python-2 era pickles
    (such as the CIFAR batch files) under Python 3.
    """
    with open(filename, 'rb') as handle:
        return pickle.load(handle, encoding='latin1')


def get_cifar_as_numpy(path_file):
    """Read one CIFAR batch file and return (images, labels, base names).

    Images come back as an ndarray reshaped to (N, 32, 32, 3) in HWC
    channel order; labels and base names are plain lists.
    """
    batch = load(path_file)

    # Drop the trailing "_<index>" token to recover each file's base name.
    base_names = []
    for fname in batch['filenames']:
        pieces = fname.split('_')
        base_names.append("_".join(pieces[:-1]))

    images = batch['data'].reshape((-1, 3, 32, 32)).transpose((0, 2, 3, 1))
    return images, batch['labels'], base_names


class CifarDataset(Dataset):
    """Wrap pre-loaded CIFAR arrays as a torch ``Dataset``.

    *cifar_data* is a (images, labels, filenames) triple where images is
    an (N, H, W, C) array; samples are returned as CHW float32 tensors.
    """

    def __init__(self, cifar_data, transform=None):
        super(CifarDataset, self).__init__()
        images, labels, names = cifar_data
        self.imgArr = images
        self.labels = labels
        self.fnames = names
        self.transform = transform

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, index):
        # HWC array slice -> CHW float32 tensor.
        sample = torch.tensor(self.imgArr[index], dtype=torch.float32)
        sample = sample.permute((2, 0, 1))
        if self.transform:
            sample = self.transform(sample)
        return sample, self.labels[index], self.fnames[index]


# Location of the first CIFAR training batch, one directory above this file.
path_file = osp.join(BASEDIR, '..', r'data_batch_1')

# NOTE(review): the dataset yields float32 tensors straight from uint8 data
# (values 0-255, never divided by 255), so Normalize((0.5, ...), (0.5, ...))
# does not map them into [-1, 1] — confirm this is intended.
train_transform = transforms.Compose([
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

data_train = get_cifar_as_numpy(path_file)
train_set = CifarDataset(data_train, transform=train_transform)
train_loader = DataLoader(train_set, batch_size=8)

# Smoke test: pull a single batch and show its contents.
a_batch_data = next(iter(train_loader))
t_img, labels, fnames = a_batch_data
print(t_img.shape)
print(t_img.dtype)
print(labels)
print(fnames)
