from data.image_folder import make_dataset
from data.base_dataset import BaseDataset, get_transform
from utils.util import normal
import os
import h5py
import numpy as np
from PIL import Image
import torch
class LightFieldDataset(BaseDataset):
    """Dataset of 4D light-field samples stored in a single HDF5 file.

    Expects ``<dataroot>/training.hdf5`` (or ``test.hdf5`` when
    ``opt.controller != 'train'``).  Every top-level HDF5 group must provide
    two datasets, ``LF_lr`` and ``LF_hr``, holding the low- and
    high-resolution light fields with layout
    ``(n_views, n_views, T, H, W, C)`` per group
    (assumed from the indexing below -- TODO confirm against the data
    generation script).  For each sample, four 1-D angular slices are
    exposed: the central row, the central column, and the two diagonals.
    """

    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to
                be a subclass of BaseOptions.  Reads ``opt.dataroot``,
                ``opt.controller`` ('train' selects training.hdf5),
                ``opt.times_steps``, ``opt.output_nc`` and ``opt.convert``.

        Raises:
            IOError -- if the expected HDF5 file does not exist.
        """
        BaseDataset.__init__(self, opt)

        self.isTrain = (opt.controller == 'train')
        self.times_steps = opt.times_steps

        filename = "training.hdf5" if self.isTrain else "test.hdf5"
        self.path = os.path.join(opt.dataroot, filename)
        if not os.path.exists(self.path):
            raise IOError('No such File %s.' % self.path)

        # Load every group's LR/HR light field and stack them along a new
        # leading sample axis.  The file handle is closed deterministically
        # (the original code leaked it).
        with h5py.File(self.path, 'r') as f:
            lr_groups = []
            hr_groups = []
            for key in f.keys():
                lr_groups.append(np.asarray(f[key]['LF_lr'][:]))
                hr_groups.append(np.asarray(f[key]['LF_hr'][:]))
        lr_data = np.array(lr_groups)
        hr_data = np.array(hr_groups)

        # Central angular index.  The original hard-coded 4 (i.e. the middle
        # of a 9x9 angular grid); n_views // 2 is identical for 9 views and
        # generalizes to other odd grid sizes.
        n_views = lr_data.shape[1]
        center = n_views // 2

        # Central row / central column of the angular grid.
        self.lr_data_row = lr_data[:, center]
        self.hr_data_row = hr_data[:, center]
        self.lr_data_col = lr_data[:, :, center]
        self.hr_data_col = hr_data[:, :, center]

        # Main diagonal (i, i) and anti-diagonal (i, n-1-i) via fancy indexing.
        diag = np.arange(n_views)
        self.lr_data_ldiag = lr_data[:, diag, diag]
        self.hr_data_ldiag = hr_data[:, diag, diag]
        self.lr_data_rdiag = lr_data[:, diag, diag[::-1]]
        self.hr_data_rdiag = hr_data[:, diag, diag[::-1]]

        self.transform = get_transform(self.opt,
                                       grayscale=(self.opt.output_nc == 1),
                                       convert=self.opt.convert)

    def _to_sequence(self, data):
        """Normalize each time step by its own max and stack into a tensor.

        Parameters:
            data (ndarray) -- one angular slice, shape (T, H, W, C).

        Returns a float tensor of shape (times_steps, C, H, W); each frame is
        divided by its maximum (assumes a strictly positive max -- a zero
        frame would produce NaN/inf, same as the original code) and converted
        from HWC to CHW layout.
        """
        frames = [np.transpose(data[i] / data[i].max(), (2, 0, 1))
                  for i in range(self.times_steps)]
        return torch.from_numpy(np.array(frames))

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index (int) -- sample index.

        Returns a dict with the per-frame-normalized LR/HR tensors for the
        four angular slices ('lr_row'/'hr_row', 'lr_col'/'hr_col',
        'lr_ldiag'/'hr_ldiag', 'lr_rdiag'/'hr_rdiag'), plus 'image_path' and
        'isTrain'.
        """
        path = self.path + "/img_" + str(index) + ".png"
        return {
            'lr_row': self._to_sequence(self.lr_data_row[index]),
            'hr_row': self._to_sequence(self.hr_data_row[index]),
            'lr_col': self._to_sequence(self.lr_data_col[index]),
            'hr_col': self._to_sequence(self.hr_data_col[index]),
            'lr_ldiag': self._to_sequence(self.lr_data_ldiag[index]),
            'hr_ldiag': self._to_sequence(self.hr_data_ldiag[index]),
            'lr_rdiag': self._to_sequence(self.lr_data_rdiag[index]),
            # BUGFIX: the original normalized lr_data_rdiag here as well, so
            # 'hr_rdiag' silently duplicated 'lr_rdiag'.
            'hr_rdiag': self._to_sequence(self.hr_data_rdiag[index]),
            "image_path": path,
            "isTrain": self.isTrain,
        }

    def __len__(self):
        """Return the total number of samples in the dataset."""
        return self.lr_data_row.shape[0]


