"""
paper: GridDehazeNet: Attention-Based Multi-Scale Network for Image Dehazing
file: val_data.py
about: build the validation/test dataset
author: Xiaohong Liu
date: 01/08/19
"""

# --- Imports --- #
import os

import numpy as np
import torch
import torch.utils.data as data
from PIL import Image
from torchvision.transforms import Compose, ToTensor, Normalize, Resize
 
 
# --- Validation/test dataset --- #
class ValData(data.Dataset): 
    def __init__(self, val_data_dir):
        super().__init__()
        val_list = val_data_dir +'data_list.txt'# ## 'final_test_datalist.txt''val_list_common.txt'#'eval15/datalist.txt'data_list
        with open(val_list) as f:
            contents = f.readlines()
            haze_names = [i.strip() for i in contents]
            gt_names = haze_names#[i.split('_')[0] + '.png' for i in haze_names]

        self.haze_names = haze_names
        self.gt_names = gt_names
        self.val_data_dir = val_data_dir
        self.data_list=val_list  
    def get_images(self, index):
        haze_name = self.haze_names[index]
        gt_name = self.gt_names[index]
        haze_img = Image.open(self.val_data_dir + 'input/' + haze_name)  #'eval15/low/'
        gt_img = Image.open(self.val_data_dir + 'gt/'+ gt_name) #'gt/''eval15/high/'
         
        haze_img_np =  (np.asarray(haze_img)/255.0)  
        #gt_img_np =  (np.asarray(gt_img)/255.0)  
        a = haze_img_np.shape  
        # a_1 =a[1] -np.mod(a[1],16)#-256
        # a_0 =a[0] -np.mod(a[0],16)#  -256
        #haze_img = torch.from_numpy(haze_img[0:a_0,0:a_1,:]).float()  
        #gt_img = torch.from_numpy(gt_img[0:a_0,0:a_1,:]).float() 
        #haze_crop_img = haze_img.resize((3600,2400), Image.ANTIALIAS)
        #gt_crop_img = gt_img.resize((3600,2400), Image.ANTIALIAS)
        haze_crop_img = haze_img#.crop((0, 0, 0 + a_1, 0+a_0))
        gt_crop_img = gt_img#.crop((0, 0, 0 + a_1, 0+a_0))
        
        #haze_img = torch.from_numpy(haze_crop_img).float()
        #gt_img = torch.from_numpy(gt_crop_img).float()
        width, height = haze_crop_img.size
        # haze1=haze_crop_img.resize((int(width/2),int(height/2)), Image.ANTIALIAS)
        # haze2=haze_crop_img.resize((int(width/4),int(height/4)), Image.ANTIALIAS)
        # haze3=haze_crop_img.resize((int(width/8),int(height/8)), Image.ANTIALIAS)
        # haze4=haze_crop_img.resize((int(width/16),int(height/16)), Image.ANTIALIAS)          
        
        # --- Transform to tensor --- # 
        #transform_haze = Compose([ToTensor() , Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        #transform_haze = Compose([ToTensor() , Normalize((0.64, 0.6, 0.58), (0.14,0.15, 0.152))])
        transform_haze = Compose([ToTensor()])
        transform_gt = Compose([ToTensor()])
        haze_img = transform_haze(haze_crop_img)
        gt_img = transform_gt(gt_crop_img)
        
        # haze1 = transform_haze(haze1)
        # haze2 = transform_haze(haze2)
        # haze3 = transform_haze(haze3)
        # haze4 = transform_haze(haze4)

        #factor= 0.8#(0.5 + 0.3*np.random.rand())#torch.mean(gt_img)/torch.mean(haze_img)
        #factor=torch.full_like(gt_img[0:1,:,:],factor) 
        return haze_img, gt_img, haze_name

    def __getitem__(self, index):
        res = self.get_images(index)
        return res

    def __len__(self):
        return len(self.haze_names)

