"""
paper: GridDehazeNet: Attention-Based Multi-Scale Network for Image Dehazing
file: train_data.py
about: build the training dataset
author: Xiaohong Liu
date: 01/08/19
"""

# --- Imports --- #
import torch.utils.data as data
from PIL import Image,ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# from random import randrange
# from torchvision.transforms import Compose, ToTensor, Normalize, Resize
import torchvision.transforms as tfs
from torchvision.transforms import functional as FF
import imghdr
import random
# --- Training dataset --- #
class TrainData(data.Dataset):
    """Paired low-light / ground-truth training dataset.

    Reads image filenames from ``<train_data_dir>/trainlist.txt``; each name
    is expected to exist under both ``lowlight_aligned/`` (degraded input)
    and ``gt_aligned/`` (clean target) inside ``train_data_dir``.
    """

    def __init__(self, crop_size, train_data_dir):
        """
        :param crop_size: (width, height) pair; ``crop_size[0]`` is the side
            of the square random crop. If it is a string, no random crop is
            performed (see get_images).
        :param train_data_dir: root directory containing ``trainlist.txt``,
            ``lowlight_aligned/`` and ``gt_aligned/``.
        """
        super().__init__()
        train_list = train_data_dir + 'trainlist.txt'
        with open(train_list) as f:
            haze_names = [line.strip() for line in f]

        self.haze_names = haze_names
        # Input and target share the same filename in their respective folders.
        self.gt_names = haze_names
        self.crop_size = crop_size
        self.train_data_dir = train_data_dir
        self.size = crop_size[0]

    def get_images(self, index):
        """Load, align, crop and augment the (input, target) pair at ``index``.

        :returns: (haze, gt) float tensors of shape (3, H, W) in [0, 1].
        :raises Exception: if the input image is smaller than the crop size,
            or a resulting tensor does not have exactly 3 channels.
        """
        crop_width, crop_height = self.crop_size
        haze_name = self.haze_names[index]
        gt_name = self.gt_names[index]

        haze = Image.open(self.train_data_dir + 'lowlight_aligned/' + haze_name).convert('RGB')
        clear = Image.open(self.train_data_dir + 'gt_aligned/' + gt_name).convert('RGB')

        width, height = haze.size
        # PIL ``size`` is (w, h) while CenterCrop expects (h, w); align the
        # target to the input's spatial size in case the pair differs slightly.
        clear = tfs.CenterCrop(haze.size[::-1])(clear)
        if width < crop_width or height < crop_height:
            raise Exception('Bad image size: {}'.format(gt_name))

        # Apply the SAME random crop to both images so they stay aligned.
        if not isinstance(self.size, str):
            i, j, h, w = tfs.RandomCrop.get_params(haze, output_size=(self.size, self.size))
            haze = FF.crop(haze, i, j, h, w)
            clear = FF.crop(clear, i, j, h, w)

        haze, gt = self.augData(haze.convert("RGB"), clear.convert("RGB"))

        # --- Check the channel is 3 or not --- #
        # (was ``is not 3``: identity comparison with an int literal)
        if haze.shape[0] != 3 or gt.shape[0] != 3:
            raise Exception('Bad image channel: {}'.format(gt_name))

        return haze, gt

    def augData(self, data, target):
        """Apply identical random flip/rotation to both images, then ToTensor.

        :returns: (data, target) float tensors in [0, 1].
        """
        rand_hor = random.randint(0, 1)
        rand_rot = random.randint(0, 3)
        # RandomHorizontalFlip(p) with p in {0, 1} flips both or neither.
        data = tfs.RandomHorizontalFlip(rand_hor)(data)
        target = tfs.RandomHorizontalFlip(rand_hor)(target)
        if rand_rot:
            data = FF.rotate(data, 90 * rand_rot)
            target = FF.rotate(target, 90 * rand_rot)
        data = tfs.ToTensor()(data)
        target = tfs.ToTensor()(target)
        return data, target

    def __getitem__(self, index):
        return self.get_images(index)

    def __len__(self):
        return len(self.haze_names)

    @staticmethod
    def cutblur(im1, im2, prob=1.0, alpha=1.0):
        """CutBlur augmentation on (..., H, W) tensors.

        With probability ``prob``, paste a random rectangle of ``im1`` into
        ``im2`` (or, half the time, the complement: the rectangle of ``im2``
        onto a clone of ``im1``). ``im1`` is returned unchanged; ``im2`` may
        be modified in place.

        NOTE(review): currently unused (all call sites were commented out).
        Fixed so it is actually callable: it lacked ``self``/``@staticmethod``,
        called the ``.shape`` attribute as a method, and relied on an
        unimported ``np`` — rewritten with the stdlib ``random`` module.

        :param im1: reference tensor, e.g. the degraded input.
        :param im2: tensor receiving the patch; same shape as ``im1``.
        :param prob: probability of applying the augmentation.
        :param alpha: mean of the normally-distributed cut ratio.
        :returns: (im1, im2) pair.
        :raises ValueError: if the two tensors differ in shape.
        """
        if im1.shape != im2.shape:
            raise ValueError("im1 and im2 have to be the same resolution.")

        if alpha <= 0 or random.random() >= prob:
            return im1, im2

        # Cut ratio ~ N(alpha, 0.01); clamp so the patch fits in the image.
        cut_ratio = random.gauss(alpha, 0.01)

        h, w = im2.shape[-2], im2.shape[-1]
        ch = min(max(int(h * cut_ratio), 0), h)
        cw = min(max(int(w * cut_ratio), 0), w)
        cy = random.randrange(h - ch + 1)
        cx = random.randrange(w - cw + 1)

        # Apply CutBlur to the inside or the outside of the rectangle.
        if random.random() > 0.5:
            im2[..., cy:cy + ch, cx:cx + cw] = im1[..., cy:cy + ch, cx:cx + cw]
        else:
            im2_aug = im1.clone()
            im2_aug[..., cy:cy + ch, cx:cx + cw] = im2[..., cy:cy + ch, cx:cx + cw]
            im2 = im2_aug

        return im1, im2