# -*- coding: utf-8 -*-
"""
Created on Wed Feb 12 20:00:46 2020
@author: Administrator
"""
import os
import random
import numpy as np
import h5py
import torch
import torch.utils.data as udata
import argparse
from PIL import Image
class Dataset(udata.Dataset):
    r"""Implements torch.utils.data.Dataset over the HDF5 patch database."""

    def __init__(self, file, trainrgb=True, trainsyn=True, shuffle=False):
        super(Dataset, self).__init__()
        self.trainrgb = trainrgb
        self.trainsyn = trainsyn
        self.train_haze = file
        h5f = h5py.File(self.train_haze, 'r')
        self.keys = list(h5f.keys())
        if shuffle:
            random.shuffle(self.keys)
        h5f.close()

    def __len__(self):
        return len(self.keys)

    def __getitem__(self, index):
        # Re-open the file per item so the dataset also works with
        # multi-worker DataLoaders, where each worker needs its own handle.
        h5f = h5py.File(self.train_haze, 'r')
        key = self.keys[index]
        data = np.array(h5f[key])
        h5f.close()
        return torch.Tensor(data)
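# A minimal usage sketch for the Dataset class (illustrative only: the file
# name, batch size and worker count below are assumptions, not part of this
# script):
#
#   from torch.utils.data import DataLoader
#   dataset = Dataset('dataset.h5', shuffle=True)
#   loader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=4)
#   for batch in loader:
#       # batch has shape (B, types, channels, patch, patch); the clear image
#       # is index 0 along dim 1, followed by each degradation.
#       pass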
def data_augmentation(clear, mode):
    r"""Performs data augmentation on the input image stack.
    Args:
        clear: numpy array of shape (types, channels, H, W)
        mode: int. Choice of transformation to apply:
            0 - no transformation
            1 - flip up and down
            2 - rotate counterclockwise 90 degrees
            3 - rotate 90 degrees and flip up and down
            4 - rotate 180 degrees
            5 - rotate 180 degrees and flip
            6 - rotate 270 degrees
            7 - rotate 270 degrees and flip
    """
    # Move the spatial dims to the front so flipud/rot90 act on height and
    # width: (types, channels, H, W) -> (H, W, types, channels).
    clear = np.transpose(clear, (2, 3, 0, 1))
    if mode == 0:
        # original, no transformation
        pass
    elif mode == 1:
        # flip up and down
        clear = np.flipud(clear)
    elif mode == 2:
        # rotate counterclockwise 90 degrees
        clear = np.rot90(clear)
    elif mode == 3:
        # rotate 90 degrees and flip up and down
        clear = np.rot90(clear)
        clear = np.flipud(clear)
    elif mode == 4:
        # rotate 180 degrees
        clear = np.rot90(clear, k=2)
    elif mode == 5:
        # rotate 180 degrees and flip
        clear = np.rot90(clear, k=2)
        clear = np.flipud(clear)
    elif mode == 6:
        # rotate 270 degrees
        clear = np.rot90(clear, k=3)
    elif mode == 7:
        # rotate 270 degrees and flip
        clear = np.rot90(clear, k=3)
        clear = np.flipud(clear)
    else:
        raise ValueError('Invalid choice of image transformation')
    # Transpose back to the original (types, channels, H, W) layout.
    return np.transpose(clear, (2, 3, 0, 1))
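# Shape sanity check for data_augmentation (illustrative values, assuming
# square patches as produced below; not part of the original script):
#   x = np.random.rand(12, 3, 256, 256)   # (types, channels, H, W)
#   y = data_augmentation(x, 2)           # rotate 90 degrees
#   assert y.shape == x.shape             # layout preserved for square patches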
def img_to_patches(img, win, stride, Syn=True):
    # img: array of shape (types, channels, rows, cols) holding the clear
    # image and its degraded counterparts stacked along the first axis.
    typ, chl, raw, col = img.shape
    chl = int(chl)
    # Number of patches along each spatial dimension (use int to avoid the
    # uint8 overflow that occurs for large images).
    num_raw = int(np.ceil((raw - win) / stride + 1))
    num_col = int(np.ceil((col - win) / stride + 1))
    count = 0
    total_process = num_raw * num_col
    img_patches = np.zeros([typ, chl, win, win, total_process])
    if Syn:
        for i in range(num_raw):
            for j in range(num_col):
                # Clamp the window to the image border when the sliding
                # window would run past the bottom/right edge.
                if stride * i + win <= raw and stride * j + win <= col:
                    img_patches[:, :, :, :, count] = img[:, :, stride*i : stride*i + win, stride*j : stride*j + win]
                elif stride * i + win > raw and stride * j + win <= col:
                    img_patches[:, :, :, :, count] = img[:, :, raw-win : raw, stride*j : stride*j + win]
                elif stride * i + win <= raw and stride * j + win > col:
                    img_patches[:, :, :, :, count] = img[:, :, stride*i : stride*i + win, col-win : col]
                else:
                    img_patches[:, :, :, :, count] = img[:, :, raw-win : raw, col-win : col]
                # randint's upper bound is exclusive, so use 8 to make
                # mode 7 (rotate 270 degrees and flip) reachable.
                img_patches[:, :, :, :, count] = data_augmentation(img_patches[:, :, :, :, count], np.random.randint(0, 8))
                count += 1
    return img_patches
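# Worked example with hypothetical sizes: for a 480x640 image (H x W) with
# win=256 and stride=200, num_raw = ceil((480-256)/200 + 1) = 3 and
# num_col = ceil((640-256)/200 + 1) = 3, so the returned array has shape
# (types, channels, 256, 256, 9); windows that would overrun the border are
# clamped to it, so edge patches may overlap.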
def read_img(img):
    # Read an image from disk and scale pixel values to [0, 1].
    return np.array(Image.open(img)) / 255.
def Train_data(args):
    # Build the training patch database: each HDF5 entry stores one stack of
    # patches (clear image first, then each degradation) for one source image.
    file_list = os.listdir(f'{args.train_path}/{args.gt_name}')
    with h5py.File(args.data_name, 'w') as h5f:
        count = 0
        for i in range(len(file_list)):
            print(file_list[i])
            img_list = []
            # Ground-truth (clear) image comes first ...
            img_list.append(read_img(f'{args.train_path}/{args.gt_name}/{file_list[i]}'))
            # ... followed by the same image under each degradation type.
            for j in args.degradation_name:
                img_list.append(read_img(f'{args.train_path}/{j}/{file_list[i]}'))
            img = np.stack(img_list, 0)
            # (types, H, W, C) -> (types, C, H, W) before cropping into patches.
            img = img_to_patches(img.transpose(0, 3, 1, 2), args.patch_size, args.stride)
            for nx in range(img.shape[4]):
                data = img[:, :, :, :, nx]
                print(count, data.shape)
                h5f.create_dataset(str(count), data=data)
                count += 1
    # The context manager closes the HDF5 file, so no explicit close is needed.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Building the training patch database")
    parser.add_argument("--patch-size", type=int, default=256, help="Patch size")
    parser.add_argument("--stride", type=int, default=200, help="Size of stride")
    parser.add_argument("--train-path", type=str, default='./data/CDD-11_train', help="Train path")
    parser.add_argument("--data-name", type=str, default='dataset.h5', help="Data name")
    parser.add_argument("--gt-name", type=str, default='clear', help="HQ name")
    # nargs='+' lets the degradation folders be overridden on the command line;
    # type=list would split a single argument string into characters.
    parser.add_argument("--degradation-name", type=str, nargs='+',
                        default=['low', 'haze', 'rain', 'snow',
                                 'low_haze', 'low_rain', 'low_snow', 'haze_rain',
                                 'haze_snow', 'low_haze_rain', 'low_haze_snow'],
                        help="LQ name")
    args = parser.parse_args()
    Train_data(args)
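# Example invocation (the script file name "make_dataset.py" and the dataset
# layout are assumptions; ./data/CDD-11_train is expected to contain one
# sub-folder per name used above, e.g. clear/, low/, haze/, ...):
#   python make_dataset.py --train-path ./data/CDD-11_train --patch-size 256 \
#       --stride 200 --data-name dataset.h5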