"""create dataset and dataloader"""
import logging
import torch
import torch.utils.data
import os
import sys
import cv2
import numpy as np
from data.util import imresize_np



def create_dataloader(dataset, dataset_opt, opt=None, sampler=None):
    """Wrap *dataset* in a sequential, single-process DataLoader.

    NOTE(review): ``dataset_opt``, ``opt`` and ``sampler`` are accepted for
    interface compatibility but currently ignored — the loader is hard-coded
    to batch size 1, no shuffling, no workers and no pinned memory.
    """
    loader_settings = {
        'batch_size': 1,
        'shuffle': False,
        'num_workers': 0,
        'pin_memory': False,
    }
    return torch.utils.data.DataLoader(dataset, **loader_settings)


def create_dataset(dataset_opt):
    """Instantiate the dataset class selected by ``dataset_opt['mode']``.

    The matching dataset class is imported lazily so that only the module
    for the requested mode needs to be importable.

    Args:
        dataset_opt (dict): dataset options; must contain a ``'mode'`` key
            naming one of the supported dataset types. The whole dict is
            forwarded to the dataset constructor.

    Returns:
        The constructed dataset instance.

    Raises:
        NotImplementedError: if ``dataset_opt['mode']`` is not recognized.
    """
    mode = dataset_opt['mode']
    # datasets for image restoration
    if mode == 'LQ':
        from data.LQ_dataset import LQDataset as D
    elif mode == 'LQGT':
        from data.LQGT_dataset import LQGTDataset as D
    elif mode == 'LQGT_rcan':
        from data.LQGT_rcan_dataset import LQGTDataset_rcan as D
    elif mode == 'LQ_label':
        from data.LQ_label_dataset import LQ_label_Dataset as D
    # datasets for video restoration
    elif mode == 'REDS':
        from data.REDS_dataset import REDSDataset as D
    elif mode == 'Vimeo90K':
        from data.Vimeo90K_dataset import Vimeo90KDataset as D
    elif mode == 'video_test':
        from data.video_test_dataset import VideoTestDataset as D
    else:
        raise NotImplementedError('Dataset [{:s}] is not recognized.'.format(mode))
    # removed dead code: a 'base' logger was fetched but never used
    # (the info() call had been commented out)
    return D(dataset_opt)



def generate_mod_LR_bic(src_path, save_path):
    """Generate x4 bicubically-downscaled copies of every image in *src_path*.

    Each image is first "mod-cropped" so both spatial dimensions are divisible
    by ``mod_scale``, then downscaled by ``1 / up_scale`` with
    ``imresize_np`` and written (same filename) into *save_path*.

    Args:
        src_path (str): directory containing the source (HR) images.
        save_path (str): directory that receives the LR outputs; created if
            it does not exist.

    Raises:
        SystemExit: with a nonzero status if *src_path* is not a directory.
    """
    # set parameters
    up_scale = 4
    mod_scale = 4

    sourcedir = src_path
    saveLRpath = save_path

    if not os.path.isdir(sourcedir):
        print('Error: No source data found')
        # exit with a failure status; the original exited with 0 (success)
        sys.exit(1)

    # ensure the output directory exists, otherwise cv2.imwrite fails silently
    os.makedirs(saveLRpath, exist_ok=True)

    # NOTE(review): no extension filter — every directory entry is attempted;
    # unreadable entries are skipped below rather than crashing on .shape
    filepaths = [f for f in os.listdir(sourcedir)]

    for filename in filepaths:
        image = cv2.imread(os.path.join(sourcedir, filename))
        if image is None:
            # cv2.imread returns None for unreadable / non-image files
            print('Warning: could not read {}, skipping'.format(filename))
            continue

        # modcrop: trim so height and width are multiples of mod_scale
        width = int(np.floor(image.shape[1] / mod_scale))
        height = int(np.floor(image.shape[0] / mod_scale))
        if len(image.shape) == 3:
            image_HR = image[0:mod_scale * height, 0:mod_scale * width, :]
        else:
            image_HR = image[0:mod_scale * height, 0:mod_scale * width]

        # LR: bicubic downscale by 1/up_scale (antialiasing enabled)
        image_LR = imresize_np(image_HR, 1 / up_scale, True)

        cv2.imwrite(os.path.join(saveLRpath, filename), image_LR)
