import torch
import torch.utils.data
import random
import numpy as np
from pathlib import Path
from mynn.utils.img_util import rgb2ycbcr
from mynn.utils.registry import DATASET_REGISTRY
from mynn.utils import FileClient, imfrombytes, paired_random_crop, augment, img2tensor, get_mask


@DATASET_REGISTRY.register()
class ImageWithMaskDataset(torch.utils.data.Dataset):
    """Paired GT/LQ image dataset that also yields masks derived from the LQ image.

    File names come from a meta-info text file (first whitespace-separated
    token per line). For each index, the matching ground-truth and
    low-quality images are read, optionally reduced to the Y channel,
    paired-random-cropped and flip/rotation-augmented during training,
    and returned together with masks from ``get_mask`` as CHW float tensors.

    Args:
        dataset_opt (dict): Options with keys ``gt_root``, ``lq_root``,
            ``scale``, ``color_mode``, ``meta_info_file`` and, for training,
            ``gt_size``, ``use_hflip``, ``use_rot``.
        phase (str): Either ``'train'`` or ``'test'``.
    """

    def __init__(self, dataset_opt, phase='train'):
        super().__init__()

        assert phase in ['train', 'test']

        self.dataset_opt = dataset_opt
        # Created lazily in __getitem__ so the dataset object pickles
        # cleanly into DataLoader worker processes.
        self.file_client = None
        self.phase = phase
        self.dataroot_gt = Path(dataset_opt['gt_root'])
        self.dataroot_lq = Path(dataset_opt['lq_root'])
        self.scale = dataset_opt['scale']
        self.color_mode = dataset_opt['color_mode']

        if self.phase == 'train':
            self.gt_size = dataset_opt['gt_size']
            self.use_hflip = dataset_opt['use_hflip']
            self.use_rot = dataset_opt['use_rot']

        # NOTE(review): 'YCbCr' passes this assert but __getitem__ only
        # special-cases 'Y', so 'YCbCr' currently behaves like 'RGB' —
        # confirm whether a full-YCbCr conversion branch is missing.
        assert self.color_mode in ['RGB', 'YCbCr', 'Y']

        self.keys = []
        with open(dataset_opt['meta_info_file'], 'r') as fin:
            for line in fin:
                line = line.strip()
                if not line:
                    # Tolerate blank lines (e.g. a trailing newline at EOF).
                    continue
                # split() (no argument) is robust to repeated whitespace and
                # to lines without a second field; the previous two-value
                # unpack of line.split(' ') raised ValueError on both.
                img_name = line.split()[0]
                self.keys.append(f'{img_name}/')

    def get(self, index):
        """Alias for ``__getitem__`` (kept for callers using explicit calls)."""
        return self.__getitem__(index)

    def __getitem__(self, index):
        """Return a sample dict with keys 'lq', 'gt', 'mask_lq', 'mask_gt', 'key'."""
        if self.file_client is None:
            self.file_client = FileClient()

        img_name = self.keys[index]

        # Read GT image (HWC float32 in [0, 1], per imfrombytes float32=True).
        img_gt_path = self.dataroot_gt / img_name
        img_bytes = self.file_client.get(img_gt_path)
        img_gt = imfrombytes(img_bytes, float32=True)

        # Read LQ image.
        img_lq_path = self.dataroot_lq / img_name
        img_bytes = self.file_client.get(img_lq_path)
        img_lq = imfrombytes(img_bytes, float32=True)

        # Convert color space.
        if self.color_mode == 'Y':
            img_gt = rgb2ycbcr(img_gt, y_only=True)
            img_lq = rgb2ycbcr(img_lq, y_only=True)
            # Fix: y_only conversion typically yields a 2-D (H, W) array,
            # which would break the HWC->CHW permute below; restore a
            # trailing channel axis. TODO confirm rgb2ycbcr's return shape.
            if img_gt.ndim == 2:
                img_gt = np.expand_dims(img_gt, axis=2)
            if img_lq.ndim == 2:
                img_lq = np.expand_dims(img_lq, axis=2)

        if self.phase == 'train':
            # Paired crop keeps GT and LQ spatially aligned at the scale ratio.
            img_gt, img_lq = paired_random_crop(img_gt, img_lq, self.gt_size, self.scale, img_gt_path)

            # The same random flip/rotation is applied to both images.
            img_lq, img_gt = augment([img_lq, img_gt], self.use_hflip, self.use_rot)

        # Masks are derived from the (possibly cropped/augmented) LQ image.
        mask_lq, mask_gt = get_mask(img_lq)

        # HWC numpy arrays -> CHW torch tensors.
        img_lq = self._hwc_to_chw(img_lq)
        img_gt = self._hwc_to_chw(img_gt)
        mask_lq = self._hwc_to_chw(mask_lq)
        mask_gt = self._hwc_to_chw(mask_gt)

        return {'lq': img_lq, 'gt': img_gt, 'mask_lq': mask_lq, 'mask_gt': mask_gt, 'key': img_name}

    @staticmethod
    def _hwc_to_chw(array):
        """Convert an HWC numpy array to a CHW torch tensor (shares memory)."""
        return torch.from_numpy(array).permute(2, 0, 1)

    def __len__(self):
        return len(self.keys)
