# Courtesy of https://github.com/xinntao/BasicSR

import random
import numpy as np
import cv2
import torch
import torch.utils.data as data

import os

import math
from torchvision.utils import make_grid
import torchvision.datasets as dsets
import matplotlib.pyplot as plt

import re
import pickle
from PIL import Image
from torch.utils.data import Dataset
from torch.utils.data.sampler import Sampler

########################################################################
# purify.py 
class custom_dataset(data.Dataset):
    """Dataset that walks a directory tree and yields 3-channel RGB tensors.

    Each item is ``(image_tensor, middle_part, tail_part)`` where the two
    path fragments come from fixed-offset slicing of the full path
    (ImageNet layout: the last 28 characters are the file name).
    """

    def __init__(self, dir_root):
        super(custom_dataset, self).__init__()

        self.paths = get_image_paths(dir_root)
        # Length of the root-directory string; used below to strip the root
        # prefix from each full path when slicing out the relative portion.
        self.len = len(dir_root)

    def __getitem__(self, index):
        # Load as HWC, BGR, float32 in [0, 1].
        file_path = self.paths[index]
        array = read_img(file_path)

        # BGR -> RGB, then HWC -> CHW, numpy -> float tensor.
        if array.shape[2] == 3:
            array = array[:, :, [2, 1, 0]]
        image = torch.from_numpy(
            np.ascontiguousarray(np.transpose(array, (2, 0, 1)))
        ).float()

        if image.size(0) == 1:
            # Replicate a greyscale channel into three.
            image = torch.cat((image, image, image), dim=0)
        if image.size(0) == 4:
            # Drop the alpha channel.
            image = image[:3, :, :]

        # Fixed-width slicing (ImageNet): the middle slice is the class
        # sub-directory, the last 28 characters are the file name.
        return image, file_path[self.len:-28], file_path[-28:]

    def __len__(self):
        return len(self.paths)

def _get_paths_from_images(path):
    '''get image path list from image folder'''
    assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
    images = []
    for dirpath, _, fnames in sorted(os.walk(path)):
        for fname in sorted(fnames):
            img_path = os.path.join(dirpath, fname)
            images.append(img_path)
    assert images, '{:s} has no valid image file'.format(path)
    return images


def get_image_paths(dataroot):
    """Return a new, globally sorted list of all file paths under *dataroot*."""
    return sorted(_get_paths_from_images(dataroot))


def read_img(path, size=None):
    '''Read an image file with cv2.

    Args:
        path (str): path to the image file.
        size (tuple | None): optional (width, height) to resize to.
            Previously this parameter was accepted but silently ignored;
            it is now honored when given, and the default (None) keeps
            the old behavior of returning the image at its native size.

    Returns:
        Numpy float32 array, HWC, BGR channel order, values in [0, 1].
        Greyscale images are expanded to HxWx1; images with more than
        three channels (e.g. RGBA) are trimmed to the first three.

    Raises:
        FileNotFoundError: if cv2 cannot read the file.
    '''
    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    if img is None:
        # cv2.imread signals failure by returning None instead of raising;
        # fail loudly here rather than with a confusing AttributeError below.
        raise FileNotFoundError('cannot read image: {:s}'.format(path))

    if size is not None:
        # Honor the resize request (the original signature ignored `size`).
        img = cv2.resize(img, size)

    img = img.astype(np.float32) / 255.
    if img.ndim == 2:
        img = np.expand_dims(img, axis=2)
    # some images have 4 channels
    if img.shape[2] > 3:
        img = img[:, :, :3]
    return img


def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
    """Convert a torch Tensor into a numpy image array.

    Input: 4D (B, 3/1, H, W), 3D (C, H, W) or 2D (H, W) tensor, any range,
    RGB channel order.
    Output: 3D (H, W, C) or 2D (H, W) array, [0, 255] np.uint8 by default
    (any other out_type keeps the normalized [0, 1] range).
    """
    low, high = min_max
    tensor = tensor.squeeze().float().cpu().clamp_(low, high)
    tensor = (tensor - low) / (high - low)  # normalize to [0, 1]

    ndim = tensor.dim()
    if ndim == 4:
        batch = len(tensor)
        grid = make_grid(tensor, nrow=int(math.sqrt(batch)), normalize=False)
        img_np = grid.numpy()[[2, 1, 0], :, :].transpose(1, 2, 0)  # HWC, BGR
    elif ndim == 3:
        img_np = tensor.numpy()[[2, 1, 0], :, :].transpose(1, 2, 0)  # HWC, BGR
    elif ndim == 2:
        img_np = tensor.numpy()
    else:
        raise TypeError(
            'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(ndim))

    if out_type == np.uint8:
        # numpy's uint8 cast truncates, so round explicitly first.
        img_np = (img_np * 255.0).round()
    return img_np.astype(out_type)

# From CAFD-main train_denoiser.py / dataload.py
# NOTE(review): check whether this section is still needed; delete if unused.
# ======================================= DATA LOAD ========================================
def sort_key(s):
    """Natural-sort key: split *s* so embedded digit runs compare numerically.

    'img10.png' -> ['img', 10, '.png'], so 'img2' sorts before 'img10'.
    """
    pieces = re.split(r'(\d+)', s)
    # Odd positions hold the captured digit runs; compare those as ints.
    pieces[1::2] = [int(digits) for digits in pieces[1::2]]
    return pieces


class ImageDataset(Dataset):
    """Paired clean/adversarial image dataset.

    Clean images are listed from ``imgcln_dirs``; the matching adversarial
    image path is derived by substituting the adversarial directory into
    each clean path. Both images receive the same single-image transform.

    Args:
        imgcln_dirs: directory holding the clean images.
        imgadv_dirs: directory holding the adversarial counterparts.
        transform: callable applied independently to each image.
    """

    def __init__(self, imgcln_dirs, imgadv_dirs, transform):
        self.imgcln_dirs = imgcln_dirs
        self.imgadv_dirs = imgadv_dirs
        self.img_names = self.__get_imgnames__()
        self.transform = transform

    def __get_imgnames__(self):
        # Natural-sort the clean-image file names, then prepend the dir.
        names = sorted(os.listdir(self.imgcln_dirs), key=sort_key)
        return [os.path.join(self.imgcln_dirs, name) for name in names]

    def __len__(self):
        return len(self.img_names)

    def __getitem__(self, idx):
        clean_path = self.img_names[idx]
        clean = Image.open(clean_path).convert('RGB')

        # Mirror the clean path into the adversarial directory.
        adv_path = clean_path.replace(self.imgcln_dirs, self.imgadv_dirs)
        adv = Image.open(adv_path).convert('RGB')

        if self.transform:
            clean = self.transform(clean)
            adv = self.transform(adv)

        return clean, adv


class PairedImageDataset(Dataset):
    """Paired clean/adversarial dataset with a joint (paired) transform.

    Unlike ``ImageDataset``, the transform here receives both images at
    once, as ``transform(adv, clean)``, so paired augmentations (e.g. a
    shared random crop) stay in sync between the two images.

    Args:
        imgcln_dirs: directory holding the clean images.
        imgadv_dirs: directory holding the adversarial counterparts.
        transform: callable taking (adv, clean) and returning (adv, clean).
    """

    def __init__(self, imgcln_dirs, imgadv_dirs, transform):
        self.imgcln_dirs = imgcln_dirs
        self.imgadv_dirs = imgadv_dirs
        self.img_names = self.__get_imgnames__()
        self.transform = transform

    def __get_imgnames__(self):
        # Natural-sort the clean-image file names, then prepend the dir.
        names = sorted(os.listdir(self.imgcln_dirs), key=sort_key)
        return [os.path.join(self.imgcln_dirs, name) for name in names]

    def __len__(self):
        return len(self.img_names)

    def __getitem__(self, idx):
        clean_path = self.img_names[idx]
        clean = Image.open(clean_path).convert('RGB')

        # Mirror the clean path into the adversarial directory.
        adv_path = clean_path.replace(self.imgcln_dirs, self.imgadv_dirs)
        adv = Image.open(adv_path).convert('RGB')

        if self.transform:
            # Joint transform keeps paired augmentations aligned.
            adv, clean = self.transform(adv, clean)

        return clean, adv
        

class AdvImageDataset(Dataset):
    """Single-directory image dataset that also yields each file's name.

    Args:
        img_dirs: directory to list images from.
        transform: callable applied to each loaded image.
    """

    def __init__(self, img_dirs, transform):
        self.img_dirs = img_dirs
        self.img_names = self.__get_imgnames__()
        self.transform = transform

    def __get_imgnames__(self):
        # Natural-sort the file names, then join with the directory.
        names = sorted(os.listdir(self.img_dirs), key=sort_key)
        return [os.path.join(self.img_dirs, name) for name in names]

    def __len__(self):
        return len(self.img_names)

    def __getitem__(self, idx):
        image_path = self.img_names[idx]
        image = Image.open(image_path)
        if self.transform:
            image = self.transform(image)

        return image, os.path.basename(image_path)


class CleanImageDataset(Dataset):
    """Single-directory image dataset yielding only the transformed image.

    Args:
        img_dirs: directory to list images from.
        transform: callable applied to each loaded image.
    """

    def __init__(self, img_dirs, transform):
        self.img_dirs = img_dirs
        self.img_names = self.__get_imgnames__()
        self.transform = transform

    def __get_imgnames__(self):
        # Natural-sort the file names, then join with the directory.
        names = sorted(os.listdir(self.img_dirs), key=sort_key)
        return [os.path.join(self.img_dirs, name) for name in names]

    def __len__(self):
        return len(self.img_names)

    def __getitem__(self, idx):
        image_path = self.img_names[idx]
        image = Image.open(image_path)
        if self.transform:
            image = self.transform(image)

        return image
        



class EnlargedSampler(Sampler):
    """Distributed sampler that can enlarge the dataset by an integer ratio.

    Modified from torch.utils.data.distributed.DistributedSampler. Enlarging
    the dataset is useful for iteration-based training, saving the cost of
    restarting the dataloader after each (short) epoch.

    Args:
        dataset (torch.utils.data.Dataset): Dataset used for sampling.
        num_replicas (int | None): Number of processes participating in
            the training. It is usually the world_size.
        rank (int | None): Rank of the current process within num_replicas.
        ratio (int): Enlarging ratio. Default: 1.
    """

    def __init__(self, dataset, num_replicas, rank, ratio=1):
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # Per-replica sample count after enlargement, rounded up so that
        # every replica draws exactly the same number of indices.
        self.num_samples = math.ceil(len(self.dataset) * ratio / self.num_replicas)
        self.total_size = self.num_samples * self.num_replicas

    def __iter__(self):
        # Deterministic shuffle seeded by the epoch, so that every replica
        # generates the same permutation for a given epoch.
        gen = torch.Generator()
        gen.manual_seed(self.epoch)
        perm = torch.randperm(self.total_size, generator=gen).tolist()

        # Wrap indices past the real dataset size back into range.
        size = len(self.dataset)
        wrapped = [idx % size for idx in perm]

        # Each replica takes a strided slice of the shared permutation.
        picked = wrapped[self.rank:self.total_size:self.num_replicas]
        assert len(picked) == self.num_samples

        return iter(picked)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        # Called by the training loop so each epoch reshuffles differently.
        self.epoch = epoch