import glob
import random

import cv2
import numpy as np
import torch
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from torchvision.transforms import RandomAffine,Resize
from torchvision.transforms import ToTensor


def get_dataset_loader(config):
    """Build the training and validation DataLoaders described by *config*.

    Expected config keys: 'noise_level', 'size', 'datarootJI',
    'val_datarootJI', 'batchSize', 'val_batchSize', 'n_cpu'.

    Returns:
        (data_loader, val_data_loader) tuple of torch DataLoaders.
    """
    level = config['noise_level']  # augmentation strength scales with noise level

    def make_affine_transforms():
        """Return a fresh affine-augmentation pipeline (was triplicated inline)."""
        # fill value -1 matches the [-1, 1] intensity range used by the datasets.
        # NOTE(review): `fillcolor` was renamed `fill` in torchvision >= 0.9 and
        # later removed — kept as-is to match the project's pinned torchvision;
        # confirm against the installed version.
        return [
            RandomAffine(degrees=level,
                         translate=[0.02 * level, 0.02 * level],
                         scale=[1 - 0.02 * level, 1 + 0.02 * level],
                         fillcolor=-1),
            Resize(size=(config['size'], config['size'])),
        ]

    data_loader = DataLoader(
        ImageDatasetWithoutLI(config['datarootJI'],
                              transforms_1=make_affine_transforms(),
                              transforms_2=make_affine_transforms(),
                              transforms_3=make_affine_transforms(),
                              unaligned=False,
                              len_ratio=1.0),
        batch_size=config['batchSize'], shuffle=True, num_workers=0, drop_last=True
    )
    val_data_loader = DataLoader(
        ValDatasetWithoutLI(config['val_datarootJI'],
                            transforms_=[ToTensor()],
                            unaligned=False,
                            len_ratio=0.3),
        batch_size=config['val_batchSize'], shuffle=False, num_workers=config['n_cpu'])
    return data_loader, val_data_loader


class ImageDatasetWithoutLI(Dataset):
    """Paired training dataset (input A / ground-truth C) without the LI channel.

    Images are read as single-channel grayscale, rescaled to [-1, 1], given a
    leading channel dimension and passed through the configured transforms.
    """

    def __init__(self, root, count=None, transforms_1=None, transforms_2=None, transforms_3=None, unaligned=False,
                 len_ratio=1.0):
        # transform1 is applied to A, transform3 to C; transform2 is kept
        # for interface compatibility (it is not used by __getitem__).
        self.transform1 = transforms.Compose(transforms_1)
        self.transform2 = transforms.Compose(transforms_2)
        self.transform3 = transforms.Compose(transforms_3)

        self.files_A = sorted(glob.glob("%s/A/*" % root))
        self.files_C = sorted(glob.glob("%s/C/*" % root))  # ground-truth images

        # Optionally use only the first len_ratio fraction of each file list.
        self.files_A = self.files_A[0:int(len_ratio * len(self.files_A))]
        self.files_C = self.files_C[0:int(len_ratio * len(self.files_C))]

        self.unaligned = unaligned

    @staticmethod
    def _load_gray(path):
        """Read *path* as grayscale and rescale pixel values to [-1, 1]."""
        img = cv2.imread(path, 0)
        return (np.array(img) / 255) * 2 - 1

    def __getitem__(self, index):
        # Seed Python's RNG so the unaligned random pick below is reproducible.
        # NOTE(review): torchvision transforms draw from torch's RNG, so this
        # does NOT synchronise the RandomAffine applied to A and C — confirm
        # whether aligned augmentation was intended.
        seed = np.random.randint(2147483647)
        random.seed(seed)

        item_A = self._load_gray(self.files_A[index % len(self.files_A)])
        item_A = np.expand_dims(item_A, axis=0)  # (1, H, W) channel-first
        item_A = torch.from_numpy(item_A.astype(np.float32))
        item_A = self.transform1(item_A)

        if self.unaligned:
            # Unpaired: sample a random ground-truth image instead of the
            # index-matched one (the matched read is skipped entirely).
            item_C = self._load_gray(self.files_C[random.randint(0, len(self.files_C) - 1)])
        else:
            item_C = self._load_gray(self.files_C[index % len(self.files_C)])
        # Bug fix: the unaligned branch previously fed a raw 2-D numpy array to
        # the transform pipeline; both branches now use the same (1, H, W) tensor.
        item_C = np.expand_dims(item_C.astype(np.float32), axis=0)
        item_C = self.transform3(torch.from_numpy(item_C))

        # Debug visualisation — was hard-coded True, which made every sample
        # fetch block on cv2.waitKey(0). Disabled for training; flip to True
        # to eyeball a pair.
        if_show = False
        if if_show:
            show_real_B = (item_C.clone().detach().cpu().numpy()[0] + 1) * 255 / 2
            show_real_A = (item_A.clone().detach().cpu().numpy()[0] + 1) * 255 / 2
            cv2.imshow("A", show_real_A.astype(np.uint8))
            cv2.imshow("B", show_real_B.astype(np.uint8))
            cv2.waitKey(0)
            cv2.destroyAllWindows()

        return {'A': item_A, 'B': item_C, 'A_paths': self.files_A[index % len(self.files_A)]}

    def __len__(self):
        # Length is the larger list; the shorter one wraps via modulo indexing.
        return max(len(self.files_A), len(self.files_C))


class ValDatasetWithoutLI(Dataset):
    """Validation dataset of paired input (A) and ground-truth (C_origin) images."""

    def __init__(self, root, count=None, transforms_=None, unaligned=False, len_ratio=1.0):
        self.transform = transforms.Compose(transforms_)
        self.unaligned = unaligned
        self.files_A = sorted(glob.glob("%s/A/*" % root))
        self.files_B = sorted(glob.glob("%s/C_origin/*" % root))

        # Optionally use only the first len_ratio fraction of each file list.
        self.files_A = self.files_A[0:int(len_ratio * len(self.files_A))]
        self.files_B = self.files_B[0:int(len_ratio * len(self.files_B))]

    def _read(self, path):
        """Read *path* as grayscale, rescale to [-1, 1] and apply the transform."""
        img = cv2.imread(path, 0)
        img = (np.array(img) / 255) * 2 - 1
        return self.transform(img.astype(np.float32))

    def __getitem__(self, index):
        read_path = self.files_A[index % len(self.files_A)]
        item_A = self._read(read_path)

        if self.unaligned:
            # Bug fix: this branch previously used np.load on image files
            # (which fails on .png/.jpg) and skipped the [-1, 1]
            # normalisation; it now reads the randomly chosen file exactly
            # like the aligned branch.
            item_B = self._read(self.files_B[random.randint(0, len(self.files_B) - 1)])
        else:
            item_B = self._read(self.files_B[index % len(self.files_B)])
        return {'A': item_A, 'B': item_B, 'read_path': read_path}

    def __len__(self):
        # Length is the larger list; the shorter one wraps via modulo indexing.
        return max(len(self.files_A), len(self.files_B))
