import torch
import torchvision
from torchvision import transforms
import numpy as np
import imageio as io
from skimage.transform import resize as skresize
import os
import glob

def discretize(t, num_bits):
    """Quantize a tensor (assumed in [0, 1]) onto a 2**num_bits level grid.

    Values are scaled to [0, 2**num_bits - 1], snapped to integer bin
    indices via ``torch.bucketize`` (a value exactly on a bin edge maps to
    that edge), and rescaled back into [0, 1].
    """
    levels = 2 ** num_bits - 1
    scaled = t * levels
    edges = torch.arange(2 ** num_bits + 1)
    binned = torch.bucketize(scaled, boundaries=edges)
    return binned / levels
    
class AFHQDataset(torchvision.datasets.VisionDataset):
    """AFHQ images loaded from a flat directory of ``*.jpg`` files.

    ``__getitem__`` returns ``(X, gt)`` where ``X`` has the full transform
    applied (resize, plus optional ambient degradation and/or inpainting
    mask) and ``gt`` is the clean resized image.
    """

    def __init__(self, ambient=False, degradation=None, train=True, input_shape=[3,128,128], num_bits=0, on_the_fly=True, data_root_path='/home/baiweimin/yifei/afhq/train/cat', augmentation='', flip=True, gt=False, inpainting=False, **kwargs):
        """Build the transform pipelines and index the image files.

        Args:
            ambient: if True, append ``degradation`` to the input transform.
            degradation: callable transform applied to degrade the input;
                expected to expose an ``rng`` with ``manual_seed`` when
                ``on_the_fly`` is False.
            train: True selects all but the last 500 files; False the last 500.
            input_shape: (C, H, W); only H and W are used for resizing.
            num_bits, flip, augmentation, gt: stored / currently unused knobs
                kept for interface compatibility.
            on_the_fly: if False (with ``ambient``), degradation noise is
                reseeded per image so it is reproducible across epochs.
            data_root_path: directory containing the ``*.jpg`` files.
            inpainting: if True, precompute random per-image binary masks.
        """
        split = 'train' if train else 'test'

        self.augmentation = augmentation

        # Base pipeline: HWC uint8 -> float CHW in [0, 1], resized to (H, W).
        transform = [
            transforms.ToTensor(),
            transforms.ConvertImageDtype(torch.float),
            transforms.Resize(input_shape[1:], antialias=True),
        ]
        # Ground-truth pipeline: identical resize, never degraded or masked.
        self.gt_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.ConvertImageDtype(torch.float),
            transforms.Resize(input_shape[1:], antialias=True),
        ])
        if ambient:
            transform.append(degradation)
        transform = transforms.Compose(transform)

        super().__init__(root=data_root_path, transform=transform, target_transform=None)

        # The last 500 files (in sorted order) form the held-out test split.
        filenames = sorted(glob.glob(os.path.join(self.root, '*.jpg')))
        if split == 'train':
            self.image_filenames = filenames[:-500]
        else:
            self.image_filenames = filenames[-500:]

        if ambient and not on_the_fly:
            # Fixed per-image seeds so each image always receives the same
            # degradation noise realization.
            self.noise_seeds = np.array(range(60000))
        self.ambient = ambient
        self.on_the_fly = on_the_fly
        self.degradation = degradation
        self.gt = gt
        self.inpainting = inpainting
        if inpainting:
            # Random binary masks keeping ~60% of pixels, shared across the
            # 3 channels. NOTE(review): masks are re-drawn on every dataset
            # construction (no fixed seed) — confirm that is intended.
            masks = torch.where(torch.rand(len(self.image_filenames), 1, input_shape[1], input_shape[2]) > 0.4, 1, 0)
            self.masks = masks.repeat(1, 3, 1, 1)

        print(f"transform: {transform}")
        print(f"gt_transform: {self.gt_transform}")

    def __getitem__(self, index: int):
        """Return ``(transformed_image, ground_truth_image)`` for ``index``."""
        X = io.imread(self.image_filenames[index])

        gt_X = X.copy()
        if self.ambient and not self.on_the_fly:
            # Reseed so image `index` gets its fixed noise realization.
            self.degradation.rng.manual_seed(int(self.noise_seeds[index]))

        if self.transform is not None:
            X = self.transform(X)

        if self.inpainting:
            X = self.masks[index] * X

        return X, self.gt_transform(gt_X)

    def __len__(self):
        return len(self.image_filenames)

class MNISTDataset(torchvision.datasets.MNIST):
    """MNIST wrapped as 3-channel images shifted to [-0.5, 0.5].

    Optionally pads 28x28 images to 32x32, quantizes to ``num_bits`` levels,
    and applies a degradation transform before the final -0.5 shift.
    """

    def __init__(self, ambient=False, degradation=None, train=True, power_of_two=False, num_bits=0, data_root_path='/home/baiweimin/yifei/ambientflow/mnist',  **kwargs):
        """Assemble the transform pipeline and defer to the MNIST base class.

        Args:
            ambient: accepted for interface parity; not used here (the
                degradation is gated on ``degradation`` itself).
            degradation: optional transform appended before the -0.5 shift.
            train: forwarded to ``torchvision.datasets.MNIST``.
            power_of_two: if True, pad 28x28 -> 32x32.
            num_bits: if nonzero, quantize pixel values via ``discretize``.
            data_root_path: MNIST data directory.
        """
        print(data_root_path)
        transform = [
            transforms.ToTensor(),
            # Replicate the single grayscale channel to 3 channels.
            transforms.Lambda(lambda x: torch.cat([x, x, x], 0)),
        ]
        if power_of_two:
            # Symmetric 2-pixel pad: 28x28 -> 32x32.
            transform.append(transforms.Pad((32 - 28) // 2))
        if num_bits:
            transform.append(
                transforms.Lambda(lambda x: discretize(x, num_bits))
            )
        if degradation is not None:
            transform.append(degradation)
        # Center pixel values around zero: [0, 1] -> [-0.5, 0.5].
        transform.append(transforms.Lambda(lambda x: x - 0.5))
        transform = transforms.Compose(transform)

        super().__init__(root=data_root_path, train=train, transform=transform)