import os
import random

import torch
def predeal(sufffix='', ratio=0.2,
            dataroot="/data1/yalin.huang/face_anti_spoofing_dataset/dataset2_all/Only_Face/"):
    """Split paired S0/DOLP face images into train/test folders on disk.

    Files in '<dataroot>/S0<sufffix>' are grouped by subject key (the part
    of the filename before the first '_', with a special case for
    'dataset3' files) so that the split is done per subject group.  For
    every S0 file selected, the paired DOLP file is copied as well.
    Existing 'train'/'test' folders are emptied before re-splitting, so
    the function is safe to re-run.

    Parameters:
        sufffix (str)  -- suffix of the 'S0'/'DOLP' folder names, e.g.
                          '_rgb' or '_IR' (name keeps its historical
                          triple-f typo for backward compatibility)
        ratio (float)  -- fraction of each group moved to the test split
        dataroot (str) -- root directory holding 'S0'+sufffix and
                          'DOLP'+sufffix folders (default: original
                          hard-coded dataset location)
    """
    from shutil import copy

    dir_S0_ori = os.path.join(dataroot, 'S0' + sufffix)
    dir_DOLP_ori = os.path.join(dataroot, 'DOLP' + sufffix)

    dir_S0_train = os.path.join(dir_S0_ori, 'train')
    dir_DOLP_train = os.path.join(dir_DOLP_ori, 'train')
    dir_S0_test = os.path.join(dir_S0_ori, 'test')
    dir_DOLP_test = os.path.join(dir_DOLP_ori, 'test')

    # Create each split folder on first run; otherwise empty it so the new
    # split fully replaces the previous one.  Each folder is handled
    # independently (the original only checked one of the four).
    for _dir in (dir_S0_train, dir_DOLP_train, dir_S0_test, dir_DOLP_test):
        if not os.path.exists(_dir):
            os.makedirs(_dir)
        else:
            for fname in os.listdir(_dir):
                os.remove(os.path.join(_dir, fname))

    files = os.listdir(dir_S0_ori)
    # The split folders live inside the source folder; exclude them.
    files.remove('test')
    files.remove('train')
    files.sort()

    # Group files by subject key so a subject never straddles the split.
    files_dict = {}
    for file in files:
        parts = file.split('_')
        if 'dataset3' in file:
            # dataset3 names need three components to form a unique key.
            key = parts[0] + '_' + parts[1] + parts[2][:1]
        else:
            key = parts[0]
        files_dict.setdefault(key, []).append(file)

    testfiles = []
    trainfiles = []
    for key in files_dict:
        # Shuffle within the group, then peel off `ratio` of it for test.
        random.shuffle(files_dict[key])
        for _ in range(int(len(files_dict[key]) * ratio)):
            testfiles.append(files_dict[key].pop())
        trainfiles += files_dict[key]

    def _dolp_name(name):
        # S0 name 'xxx.png.ext' pairs with DOLP name 'xxx.png_dolp.ext'.
        parts = name.split('.')
        return parts[0] + '.png_dolp.' + parts[2]

    def _copy_pairs(names, s0_dst, dolp_dst):
        # Copy each S0 file and its paired DOLP file into the split dirs.
        for name in names:
            copy(os.path.join(dir_S0_ori, name), os.path.join(s0_dst, name))
            dolp = _dolp_name(name)
            copy(os.path.join(dir_DOLP_ori, dolp), os.path.join(dolp_dst, dolp))

    _copy_pairs(testfiles, dir_S0_test, dir_DOLP_test)
    _copy_pairs(trainfiles, dir_S0_train, dir_DOLP_train)

    print('S0:trainLen:' + str(len(os.listdir(dir_S0_train)))
          + '  testLen:' + str(len(os.listdir(dir_S0_test))))
    print('DOLP:trainLen:' + str(len(os.listdir(dir_DOLP_train)))
          + '  testLen:' + str(len(os.listdir(dir_DOLP_test))))

if __name__ == '__main__':
    # Regenerate the train/test split for each modality folder suffix.
    for _suffix in ('', '_rgb', '_IR'):
        predeal(_suffix, 0.2)

from PIL import Image
from  numpy.fft import fft2,fftshift
from data.image_folder import make_dataset
from data.base_dataset import BaseDataset, get_transform
from utils.util import normal
class MultiFakeGenerationPolarizationDataset(BaseDataset):
    """Paired S0 / DOLP polarization image dataset.

    Loads S0 intensity images and their DOLP (degree of linear
    polarization) counterparts from '<dataroot>/S0<suffix>/{train,test}'
    and '<dataroot>/DOLP<suffix>/{train,test}' (the layout produced by
    predeal()).  Each item pairs an S0 image with the DOLP image at the
    same sorted index, optionally adding the DOLP image's centered 2-D
    FFT when opt.frequency is set.
    """

    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to
                be a subclass of BaseOptions.  Flags read here: controller
                ('train'/'test'), _suffix, dataroot, max_dataset_size,
                limit_ratio, test_all, direction, input_nc, output_nc,
                frequency.
        """
        BaseDataset.__init__(self, opt)

        self.isTrain = (opt.controller == 'train')
        self.limit_ratio = opt.limit_ratio
        self.S0_paths = []
        self.DOLP_paths = []

        # Decide which split folder(s) to read; in test mode, test_all
        # additionally folds the training images into the evaluation set.
        if opt.controller == 'train':
            splits = ['train']
        elif opt.controller == 'test':
            splits = ['test'] + (['train'] if opt.test_all else [])
        else:
            splits = []

        for split in splits:
            # NOTE: self.dir_S0/self.dir_DOLP end up pointing at the last
            # split loaded, matching the original behavior.
            self.dir_S0 = os.path.join(opt.dataroot, 'S0' + opt._suffix, split)
            self.dir_DOLP = os.path.join(opt.dataroot, 'DOLP' + opt._suffix, split)
            self.S0_paths += sorted(make_dataset(self.dir_S0, opt.max_dataset_size))
            self.DOLP_paths += sorted(make_dataset(self.dir_DOLP, opt.max_dataset_size))

        self.S0_size = len(self.S0_paths)    # size of the S0 set
        self.DOLP_size = len(self.DOLP_paths)  # size of the DOLP set

        # Number of channels of the input image (swapped when the
        # translation direction is B-to-A).
        btoA = self.opt.direction == 'BtoA'
        input_nc = self.opt.output_nc if btoA else self.opt.input_nc

        # Both modalities share the same transform configuration.
        self.transform_S0 = get_transform(self.opt, grayscale=(input_nc == 1))
        self.transform_DOLP = get_transform(self.opt, grayscale=(input_nc == 1))

        # When set, __getitem__ also returns the DOLP frequency spectrum.
        self.usefft = bool(opt.frequency)

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index (int)      -- a random integer for data indexing

        Returns a dictionary that contains:
            S0 (tensor)      -- the S0 intensity image
            SD (tensor)      -- the paired DOLP image
            S0_paths (str)   -- path of the S0 image
            SD_F (ndarray)   -- centered 2-D FFT of SD (only when
                                opt.frequency is set)
            isTrain (bool)   -- whether the dataset is in training mode
        """
        # Wrap both indices into range.  The original wrapped only the S0
        # index, so an index beyond DOLP_size raised IndexError.
        S0_path = self.S0_paths[index % self.S0_size]
        DOLP_path = self.DOLP_paths[index % self.DOLP_size]

        S0_img = Image.open(S0_path).convert('RGB')
        DOLP_img = Image.open(DOLP_path).convert('RGB')

        # Apply image transformations.
        S0 = self.transform_S0(S0_img)
        DOLP = self.transform_DOLP(DOLP_img)

        item = {'S0': S0, 'S0_paths': S0_path,
                'SD': DOLP,
                'isTrain': self.isTrain}
        if self.usefft:
            # Frequency-domain companion of the DOLP image.  (The original
            # also computed an S0 spectrum but never returned it.)
            item['SD_F'] = fftshift(fft2(DOLP.cpu().detach().numpy()))
        return item

    def __len__(self):
        """Return the number of samples exposed per epoch.

        This is the S0 image count scaled by opt.limit_ratio, which lets
        experiments run on only a fraction of the data.
        """
        return int(self.S0_size * self.limit_ratio)


