import os
import numpy as np
import cv2
import random
from PIL import Image
import torchvision.transforms as transforms
from  numpy.fft import fft2,fftshift

def gendeal(srcdir, dstdir, sufffix='', srcmodality='fake_SAB', modality='DOLP',
            dataroot="/data1/yalin.huang/face_anti_spoofing_dataset/dataset1_live/HUT/Only_Face/"):
    """Copy generated (GAN-output) images from *srcdir* into a dataset layout.

    For every file name listed in '<dataroot>/<modality><sufffix>/{train,test}',
    the matching generated image '<stem>.png_s0_<srcmodality>.<ext>' is copied
    from *srcdir* into '<dstdir>/<modality><sufffix>/{train,test}' under the
    ORIGINAL file name, so the generated set mirrors the reference split.

    Parameters:
        srcdir (str)      -- directory holding the generated images
        dstdir (str)      -- dataset root to populate
        sufffix (str)     -- optional suffix appended to the modality folder name
                             (parameter name kept as-is for caller compatibility)
        srcmodality (str) -- modality tag embedded in the generated file names
        modality (str)    -- modality folder name ('DOLP', 'S0', 'S1', 'S2', ...)
        dataroot (str)    -- reference dataset whose file listing drives the copy
    """
    from shutil import copy

    dir_train_ori = os.path.join(dataroot, modality + sufffix, "train")
    dir_test_ori = os.path.join(dataroot, modality + sufffix, "test")
    dir_train_dst = os.path.join(dstdir, modality + sufffix, "train")
    dir_test_dst = os.path.join(dstdir, modality + sufffix, "test")

    # Ensure both destination splits exist, then clear any stale files.
    # (Previously the test dir was only created when the train dir was missing,
    # so a partially-existing destination crashed the cleanup pass.)
    for _dir in (dir_train_dst, dir_test_dst):
        os.makedirs(_dir, exist_ok=True)
        for fname in os.listdir(_dir):
            os.remove(os.path.join(_dir, fname))

    def _src_name(fname):
        # 'stem.mid.ext' -> 'stem.png_s0_<srcmodality>.ext'
        # (naming scheme of the generator's saved result images)
        parts = fname.split('.')
        return parts[0] + '.png_s0_' + srcmodality + '.' + parts[2]

    # Mirror each reference split into the destination split.
    for ori_dir, dst_dir in ((dir_test_ori, dir_test_dst),
                             (dir_train_ori, dir_train_dst)):
        for fname in os.listdir(ori_dir):
            copy(os.path.join(srcdir, _src_name(fname)),
                 os.path.join(dst_dir, fname))

    print(modality.lower() + ':  trainLen:' + str(len(os.listdir(dir_train_dst)))
          + '  testLen:' + str(len(os.listdir(dir_test_dst))))

if __name__ == '__main__':
    # Export the generated images of run 'cycle_gan_S012D_fft_v2' into the
    # dataset5_gen layout, one gendeal() pass per modality.
    results_dir = os.path.join("/home", "yalin.huang", "test", "results",
                               "cycle_gan_S012D_fft_v2", "HUT",
                               "Only_Face_latest", "images")
    dataset_dir = os.path.join("/home", "yalin.huang", "test", "datasets",
                               "multi_modal_Polarization", "dataset5_gen")
    # (generated-image tag, destination modality folder)
    exports = (("fake_S0D", "DOLP"),
               ("fake_S01", "S1"),
               ("fake_S02", "S2"),
               ("rec_SD0", "S0"))
    for src_tag, dst_modality in exports:
        gendeal(results_dir, dataset_dir, '', src_tag, dst_modality)



from data.image_folder import make_dataset
from data.base_dataset import BaseDataset, get_transform
class V202207110920Dataset(BaseDataset):
    """Unaligned multi-modal polarization dataset (S0 / S1 / S2 / DOLP).

    Image paths for each modality are read from per-modality text files
    stored under ``opt.dataroot`` and named
    ``'<MOD>__v<dataversion>_<controller>.txt'`` (comma-separated paths).
    Each sample bundles the four modality tensors and, when frequency
    supervision is enabled, their shifted 2-D Fourier transforms.
    """

    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        self.isTrain = (opt.controller == 'train')

        # Text files listing the image paths of each modality for the
        # current split (opt.controller is 'train' or 'test').
        self.dolpfiletxt = self._listfile(opt, 'DOLP', opt.controller)
        self.s0filetxt = self._listfile(opt, 'S0', opt.controller)
        self.s1filetxt = self._listfile(opt, 'S1', opt.controller)
        self.s2filetxt = self._listfile(opt, 'S2', opt.controller)

        self.DOLP_paths = self._load_paths(self.dolpfiletxt)
        self.S0_paths = self._load_paths(self.s0filetxt)
        self.S1_paths = self._load_paths(self.s1filetxt)
        self.S2_paths = self._load_paths(self.s2filetxt)

        # When testing on the whole dataset, append the training split too.
        # NOTE: the original code ran this append block twice back-to-back,
        # duplicating every training path; it is now executed exactly once.
        if not self.isTrain and self.opt.test_all:
            self.dolpfiletxt = self._listfile(opt, 'DOLP', 'train')
            self.s0filetxt = self._listfile(opt, 'S0', 'train')
            self.s1filetxt = self._listfile(opt, 'S1', 'train')
            self.s2filetxt = self._listfile(opt, 'S2', 'train')

            self.DOLP_paths += self._load_paths(self.dolpfiletxt)
            self.S0_paths += self._load_paths(self.s0filetxt)
            self.S1_paths += self._load_paths(self.s1filetxt)
            self.S2_paths += self._load_paths(self.s2filetxt)

        self.S0_size = len(self.S0_paths)  # dataset length is driven by S0

        # One transform per modality; all built from the same options.
        self.transform_S0 = get_transform(self.opt, grayscale=(self.opt.input_nc == 1))
        self.transform_S1 = get_transform(self.opt, grayscale=(self.opt.input_nc == 1))
        self.transform_S2 = get_transform(self.opt, grayscale=(self.opt.input_nc == 1))
        self.transform_DOLP = get_transform(self.opt, grayscale=(self.opt.input_nc == 1))
        # Frequency-domain tensors are always produced (previously gated on
        # opt.lambda_fre != 0, now hard-coded on as in the original).
        self.usefft = True

    @staticmethod
    def _listfile(opt, modality, split):
        """Return the path of the text file listing *modality* images of *split*."""
        # Note the double underscore: '<MOD>_' + '_v...' in the original code.
        return os.path.join(opt.dataroot,
                            modality + '_' + '_v' + opt.dataversion + '_' + split + '.txt')

    @staticmethod
    def _load_paths(txtfile):
        """Read one comma-separated path-list text file into list[str].

        ``np.str`` was removed in NumPy 1.24; the builtin ``str`` is the
        documented equivalent and works on all NumPy versions.
        """
        mat = np.genfromtxt(txtfile, delimiter=',', dtype=str)
        return list(mat[:].astype(str))

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index (int)      -- a random integer for data indexing

        Returns a dictionary with the four modality tensors ('S0', 'S1',
        'S2', 'SD'), the corresponding image paths, and — when
        ``self.usefft`` is set — their shifted 2-D Fourier transforms.
        """
        # The four path lists are assumed index-aligned (same ordering in
        # every modality's text file).
        S0_path = self.S0_paths[index]
        S1_path = self.S1_paths[index]
        S2_path = self.S2_paths[index]
        DOLP_path = self.DOLP_paths[index]

        S0_img = Image.open(S0_path).convert('RGB')
        S1_img = Image.open(S1_path).convert('RGB')
        S2_img = Image.open(S2_path).convert('RGB')
        DOLP_img = Image.open(DOLP_path).convert('RGB')

        # Apply the per-modality image transformations.
        S0 = self.transform_S0(S0_img)
        S1 = self.transform_S1(S1_img)
        S2 = self.transform_S2(S2_img)
        DOLP = self.transform_DOLP(DOLP_img)

        if self.usefft:
            # Centered 2-D spectra consumed by the frequency-domain loss.
            S0_F = fftshift(fft2(S0.detach().numpy()))
            S1_F = fftshift(fft2(S1.detach().numpy()))
            S2_F = fftshift(fft2(S2.detach().numpy()))
            DOLP_F = fftshift(fft2(DOLP.cpu().detach().numpy()))

            return {'S0': S0, 'S1': S1, 'S2': S2, 'SD': DOLP,
                    'S0_paths': S0_path, 'S1_paths': S1_path, 'S2_paths': S2_path,
                    "S0_F": S0_F, "S1_F": S1_F, "S2_F": S2_F, "SD_F": DOLP_F,
                    'frequency': True}
        else:
            return {'S0': S0, 'S1': S1, 'S2': S2, 'SD': DOLP,
                    'S0_paths': S0_path, 'S1_paths': S1_path, 'S2_paths': S2_path,
                    'frequency': False}

    def __len__(self):
        """Return the total number of images (length of the S0 path list)."""
        return int(self.S0_size)

    def get_category(self):
        """Populate ``self.category`` with one integer spoof-category label
        per S0 path, inferred from substrings of the file path."""
        # Category names (Chinese) -> label id: mask=0, A4 paper=1,
        # custom fake head=2, display screen=3, photo paper=4,
        # genuine person=5, digital=6.
        self.cat_name = {"面具": 0, "A4纸": 1, "定制假头": 2, "显示屏": 3, "相纸": 4, "真人": 5, "数字": 6}
        self.category = []

        for path in self.S0_paths:
            if "geniune_domain" in path:  # (sic) folder spelling used by the dataset
                self.category.append(self.cat_name["真人"])
            elif "dataset2_all" in path:
                # The category name is embedded directly in the path; as in the
                # original, every matching name appends a label (no break).
                for name in self.cat_name.keys():
                    if name in path:
                        self.category.append(self.cat_name[name])
            elif "physical_domain" in path:
                if 'screen' in path:
                    self.category.append(self.cat_name["显示屏"])
                elif 'photo' in path:
                    self.category.append(self.cat_name["相纸"])
                elif 'A4_paper' in path:
                    self.category.append(self.cat_name["A4纸"])
                elif 'silicon_head' in path:
                    self.category.append(self.cat_name["定制假头"])
                elif 'silicon_mask' in path:
                    self.category.append(self.cat_name["面具"])
            elif "digital_domain" in path:
                self.category.append(self.cat_name["数字"])

    def vis_res_deal_func(self, data, config=None, model_flag=True):
        """Convert tensor image(s) into BGR numpy arrays for visualization.

        Parameters:
            data       -- dict of tensors keyed by modality (model branch), or
                          a sample dict indexed by ``config.input`` otherwise
            config     -- options object; only ``config.input`` is read in the
                          non-model branch
            model_flag -- NOTE(review): the check is ``is not None``, so passing
                          ``False`` still takes the model branch; only an
                          explicit ``None`` selects the config-driven branch.
                          Kept as-is to preserve caller behavior — confirm intent.
        """
        if model_flag is not None:
            to_pil = transforms.ToPILImage()
            for key in data.keys():
                if key == 'label':  # labels are not images; leave untouched
                    continue
                data[key] = cv2.cvtColor(np.asarray(to_pil(data[key].cpu())), cv2.COLOR_RGB2BGR)
        else:
            to_pil = transforms.ToPILImage()
            data = cv2.cvtColor(np.asarray(to_pil(data[config.input].cpu())), cv2.COLOR_RGB2BGR)
        return data
