import os
import numpy as np
import cv2
import random
from PIL import Image
import torchvision.transforms as transforms


def identify(root):
    """Return True when *root* refers to a genuine (live) face sample.

    A path counts as live if it contains either the substring 'live'
    or the Chinese word for "real person".
    """
    return ('live' in root) or ('真人' in root)

def findsamename(root):
    """Collect matched DOLP/S0 image pairs under *root*.

    Expects subfolders ``root/DOLP`` and ``root/S0``. Some datasets
    embed a modality infix in the filename (``name.png_s0.png`` /
    ``name.png_dolp.png``); the infix is stripped so files can be
    matched by a common base name, then re-inserted when the full
    path is rebuilt.

    Returns:
        (dolpreal, dolpfake, s0real, s0fake) -- lists of numpy rows
        ``[path, label]``; label is '1' for live, '0' for fake (both
        become strings because the pairs pass through ``np.array``).
    """
    dolpfiles = os.listdir(os.path.join(root,'DOLP'))
    s0files = os.listdir(os.path.join(root,'S0'))
    # Detect the '.png_s0' infix from the first entry only
    # (assumes the naming style is uniform within a folder -- TODO confirm).
    s0flag = False
    if '.png_s0' in s0files[0]:
        s0flag=True
        _s0files = []
        for s0file in s0files:
            temp = s0file.split('.png_s0')
            _s0files.append(temp[0]+temp[1])  # e.g. 'a.png_s0.png' -> 'a.png'
        s0files = _s0files

    dolpflag = False
    if '.png_dolp' in dolpfiles[0]:
        dolpflag = True
        _dolpfiles = []
        for dolpfile in dolpfiles:
            temp = dolpfile.split('.png_dolp')
            _dolpfiles.append(temp[0]+temp[1])  # e.g. 'a.png_dolp.png' -> 'a.png'
        dolpfiles = _dolpfiles

    # Keep only base names present in BOTH modalities.
    dolpset,s0set = set(dolpfiles),set(s0files)
    subfiles = list(dolpset&s0set)


    # Rebuild the full S0 paths (re-inserting the infix when needed)
    # and attach a binary live/spoof label derived from the path.
    _s0files = []
    for s0file in subfiles:
        if s0flag:
            # NOTE(review): split('.') assumes exactly one dot in the base
            # name (e.g. 'name.png'); names with extra dots would be
            # rebuilt incorrectly -- TODO confirm against the dataset.
            temp = s0file.split('.')
            name = os.path.join(root,'S0',temp[0]+'.png_s0.'+temp[1])
            _s0files.append((name,int(identify(name))))
        else:
            name = os.path.join(root,'S0',s0file)
            _s0files.append((name,int(identify(name))))
    s0files = np.array(_s0files)

    # Same rebuild for the DOLP paths, in the same subfiles order so the
    # two arrays stay index-aligned.
    _dolpfiles = []
    for dolpfile in subfiles:
        if dolpflag:
            temp = dolpfile.split('.')
            name = os.path.join(root,'DOLP',temp[0]+'.png_dolp.'+temp[1])
            _dolpfiles.append((name,int(identify(name))))
        else:
            name = os.path.join(root,'DOLP',dolpfile)
            _dolpfiles.append((name,int(identify(name))))
    dolpfiles = np.array(_dolpfiles)
    dolpreal,dolpfake,s0real,s0fake = [],[],[],[]  # NOTE(review): immediately overwritten below
    # np.array converted the int labels to strings, hence '0'/'1'.
    ind_ = np.where(dolpfiles[:,1]=='0')[0]
    ind = np.where(dolpfiles[:,1]=='1')[0]
    dolpreal = list(dolpfiles[ind])
    dolpfake = list(dolpfiles[ind_])
    s0real = list(s0files[ind])
    s0fake = list(s0files[ind_])

    return dolpreal,dolpfake,s0real,s0fake

def searchalldata(root,flag=False):
    """Recursively collect (path, label) pairs below *root*.

    Parameters:
        root (str) -- directory to scan; the 'DOLP'/'S0' folder names and
            the live markers in the path decide which bucket a file goes to.
        flag (bool) -- when True, scan only the four fixed top-level
            dataset folders instead of ``os.listdir(root)``.

    Returns:
        (dolpreal, dolpfake, s0real, s0fake) -- lists of
        ``(path, label)`` tuples, label 1 for live and 0 for fake.
    """
    dolpreal,dolpfake,s0real,s0fake = [],[],[],[]
    if flag:
        items = []
        items.append(os.path.join('dataset1_live','HUT'))
        items.append(os.path.join('dataset2_all'))
        items.append(os.path.join('dataset3_attack'))
        items.append(os.path.join('dataset4_gen'))
    else:
        items = os.listdir(root)
    for item in items:
        # Skip txt lists, unused Stokes channels (S2/S3) and helper dirs.
        if item.__len__() <=1 or item == 'Deg' or '.txt' in item or 'S2' in item or 'S3' in item or 'Only_Face' in item:
            continue
        if '.' in item:
            # Leaf image file: bucket it by the current folder's modality
            # ('DOLP' vs 'S0' in root) and liveness of the path.
            if identify(root):
                if 'DOLP' in root:
                    dolpreal.append((os.path.join(root,item),1))
                elif 'S0' in root:
                    s0real.append((os.path.join(root,item),1))
            else:
                if 'DOLP' in root:
                    dolpfake.append((os.path.join(root,item),0))
                elif 'S0' in root:
                    s0fake.append((os.path.join(root,item),0))
        else:
            if 'dataset3_attack' in item or 'HUT' in item:
                # These folders need name-matched DOLP/S0 pairing.
                tmp1,tmp2,tmp3,tmp4 = findsamename(os.path.join(root,item))
                dolpreal = dolpreal + tmp1
                dolpfake = dolpfake + tmp2
                s0real = s0real + tmp3
                s0fake = s0fake + tmp4
            else:
                # Ordinary subdirectory: recurse.
                tmp1,tmp2,tmp3,tmp4 = searchalldata(os.path.join(root,item))
                dolpreal = dolpreal + tmp1
                dolpfake = dolpfake + tmp2
                s0real = s0real + tmp3
                s0fake = s0fake + tmp4
    return dolpreal,dolpfake,s0real,s0fake

def shuffle_split_data(data,testratio=0.2):
    """Shuffle *data* in place and split it into train/test parts.

    Parameters:
        data (mutable sequence) -- samples or index array; shuffled
            IN PLACE via random.shuffle (callers pass throwaway
            np.arange index arrays, so the mutation is harmless there).
        testratio (float) -- fraction of samples assigned to the test
            split; the count is truncated with int().

    Returns:
        (datatrain, datatest) -- the remaining and the first
        ``int(len(data) * testratio)`` elements of the shuffled data.
    """
    random.shuffle(data)
    test_len = int(len(data) * testratio)
    datatest = data[:test_len]
    datatrain = data[test_len:]
    return datatrain, datatest

def gendatalist(root,testratio=0.2):
    """Build per-source train/test splits for DOLP and S0 images.

    Scans dataset1_live/HUT, each category folder under dataset2_all
    (except '1', '2', 'Only_Face') and dataset3_attack; splits each
    folder's real and fake samples separately with ratio *testratio*,
    then concatenates the per-folder splits. The same shuffled index
    arrays are applied to the DOLP and S0 lists so the two modalities
    stay paired.

    Returns:
        (dolptraindir, dolptestdir, s0traindir, s0testdir) -- numpy
        arrays of (path, label) rows.
    """
    cats = os.listdir(os.path.join(root,'dataset2_all'))
    # '1', '2' and 'Only_Face' are bookkeeping folders, not categories.
    cats.remove('1')
    cats.remove('2')
    cats.remove('Only_Face')
    for i,(cat) in enumerate(cats):
        cats[i] = os.path.join('dataset2_all',cat)
    items = []
    items.append(os.path.join('dataset1_live','HUT'))
    items += cats
    items.append(os.path.join('dataset3_attack'))
    # dataset4_gen (digital attacks) deliberately excluded here.
    dolptraindir,dolptestdir,s0traindir,s0testdir = [],[],[],[]
    for item in items:
        dolpreal,dolpfake,s0real,s0fake = searchalldata(os.path.join(root,item),False)
        # Shuffle index arrays (not the lists themselves) so the DOLP
        # and S0 arrays can be sliced identically below.
        if len(dolpreal) > 0 :
            indreal = np.arange(0,len(dolpreal))
            indrealtrain,indrealtest = shuffle_split_data(indreal,testratio)
            dolpreal,s0real = np.array(dolpreal),np.array(s0real)

        if len(dolpfake) > 0:
            indfake = np.arange(0,len(dolpfake))
            indfaketrain,indfaketest = shuffle_split_data(indfake,testratio)
            dolpfake,s0fake = np.array(dolpfake),np.array(s0fake)

        _dolptraindir,_dolptestdir,_s0traindir,_s0testdir = [],[],[],[]
        if len(dolpreal) > 0 and len(dolpfake) > 0:
            # NOTE(review): in this branch s0real/s0fake are indexed with
            # DOLP-derived indices without the length check used in the
            # other branches -- assumes equal lengths; TODO confirm.
            _dolptraindir,_dolptestdir = list(dolpreal[indrealtrain])+list(dolpfake[indfaketrain]),list(dolpreal[indrealtest])+list(dolpfake[indfaketest])
            _s0traindir,_s0testdir = list(s0real[indrealtrain])+list(s0fake[indfaketrain]),list(s0real[indrealtest])+list(s0fake[indfaketest])
        elif len(dolpreal) > 0 and len(dolpfake) == 0:
            _dolptraindir,_dolptestdir = list(dolpreal[indrealtrain]),list(dolpreal[indrealtest])
            if len(s0real) == len(dolpreal):
                _s0traindir,_s0testdir = list(s0real[indrealtrain]),list(s0real[indrealtest])
        elif len(dolpreal) == 0 and len(dolpfake) > 0:
            _dolptraindir,_dolptestdir = list(dolpfake[indfaketrain]),list(dolpfake[indfaketest])
            if len(s0fake) == len(dolpfake):
                _s0traindir,_s0testdir = list(s0fake[indfaketrain]),list(s0fake[indfaketest])

        dolptraindir+=_dolptraindir
        dolptestdir+=_dolptestdir
        s0traindir+=_s0traindir
        s0testdir+=_s0testdir

    return np.array(dolptraindir),np.array(dolptestdir),np.array(s0traindir),np.array(s0testdir)

def save_train_test_to_txt(traindir,testdir,txtsavepath,filenameprefix,version):
    """Write train/test (path, label) pairs to two CSV-style txt files.

    Files are named ``<filenameprefix>_<version>_train.txt`` and
    ``<filenameprefix>_<version>_test.txt`` under *txtsavepath*; each
    line is ``path,label``.

    Parameters:
        traindir, testdir -- iterables of (path, label) pairs.
        txtsavepath (str) -- destination directory (must exist).
        filenameprefix (str) -- e.g. 'DOLP_' or 'S0_'.
        version (str) -- version tag embedded in the filename.
    """
    def _dump(pairs, suffix):
        # One 'path,label' line per sample.
        fname = filenameprefix + '_' + version + '_' + suffix + '.txt'
        with open(os.path.join(txtsavepath, fname), 'w') as f:
            for line in pairs:
                f.write(line[0] + ',' + str(line[1]) + '\n')

    _dump(traindir, 'train')
    _dump(testdir, 'test')


def change(root):
    """Rename S0 files under dataset2_all to match their DOLP twins.

    Iterates every category folder (skipping '1' and '2'). For live
    ('真人') folders both listings are first sorted by the number found
    between parentheses in the filename so the two lists line up; then
    each S0 file whose name differs from the DOLP file at the same
    position is renamed to the DOLP name.

    NOTE(review): for non-live folders this relies on os.listdir
    returning the DOLP and S0 entries in matching order, which is not
    guaranteed by the OS -- TODO confirm before re-running.
    """
    root = os.path.join(root,'dataset2_all')
    types = os.listdir(root)
    for typ in types:
        if typ == '1' or typ == '2':
            continue;
        else:
            x1 =  os.listdir(os.path.join(root,typ,'DOLP'))

            x2 =  os.listdir(os.path.join(root,typ,'S0'))
            if '真人' in typ:
                # Sort by the integer between '(' and ')' in the name.
                x1.sort(key= lambda x:int(x[x.find('(')+1:x.find(')')]))
                x2.sort(key= lambda x:int(x[x.find('(')+1:x.find(')')]))
            for i in range(len(x1)-1,-1,-1):
                if x1[i] == x2[i]:
                    continue
                os.rename(os.path.join(root,typ,'S0',x2[i]),os.path.join(root,typ,'S0',x1[i]))
            pass

    pass

if __name__ == '__main__':
    # Root folder that holds (and receives) the generated txt lists.
    datadir = os.path.join(os.path.abspath('.'), 'datasets', 'multi_modal_Polarization', "IJCB_txt")
    mode = 'gen'
    ratio = 0.5
    version = f'v{int(100 * ratio)}_no_digital'

    if mode == 'gen':
        # Build the per-modality train/test splits and dump them to txt.
        dolp_train, dolp_test, s0_train, s0_test = gendatalist(datadir, ratio)
        print(f'DOLP:  trainLen:{len(dolp_train)}  testLen:{len(dolp_test)}')
        print(f'S0:  trainLen:{len(s0_train)}  testLen:{len(s0_test)}')
        save_train_test_to_txt(dolp_train, dolp_test, datadir, 'DOLP_', version)
        save_train_test_to_txt(s0_train, s0_test, datadir, 'S0_', version)
    elif mode == 'change':
        # One-off filename repair pass over dataset2_all.
        change(datadir)



from data.base_dataset import BaseDataset
from data.base_dataset import BaseDataset, get_transform
class MultiPolarFaceDataset(BaseDataset):
    """Dataset of polarization face images for anti-spoofing.

    Loads image paths and live/attack labels from pre-generated txt
    lists (one 'path,label' line per sample; see
    save_train_test_to_txt) and serves either the S0 (intensity) or
    SD/DOLP (degree of linear polarization) modality, selected via
    ``opt.input_modality``.
    """

    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags;
                needs to be a subclass of BaseOptions. Fields used:
                dataroot, version, controller ('train'/'test'), name,
                input_modality ('S0' or 'SD'), input_nc.
        """
        BaseDataset.__init__(self, opt)
        self.npoints = None
        self.opt = opt

        # The double separator ('DOLP_' + '_v') mirrors the prefix used
        # when the txt files were generated.
        self.dolpfiletxt = os.path.join(opt.dataroot, 'DOLP_' + '_v' + opt.version + '_' + opt.controller + '.txt')
        self.s0filetxt = os.path.join(opt.dataroot, 'S0_' + '_v' + opt.version + '_' + opt.controller + '.txt')

        # Fix: np.str / np.int were deprecated aliases of the builtins
        # and were removed in NumPy 1.24 -- use the builtin types.
        dolpmat = np.genfromtxt(self.dolpfiletxt, delimiter=',', dtype=str)
        s0mat = np.genfromtxt(self.s0filetxt, delimiter=',', dtype=str)

        # For 'no_digital' experiments, drop the digitally generated
        # attacks (dataset4_gen) from the training split.
        if 'no_digital' in opt.name and opt.controller == 'train':
            dolpmat = np.array([row for row in dolpmat if 'dataset4_gen' not in str(row[0])])
            s0mat = np.array([row for row in s0mat if 'dataset4_gen' not in str(row[0])])

        self.modality = self.opt.input_modality

        self.dolpdatapath = dolpmat[:, 0].astype(str)
        self.s0datapath = s0mat[:, 0].astype(str)

        # The two txt files are generated in lockstep, so one label
        # column serves both modalities.
        self.label = dolpmat[:, 1].astype(int)

        self.SDlen = len(self.dolpdatapath)
        self.S0len = len(self.s0datapath)
        self.get_category()

        self.transform_S0 = get_transform(self.opt, grayscale=(self.opt.input_nc == 1))
        self.transform_DOLP = get_transform(self.opt, grayscale=(self.opt.input_nc == 1))

    def __getitem__(self, idx):
        """Return one sample of the selected modality.

        Parameters:
            idx (int) -- sample index.

        Returns a dict with keys:
            'S0' or 'SD' (tensor) -- the transformed RGB image,
            'label' (int)         -- 1 live / 0 attack,
            'category' (int)      -- attack-category id (see get_category),
            'cat_name' (dict)     -- category-name -> id mapping.
        """
        if self.modality == 'S0':
            path = self.s0datapath[idx]
            img = self.transform_S0(Image.open(path).convert('RGB'))
            key = 'S0'
        elif self.modality == 'SD':
            path = self.dolpdatapath[idx]
            img = self.transform_DOLP(Image.open(path).convert('RGB'))
            key = 'SD'
        else:
            # Preserve original behavior: unknown modality yields None.
            return None
        return {key: img,
                'label': self.label[idx],
                'category': self.category[idx],
                'cat_name': self.cat_name,
                }

    def __len__(self):
        """Return the number of samples of the active modality.

        Resolves to self.S0len or self.SDlen via the modality name.
        """
        return int(getattr(self, self.modality + 'len'))

    def get_category(self):
        """Build self.category: one attack-category id per sample path.

        NOTE(review): paths matching none of the rules below append
        nothing, which would desynchronize self.category from the
        sample indices -- assumed not to occur in practice; confirm
        against the dataset lists.
        """
        self.cat_name = {"面具":0,"A4纸":1,"定制假头":2,"显示屏":3,"相纸":4,"真人":5,"数字":6}
        self.category = []
        if self.modality == 'S0':
            datapath = self.s0datapath
        elif self.modality == 'SD':
            datapath = self.dolpdatapath
        # dataset3 subject-id prefix -> category name, checked in order
        # (the '1001_' group must come first, as in the original chain).
        id_rules = [
            (('1001_', '10001_', '10002_', '10003_', '10004_', '10005_'), "显示屏"),
            (('20001_', '20002_', '20003_'), "相纸"),
            (('30001_', '30002_', '30003_'), "A4纸"),
            (('40001_', '40002_'), "定制假头"),
            (('50001_',), "面具"),
            (('60001_',), "显示屏"),
        ]
        for path in datapath:
            if "dataset1_live" in path:
                self.category.append(self.cat_name["真人"])
            elif "dataset2_all" in path:
                # The folder name embeds the category name. Every match
                # appends -- the original had no break here either;
                # assumes exactly one name occurs per path.
                for name in self.cat_name.keys():
                    if name in path:
                        self.category.append(self.cat_name[name])
            elif "dataset3_attack" in path:
                for markers, name in id_rules:
                    if any(m in path for m in markers):
                        self.category.append(self.cat_name[name])
                        break
            elif "dataset4_gen" in path:
                self.category.append(self.cat_name["数字"])

    def vis_res_deal_func(self, data, config=None, model_flag=True):
        """Convert tensors in *data* to BGR numpy images for display.

        Parameters:
            data -- dict of tensors keyed by modality (dict branch), or
                a dict from which only config.input is converted.
            config -- options object; only config.input is read, and
                only when model_flag is None.
            model_flag -- NOTE(review): the test is 'is not None', so
                both True and False take the dict branch; only an
                explicit None selects the single-image branch. Looks
                suspicious but is preserved as-is -- confirm callers.

        Returns:
            The same dict with tensors replaced by BGR arrays, or a
            single BGR array in the model_flag=None case.
        """
        to_pil = transforms.ToPILImage()
        if model_flag is not None:
            for key in data.keys():
                if key == 'label':
                    continue
                data[key] = cv2.cvtColor(np.asarray(to_pil(data[key].cpu())), cv2.COLOR_RGB2BGR)
        else:
            data = cv2.cvtColor(np.asarray(to_pil(data[config.input].cpu())), cv2.COLOR_RGB2BGR)
        return data
