import os
import numpy as np
import cv2
import random
from PIL import Image
import torchvision.transforms as transforms
from  numpy.fft import fft2,fftshift

from data.base_dataset import BaseDataset, get_transform
class V202207131333CASIASURFDataset(BaseDataset):
    """Multi-modal (color / depth / ir / dolp) CASIA-SURF style dataset.

    For each modality the loader expects a split file in ``opt.dataroot``
    named ``<modality>__v<dataversion>_<controller>.txt`` whose rows are
    ``<image path>,<label>``.  Only modalities whose split file exists are
    loaded; every loaded modality must list the same number of samples with
    identical labels (a warning is printed when they disagree).
    """

    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be
                a subclass of BaseOptions.  Fields read here: dataroot,
                controller, dataversion, input_nc and (optionally) lambda_fre.
        """
        BaseDataset.__init__(self, opt)
        self.isTrain = (opt.controller == 'train')

        # Per-modality path arrays; filled below only for available modalities.
        self.color_paths = []
        self.depth_paths = []
        self.ir_paths = []
        self.dolp_paths = []

        # Availability flags, derived from which split files exist on disk
        # (file names look like 'color__v1_train.txt', so the part before the
        # double underscore identifies the modality).
        self.color = False
        self.depth = False
        self.ir = False
        self.dolp = False
        for txt in os.listdir(opt.dataroot):
            modality = txt.split('__')[0]
            if modality == 'color':
                self.color = True
            elif modality == 'depth':
                self.depth = True
            elif modality == 'ir':
                self.ir = True
            elif modality == 'dolp':
                self.dolp = True

        # Shared across modalities: set by the first _read_split() call and
        # consistency-checked by the later ones.
        self.label = None
        self.size = None

        if self.color:
            self.colorfiletxt = self._split_txt(opt, 'color')
            self.color_paths = self._read_split(self.colorfiletxt)
            self.transform_color = get_transform(self.opt, grayscale=(self.opt.input_nc == 1))

        if self.depth:
            self.depthfiletxt = self._split_txt(opt, 'depth')
            self.depth_paths = self._read_split(self.depthfiletxt)
            self.transform_depth = get_transform(self.opt, grayscale=(self.opt.input_nc == 1))

        if self.ir:
            self.irfiletxt = self._split_txt(opt, 'ir')
            self.ir_paths = self._read_split(self.irfiletxt)
            self.transform_ir = get_transform(self.opt, grayscale=(self.opt.input_nc == 1))

        if self.dolp:
            self.dolpfiletxt = self._split_txt(opt, 'dolp')
            self.dolp_paths = self._read_split(self.dolpfiletxt)
            self.transform_dolp = get_transform(self.opt, grayscale=(self.opt.input_nc == 1))

        self.get_category()

        # Frequency-domain copies of the images are produced only when the
        # experiment actually weights the frequency loss.
        self.usefft = hasattr(opt, 'lambda_fre') and opt.lambda_fre != 0

    @staticmethod
    def _split_txt(opt, modality):
        """Return the split-file path for *modality*, e.g. 'color__v1_train.txt'."""
        return os.path.join(opt.dataroot, modality + '__v' + opt.dataversion + '_' + opt.controller + '.txt')

    def _read_split(self, filetxt):
        """Read one '<path>,<label>' split file and return its image paths.

        Records the label vector / dataset size on first use and prints a
        warning when a later modality disagrees with the recorded values.
        """
        # NOTE(review): assumes the file has >= 2 rows so genfromtxt yields a
        # 2-D array; a single-row file would come back 1-D -- confirm upstream.
        # np.str / np.int were removed in NumPy 1.24; use the builtins.
        mat = np.genfromtxt(filetxt, delimiter=',', dtype=str)
        paths = mat[:, 0].astype(str)
        label = mat[:, 1].astype(int)

        if self.label is None:
            self.label = label
        elif np.any(self.label != label):
            print("The label of source text file is wrong")

        if self.size is None:
            self.size = len(paths)
        elif self.size != len(paths):
            print("The size of source text file is wrong")

        return paths

    def __getitem__(self, index):
        """Return one sample and its metadata.

        Parameters:
            index (int) -- sample index, 0 <= index < len(self)

        Returns a dict with 'label', 'category' and 'cat_name' plus, for every
        available modality m in {color, depth, ir, dolp}: the transformed image
        under 'm', a shortened path string under 'm_paths' and (when usefft is
        enabled) its centered 2-D FFT under 'm_F'.
        """
        ans = {
            'label': self.label[index],
            'category': self.category[index],
            'cat_name': self.cat_name,
        }

        if self.color:
            self._add_modality(ans, 'color', self.color_paths[index], self.transform_color)
        if self.depth:
            self._add_modality(ans, 'depth', self.depth_paths[index], self.transform_depth)
        if self.ir:
            self._add_modality(ans, 'ir', self.ir_paths[index], self.transform_ir)
        if self.dolp:
            self._add_modality(ans, 'dolp', self.dolp_paths[index], self.transform_dolp)

        return ans

    def _add_modality(self, ans, name, path, transform):
        """Load, transform and store one modality's image into *ans*."""
        img = Image.open(path).convert('RGB')
        parts = str(path).split('/')
        # Short basenames are ambiguous across subjects, so join the last
        # directory levels into the reported path instead.
        if len(parts[-1]) < 16:
            short_path = '_'.join(parts[-5:])
        else:
            short_path = parts[-1]
        tensor = transform(img)
        ans[name] = tensor
        ans[name + '_paths'] = short_path
        if self.usefft:
            # Centered spectrum of the transformed image, per channel.
            ans[name + '_F'] = fftshift(fft2(tensor.detach().numpy()))

    def __len__(self):
        """Return the total number of samples (shared by all modalities)."""
        return self.size

    def get_category(self):
        """Assign each sample a coarse attack category from its directory name.

        Categories: 物理 (physical attack) = 0, 真人 (genuine) = 1,
        数字 (digital attack) = 2; unmatched paths get -1.
        """
        self.cat_name = {"物理":0,"真人":1,"数字":2}
        self.category = []

        # Prefer color paths, but fall back to any other loaded modality so
        # the categories stay usable when the color split file is absent.
        paths = self.color_paths
        for fallback in (self.depth_paths, self.ir_paths, self.dolp_paths):
            if len(paths):
                break
            paths = fallback

        for path in paths:
            # "geniune_domain" matches the on-disk directory spelling.
            if "geniune_domain" in path:
                self.category.append(self.cat_name["真人"])
            elif "physical_domain" in path:
                self.category.append(self.cat_name["物理"])
            elif "digital_domain" in path:
                self.category.append(self.cat_name["数字"])
            else:
                self.category.append(-1)

    def vis_res_deal_func(self, data, config=None, model_flag=True):
        """Convert result tensors to BGR numpy images for visualisation.

        Parameters:
            data       -- dict of CHW tensors (dict branch), or a dict holding
                          a single tensor under config.input (single branch).
            config     -- read only in the single branch; provides .input.
            model_flag -- NOTE(review): tested with ``is not None``, so both
                          True and False take the dict branch and only an
                          explicit None selects the single-image branch --
                          confirm whether ``if model_flag:`` was intended.
        """
        to_pil = transforms.ToPILImage()
        if model_flag is not None:
            for key in data.keys():
                if key == 'label':
                    continue
                data[key] = cv2.cvtColor(np.asarray(to_pil(data[key].cpu())), cv2.COLOR_RGB2BGR)
        else:
            data = cv2.cvtColor(np.asarray(to_pil(data[config.input].cpu())), cv2.COLOR_RGB2BGR)
        return data
