from torch.utils.data import Dataset
import os
from PIL import Image
import random
from torchvision import transforms
import torch
import numpy as np

class DataSetGAN(Dataset):
    """Unpaired image/style dataset for GAN training.

    An input image and a style target ("gt") are sampled independently at
    random, so pairs are unaligned. Each sample is resized to ``size`` x
    ``size``, randomly flipped, normalized to [-1, 1], and tagged with the
    task name ``'gan'`` for the multi-task collate functions in this module.
    """

    # File suffixes accepted when scanning directories for images.
    IMG_EXTENSIONS = ('jpg', 'png', 'bmp')

    def __init__(self, image_dir, gt_dir, mode='train', size=256) -> None:
        """
        Args:
            image_dir: directory (or list of directories) scanned recursively
                for input images.
            gt_dir: directory scanned non-recursively for style targets.
            mode: kept for interface symmetry with the other datasets; not
                used by any code in this class.
            size: output spatial resolution (square).
        """
        super(DataSetGAN, self).__init__()

        self.mode = mode
        self.size = size
        self.image_dir = image_dir
        self.gt_dir = gt_dir
        self.images = self.get_image_by_walkdir(image_dir)
        self.gt_style = self.get_image_by_listdir(gt_dir)

        # ToTensor maps to [0, 1]; Normalize(0.5, 0.5) then maps to [-1, 1].
        self.T = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
        # p=1 flip: the coin toss is done manually in __getitem__ so that
        # image and gt can be flipped together.
        self.flip = transforms.RandomHorizontalFlip(p=1)
        self.mem = 'gan'  # task tag consumed by collate_fn / collate_fn2
        print('gan:', len(self.images))

    def __len__(self):
        # Fixed epoch length, independent of how many files are on disk;
        # sampling in __getitem__ is random anyway.
        return 10000

    def __getitem__(self, index):
        # Unpaired sampling: input and style target are drawn independently,
        # so ``index`` is deliberately ignored.
        gt_style_path = random.choice(self.gt_style)
        image_path = random.choice(self.images)

        image = Image.open(image_path).convert('RGB')
        gt = Image.open(gt_style_path).convert('RGB')

        image = image.resize(size=(self.size, self.size))
        gt = gt.resize(size=(self.size, self.size))

        # Joint horizontal flip with p=0.5 keeps the pair's orientation
        # consistent; the second, independent flip of gt alone is harmless
        # because the data is unpaired.
        if random.random() > 0.5:
            gt = self.flip(gt)
            image = self.flip(image)
        if random.random() > 0.5:
            gt = self.flip(gt)
        return self.T(image), self.T(gt), self.mem

    @staticmethod
    def get_image_by_listdir(base_dir):
        """Return paths of images directly inside ``base_dir`` (non-recursive)."""
        return [os.path.join(base_dir, name)
                for name in os.listdir(base_dir)
                if name.endswith(DataSetGAN.IMG_EXTENSIONS)]

    @staticmethod
    def get_image_by_walkdir(base_dirs):
        """Recursively collect image paths under one directory or a list of them.

        Args:
            base_dirs: a single directory path or an iterable of them.

        Returns:
            list[str] of matching file paths, in os.walk order.
        """
        if isinstance(base_dirs, str):
            base_dirs = [base_dirs]
        data = []
        for base_dir in base_dirs:
            for root, _dirs, files in os.walk(base_dir):
                for name in files:
                    if name.endswith(DataSetGAN.IMG_EXTENSIONS):
                        data.append(os.path.join(root, name))
        return data
        
class DataSetGAN2(Dataset):
    """Unpaired GAN dataset that additionally returns a segmentation mask.

    Like ``DataSetGAN`` but each sample also carries the face-parsing mask
    that corresponds to the sampled input image (derived from its path),
    yielding (image, style_gt, seg_label, 'gan') tuples for collate_fn2.
    """

    # File suffixes accepted when scanning directories for images.
    IMG_EXTENSIONS = ('jpg', 'png', 'bmp')

    def __init__(self, image_dir, gt_dir, mode='train', size=256,
                 pipeline=None) -> None:
        """
        Args:
            image_dir: directory (or list of directories) scanned recursively
                for input images.
            gt_dir: directory scanned non-recursively for style targets.
            mode: kept for interface symmetry; not used.
            size: output spatial resolution (square).
            pipeline: accepted for backward compatibility but never used by
                this class.
        """
        super(DataSetGAN2, self).__init__()

        self.mode = mode
        self.size = size
        self.image_dir = image_dir
        self.gt_dir = gt_dir
        self.images = self.get_image_by_walkdir(image_dir)
        self.gt_style = self.get_image_by_listdir(gt_dir)

        # ToTensor maps to [0, 1]; Normalize(0.5, 0.5) then maps to [-1, 1].
        self.T = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
        self.flip = transforms.RandomHorizontalFlip(p=1)
        self.mem = 'gan'  # task tag consumed by collate_fn2
        print('gan:', len(self.images))

    def __len__(self):
        # Fixed epoch length; sampling in __getitem__ is random anyway.
        return 10000

    def __getitem__(self, index):
        # Unpaired sampling: style target and input image are drawn
        # independently, so ``index`` is deliberately ignored.
        gt_style_path = random.choice(self.gt_style)
        image_path = random.choice(self.images)
        # The parsing mask lives in a sibling "mask" dir with a .png suffix
        # (CelebA-HQ layout) -- assumed from the path rewrite; TODO confirm.
        seg_label_path = image_path.replace("CelebA-HQ-img", "mask").replace("jpg", "png")

        image = Image.open(image_path).convert('RGB')
        gt = Image.open(gt_style_path).convert('RGB')
        label = Image.open(seg_label_path).convert('P')

        # Resize of a P-mode image defaults to NEAREST in Pillow, which is
        # exactly what class-id masks require (no interpolated ids).
        label = label.resize(size=(self.size, self.size))
        image = image.resize(size=(self.size, self.size))
        gt = gt.resize(size=(self.size, self.size))
        label = np.array(label)

        return self.T(image), self.T(gt), torch.from_numpy(label), self.mem

    @staticmethod
    def get_image_by_listdir(base_dir):
        """Return paths of images directly inside ``base_dir`` (non-recursive)."""
        return [os.path.join(base_dir, name)
                for name in os.listdir(base_dir)
                if name.endswith(DataSetGAN2.IMG_EXTENSIONS)]

    @staticmethod
    def get_image_by_walkdir(base_dirs):
        """Recursively collect image paths under one directory or a list of them."""
        if isinstance(base_dirs, str):
            base_dirs = [base_dirs]
        data = []
        for base_dir in base_dirs:
            for root, _dirs, files in os.walk(base_dir):
                for name in files:
                    if name.endswith(DataSetGAN2.IMG_EXTENSIONS):
                        data.append(os.path.join(root, name))
        return data
        
        
        
        
def get_data(base_dir):
    """Recursively gather every ``.jpg`` file path beneath *base_dir*.

    Returns the paths in ``os.walk`` traversal order.
    """
    return [
        os.path.join(dirpath, filename)
        for dirpath, _subdirs, filenames in os.walk(base_dir)
        for filename in filenames
        if filename.endswith(".jpg")
    ]

class FaceParsingData(Dataset):
    """Face-parsing dataset yielding (image, segmentation_label, 'seg') triples.

    Expects CelebA-HQ layout: images under a ``CelebA-HQ-img`` directory with
    matching masks under a sibling ``mask`` directory (same name, ``.png``).
    """

    def __init__(self, data_dir, mode="train", size=256):
        super(FaceParsingData, self).__init__()
        self.mode = mode
        self.data = get_data(data_dir)

        self.image_size = [size, size]

        # ToTensor maps to [0, 1]; Normalize(0.5, 0.5) then maps to [-1, 1].
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
        # p=1 flip: the coin toss is done manually in __getitem__ so image
        # and mask flip together.
        self.flip = transforms.RandomHorizontalFlip(p=1)
        self.mem = 'seg'  # task tag consumed by the collate functions

        print("seg:", len(self.data))

    def __len__(self):
        # Fixed epoch length, independent of the number of files on disk.
        return 10000

    def __getitem__(self, index):
        # __len__ is hard-coded to 10000, so wrap the index to stay in range
        # when fewer than 10000 images are available.
        path = self.data[index % len(self.data)]
        img = Image.open(path).convert("RGB")
        img = img.resize(self.image_size, Image.BILINEAR)
        label = Image.open(path.replace("CelebA-HQ-img", "mask").replace("jpg", "png")).convert("P")
        # NEAREST, not BILINEAR: interpolating palette/class indices would
        # invent non-existent class ids along segment boundaries.
        label = label.resize(self.image_size, Image.NEAREST)

        # Joint augmentation: +/-90 degree rotation then horizontal flip,
        # each with probability 0.5, applied identically to image and mask.
        if random.random() > 0.5:
            angle = random.choice([-90, 90])
            img = img.rotate(angle)
            label = label.rotate(angle)
        if random.random() > 0.5:
            label = self.flip(label)
            img = self.flip(img)

        img = self.transform(img)
        label = np.array(label)

        return img, torch.from_numpy(label), self.mem
        
class DataSetFaceLandMark(Dataset):
    """Face-landmark dataset read from a whitespace-separated label file.

    Each line: an image filename followed by float labels. The first 196
    values are treated as 98 (x, y) keypoints and the next 4 as a crop box
    (x1, y1, x2, y2) -- this matches the WFLW annotation layout, presumably;
    TODO confirm against the actual label file.
    Yields (tensor_image, keypoints, 'facelandmark') per sample.
    """
    def __init__(self,label_file,data_prefix='facelandmark/images',mode='train',size=256) -> None:
        super(DataSetFaceLandMark,self).__init__()
        # Read all annotation lines up front; one line per sample.
        with open(label_file,'r',encoding='utf-8') as f:
            lines = f.readlines()
        self.data_prefix= data_prefix  # directory prepended to each filename
        self.lines = lines
        
        # ToTensor maps to [0, 1]; Normalize(0.5, 0.5) then maps to [-1, 1].
        self.data_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        ])
        
        self.mem = 'facelandmark'  # task tag consumed by the collate functions
        self.size = size 
        print('facelandmark:',len(self.lines))
    def __len__(self):
        # Fixed epoch length. NOTE(review): __getitem__ indexes self.lines
        # directly, so this assumes the label file has >= 10000 lines.
        return 10000
    
    def __getitem__(self,index):
        # Parse one annotation line: "<filename> <float> <float> ...".
        line = self.lines[index]
        line = line.strip('\n')
        line = line.split(' ')
        image_path = os.path.join(self.data_prefix,line[0])
        image = Image.open(image_path).convert("RGB")
        labels=list(map(float,line[1:]))
        kp =labels[:196]  # 98 keypoints as interleaved x, y pairs
        
        # zoom
     
        
        # Crop box follows the keypoints in the label layout.
        box = labels[196:]
        x1,y1,x2,y2 = box[0],box[1],box[2],box[3]
        
        # Crop to the face box and shift keypoints into crop coordinates.
        image= image.crop((x1,y1,x2,y2))
        kp[0::2] = [x-x1 for x in kp[0::2]]   
        kp[1::2] = [y-y1 for y in kp[1::2]] 

        
        width = image.width
        height = image.height
        
        # Normalize keypoints to [0, 1] relative to the CROP dimensions.
        # NOTE(review): after the aspect-preserving resize + top-left paste
        # below, a normalized coordinate maps to pixel kp*resized_dim, not
        # kp*self.size -- verify the consumer accounts for the letterboxing.
        kp[0::2] = [x/width for x in kp[0::2]]
        kp[1::2] = [y/height for y in kp[1::2]]
        # Letterbox: gray canvas, crop resized to fit while preserving the
        # aspect ratio, pasted at the top-left corner.
        new_image = Image.new("RGB",(self.size,self.size),(128,128,128))
        ratio = width/height
        
        if height>width:
            height = self.size
            width =int(ratio*self.size)
        else:
            width = self.size
            height = int(self.size/ratio)
        image = image.resize((width,height))
        
        new_image.paste(image,(0,0))
        
        return self.data_transform(new_image),torch.from_numpy(np.array(kp)).float(),self.mem

    def zoom_the_eye(self,kp):
        # NOTE(review): this method appears unfinished -- the loop computes
        # per-point unit vectors but never modifies or returns anything, and
        # nothing in this file calls it.
        # left eye
        left_index  = [60,61,62,63,64,65,66,67]
        # right_eye
        right_index = [68,69,70,71,72,73,74,75]        
        
        # Indices 96/97 are the eye-center landmarks (WFLW convention,
        # presumably -- TODO confirm).
        left_center = 96
        right_center = 97
        
        left_cx = kp[left_center*2+0]
        left_cy = kp[left_center*2+1]
        
        for li in left_index:
            kp_x = kp[li*2+0]
            kp_y = kp[li*2+1]
            kp_arrow = np.array([kp_x-left_cx,kp_y-left_cy])
            kp_arrow_norm = np.linalg.norm(kp_arrow)
            kp_unit = kp_arrow/kp_arrow_norm
            unit = np.array([0,1])
        
        
def collate_fn(batch):
    """Group (data, gt, task_name) samples by task and stack each group.

    Args:
        batch: iterable of 3-tuples as produced by the datasets above, where
            the third element is the task tag (e.g. 'gan', 'seg').

    Returns:
        dict mapping each task name to {'data': Tensor, 'gt': Tensor}, with
        tensors stacked along a new leading batch dimension.
    """
    grouped = {}
    for sample, target, task in batch:
        bucket = grouped.setdefault(task, {'data': [], 'gt': []})
        bucket['data'].append(sample)
        bucket['gt'].append(target)

    return {
        task: {
            'data': torch.stack(bucket['data']),
            'gt': torch.stack(bucket['gt']),
        }
        for task, bucket in grouped.items()
    }


def collate_fn2(batch):
    """Group (data, gt, other, task_name) samples by task and stack each group.

    Like :func:`collate_fn` but for datasets that return a third tensor
    (e.g. the segmentation label from ``DataSetGAN2``).

    Returns:
        dict mapping each task name to
        {'data': Tensor, 'gt': Tensor, 'other': Tensor}.
    """
    grouped = {}
    for sample, target, extra, task in batch:
        bucket = grouped.setdefault(task, {'data': [], 'gt': [], 'other': []})
        bucket['data'].append(sample)
        bucket['gt'].append(target)
        bucket['other'].append(extra)

    return {
        task: {
            'data': torch.stack(bucket['data']),
            'gt': torch.stack(bucket['gt']),
            'other': torch.stack(bucket['other']),
        }
        for task, bucket in grouped.items()
    }
