
import imp
import cv2
import numpy as np
import os
import random
import torch
from my_py_toolkit.cv.blend_with_landmark import self_blend, get_trans
from my_py_toolkit.cv.self_blend_image import gen_img, crop_face
from my_py_toolkit.file.file_toolkit import *
from torch.utils.data import DataLoader, Dataset

def get_collate_fn(mode='train', trans=None, mask_trans=None, imsize=None):
    """Build a DataLoader ``collate_fn`` that loads images from disk and,
    in training mode, augments the batch with self-blended fake images.

    Args:
        mode: 'train' enables self-blend augmentation; any other value
            loads the images unchanged.
        trans: image transform forwarded to ``self_blend``.
        mask_trans: mask transform forwarded to ``self_blend``.
        imsize: optional (width, height) passed to ``cv2.resize``; when
            None, all images in a batch must already share one shape.

    Returns:
        A callable mapping a batch of (path, label, landmarks) tuples to
        ``(float tensor NCHW, long tensor of labels)``.
    """

    def collate_fn(data_batch):
        paths, labels, landmarks = [], [], []
        for path, label, lks in data_batch:
            paths.append(path)
            labels.append(label)
            landmarks.append(np.asarray(lks))

        imgs = [cv2.imread(p) for p in paths]
        if imsize:
            imgs = [cv2.resize(img, imsize) for img in imgs]
        if mode == 'train':
            # Generate one self-blended fake per sample that has landmarks;
            # fakes are appended after the real images with label 1.
            fake_images = [self_blend(img, landmarks[i], trans, mask_trans)
                           for i, img in enumerate(imgs) if landmarks[i].tolist()]
            imgs = imgs + fake_images
            labels = labels + [1] * len(fake_images)
        # np.stack avoids the very slow (and warned-about) construction of a
        # tensor from a Python list of ndarrays.
        imgs = torch.from_numpy(np.stack(imgs)).to(torch.float)
        imgs = imgs.permute(0, 3, 1, 2)  # NHWC -> NCHW
        labels = torch.tensor(labels, dtype=torch.long)
        return imgs, labels

    return collate_fn

def get_collate_fn_sbi(mode='train', imsize=None):
    """Build a ``collate_fn`` for self-blended-image (SBI) training.

    In 'train' mode each sample with landmarks yields a (real, fake) image
    pair via ``gen_img`` (labels 0 and 1). Otherwise each sample with a
    face box is cropped, resized to ``imsize``, scaled to [0, 1] and
    transposed to CHW, keeping its original label.

    Args:
        mode: phase flag forwarded to ``gen_img`` / ``crop_face``.
        imsize: (width, height) target size for the evaluation crop.

    Returns:
        A callable mapping (path, label, landmarks, face_box) batches to
        ``(float tensor, long tensor)``.
    """

    def collate_fn(data_batch):
        img_handled, labels_handled = [], []

        paths, labels, landmarks, faces_box = [], [], [], []
        for path, label, lks, box in data_batch:
            paths.append(path)
            labels.append(label)
            landmarks.append(np.asarray(lks))
            faces_box.append(np.asarray(box))

        imgs = [cv2.imread(p) for p in paths]
        if mode == 'train':
            for i, img in enumerate(imgs):
                # Samples without landmarks cannot be blended; skip them.
                if landmarks[i].tolist():
                    img_r, img_f = gen_img(img, landmarks[i], faces_box[i], mode, imsize)
                    img_handled.append(img_r)
                    labels_handled.append(0)
                    img_handled.append(img_f)
                    labels_handled.append(1)
        else:
            for i, img in enumerate(imgs):
                # Samples without a detected face box are skipped.
                if faces_box[i].tolist():
                    face, *_ = crop_face(img, landmarks[i], faces_box[i], margin=False,
                                         crop_by_bbox=True, abs_coord=True, phase=mode)
                    face = cv2.resize(face, imsize,
                                      interpolation=cv2.INTER_LINEAR).astype('float32') / 255
                    face = face.transpose((2, 0, 1))  # HWC -> CHW
                    img_handled.append(face)
                    labels_handled.append(labels[i])

        # np.stack is far faster than torch.tensor on a list of ndarrays,
        # but it raises on an empty list — fall back to an empty tensor when
        # every sample in the batch was filtered out.
        if img_handled:
            imgs_t = torch.from_numpy(np.stack(img_handled)).to(torch.float)
        else:
            imgs_t = torch.empty(0, dtype=torch.float)
        labels_t = torch.tensor(labels_handled, dtype=torch.long)
        return imgs_t, labels_t

    return collate_fn

class PSDatasethj(Dataset):
    """Dataset over a ``real``/``fake`` directory tree (HJ layout).

    Expects ``data_dir/real/<sub_dir>/...`` and ``data_dir/fake/<sub_dir>/...``.
    Real samples get label 0, fakes label 1. Landmarks are looked up in a
    JSON mapping keyed by each file's path relative to ``data_dir``.
    """

    def __init__(self, data_dir, landmarks_path):
        """
        Args:
            data_dir: root directory containing ``real``/``fake`` subtrees.
            landmarks_path: optional JSON file mapping relative path ->
                landmark list; may be falsy to skip landmark loading.
        """
        super().__init__()
        self.data_dir = data_dir
        self.landmarks_path = landmarks_path

        self.files = []      # absolute file paths
        self.labels = []     # 0 = real, 1 = fake
        self.landmarks = []  # landmark list per file ([] when unknown)
        self.__read_data()

    def __read_data(self):
        # Default to an empty dict (not a list) so __get_landmarks can call
        # .get even when no landmarks file was supplied.
        landmarks = {}
        if self.landmarks_path:
            landmarks = readjson(self.landmarks_path)
        for label, name in enumerate(['real', 'fake']):
            class_dir = f'{self.data_dir}/{name}'
            if os.path.exists(class_dir):
                sub_dirs = get_sub_dirs(class_dir)
                self.read_files(landmarks, label, sub_dirs)

    def read_files(self, landmarks, label, sub_dirs):
        """Collect files from every sub-directory of one class.

        Real directories (label 0) are randomly subsampled to 10 files each
        to balance against the fakes generated later.
        """
        for sub_dir in sub_dirs:
            files_sub = get_file_paths(sub_dir)
            if label == 0:
                random.shuffle(files_sub)
                files_sub = files_sub[:10]
            self.files.extend(files_sub)
            self.labels.extend([label] * len(files_sub))
            self.landmarks.extend(self.__get_landmarks(files_sub, landmarks))

    def __get_landmarks(self, files, landmarks):
        """Return the landmark entry for each file ([] when missing),
        keyed by the path relative to ``data_dir``."""
        res = []
        print(f'nums landmarks: {len(landmarks)}')
        for f in files:
            f_name = f[f.index(self.data_dir) + len(self.data_dir):]
            # Drop leading/trailing path separators; the original '\/' was an
            # invalid escape sequence that only worked by accident.
            f_name = f_name.strip('\\/')
            res.append(landmarks.get(f_name, []))

        return res

    def __getitem__(self, index):
        return self.files[index], self.labels[index], self.landmarks[index]

    def __len__(self):
        return len(self.files)
        
class PSDatasetSBI(Dataset):
    """Dataset for self-blended-image training over a ``real``/``fake`` tree.

    Alongside labels it carries per-file landmarks and face boxes, both
    looked up in JSON mappings keyed by the path relative to ``data_dir``.
    Face boxes are converted from [left, right, top, bottom] to corner-point
    form [[left, top], [right, bottom]].
    """

    def __init__(self, data_dir, landmarks_path, faces_path, phase='train'):
        """
        Args:
            data_dir: root directory containing ``real``/``fake`` subtrees.
            landmarks_path: optional JSON file of relative path -> landmarks.
            faces_path: JSON file of relative path -> [left, right, top, bottom].
            phase: 'train' enables per-directory subsampling.
        """
        super().__init__()
        self.data_dir = data_dir
        self.landmarks_path = landmarks_path
        self.faces_path = faces_path
        self.phase = phase

        self.files = []      # absolute file paths
        self.labels = []     # 0 = real, 1 = fake
        self.landmarks = []  # landmark list per file ([] when unknown)
        self.faces = []      # [[left, top], [right, bottom]] or []
        self.__read_data()
        print(f'landmarks nums: {len([v for v in self.landmarks if v])}')

    def __read_data(self):
        # Default to an empty dict (not a list) so lookups via .get still
        # work when no landmarks file is configured.
        landmarks = {}
        faces = readjson(self.faces_path)
        if self.landmarks_path:
            landmarks = readjson(self.landmarks_path)

        for label, name in enumerate(['real', 'fake']):
            class_dir = f'{self.data_dir}/{name}'
            if os.path.exists(class_dir):
                sub_dirs = get_sub_dirs(class_dir)
                self.read_files(landmarks, label, faces, sub_dirs)

        self.faces = self.adapt_faces_box(self.faces)

    def read_files(self, landmarks, label, faces, sub_dirs):
        """Collect files from every sub-directory of one class; in the
        'train' phase each directory is randomly subsampled to 10 files."""
        for sub_dir in sub_dirs:
            files_sub = get_file_paths(sub_dir)
            if self.phase == 'train':
                random.shuffle(files_sub)
                files_sub = files_sub[:10]
            self.files.extend(files_sub)
            self.labels.extend([label] * len(files_sub))
            self.landmarks.extend(self.__get_from_dict(files_sub, landmarks))
            self.faces.extend(self.__get_from_dict(files_sub, faces))

    def adapt_faces_box(self, faces):
        """Convert [left, right, top, bottom] boxes to corner-point form
        [[left, top], [right, bottom]]; empty boxes stay []."""
        res = []
        for box in faces:
            if box:
                left, right, top, bottom = box
                res.append([[left, top],
                            [right, bottom]])
            else:
                res.append([])
        return res

    def __get_from_dict(self, files, values):
        """Look up per-file metadata ([] when missing), keyed by the path
        relative to ``data_dir``."""
        res = []
        for f in files:
            f_name = f[f.index(self.data_dir) + len(self.data_dir):]
            # Drop leading/trailing path separators; the original '\/' was an
            # invalid escape sequence that only worked by accident.
            f_name = f_name.strip('\\/')
            res.append(values.get(f_name, []))
        return res

    def __getitem__(self, index):
        return (self.files[index], self.labels[index],
                self.landmarks[index], self.faces[index])

    def __len__(self):
        return len(self.files)
        
        
        
class PSDataset(Dataset):
    """Flat real/fake dataset: files live directly under ``data_dir/real``
    and ``data_dir/fake`` (labels 0 / 1), with landmarks looked up in a
    JSON mapping keyed by the path relative to ``data_dir``."""

    def __init__(self, data_dir, landmarks_path):
        """
        Args:
            data_dir: root directory containing ``real``/``fake`` folders.
            landmarks_path: optional JSON file mapping relative path ->
                landmark list; may be falsy to skip landmark loading.
        """
        super().__init__()
        self.data_dir = data_dir
        self.landmarks_path = landmarks_path

        self.files = []      # absolute file paths
        self.labels = []     # 0 = real, 1 = fake
        self.landmarks = []  # landmark list per file ([] when unknown)
        self.__read_data()
        print(f'landmarks nums: {len([v for v in self.landmarks if v])}')

    def __read_data(self):
        # Tolerate a missing landmarks file, matching the sibling datasets;
        # a dict keeps the .get lookups in __get_landmarks valid.
        landmarks = readjson(self.landmarks_path) if self.landmarks_path else {}
        for label, name in enumerate(['real', 'fake']):
            sub_dir = f'{self.data_dir}/{name}'
            if os.path.exists(sub_dir):
                files_sub = get_file_paths(sub_dir)
                self.files.extend(files_sub)
                self.labels.extend([label] * len(files_sub))
                self.landmarks.extend(self.__get_landmarks(files_sub, landmarks))

    def __get_landmarks(self, files, landmarks):
        """Return the landmark entry for each file ([] when missing),
        keyed by the path relative to ``data_dir``."""
        res = []
        for f in files:
            f_name = f[f.index(self.data_dir) + len(self.data_dir):]
            # Drop leading/trailing path separators; the original '\/' was an
            # invalid escape sequence that only worked by accident.
            f_name = f_name.strip('\\/')
            res.append(landmarks.get(f_name, []))

        return res

    def __getitem__(self, index):
        return self.files[index], self.labels[index], self.landmarks[index]

    def __len__(self):
        return len(self.files)
        

def get_dataloader(data_dir, landmarks_path, batch_size, img_size, crop_scale, mode='train'):
    """Assemble a shuffled DataLoader over ``PSDataset`` using the
    self-blend collate function built from ``img_size``/``crop_scale``."""
    image_trans, mask_trans = get_trans(img_size[0], img_size[1], crop_scale)
    return DataLoader(
        PSDataset(data_dir, landmarks_path),
        batch_size,
        shuffle=True,
        collate_fn=get_collate_fn(mode, image_trans, mask_trans, img_size),
    )

def get_dataloader_hj(data_dir, landmarks_path, batch_size, img_size, crop_scale, mode='train'):
    """Assemble a shuffled DataLoader over ``PSDatasethj`` (HJ directory
    layout) using the self-blend collate function."""
    image_trans, mask_trans = get_trans(img_size[0], img_size[1], crop_scale)
    return DataLoader(
        PSDatasethj(data_dir, landmarks_path),
        batch_size,
        shuffle=True,
        collate_fn=get_collate_fn(mode, image_trans, mask_trans, img_size),
    )



def get_dataloader_sbi(data_dir, landmarks_path, faces_path, batch_size, img_size, mode='train'):
    """Assemble a shuffled DataLoader over ``PSDatasetSBI`` using the
    SBI collate function."""
    return DataLoader(
        PSDatasetSBI(data_dir, landmarks_path, faces_path, phase=mode),
        batch_size,
        shuffle=True,
        collate_fn=get_collate_fn_sbi(mode, img_size),
    )