import os
import cv2
import random
import torch
import dlib
import numpy as np
from PIL import Image
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms

def make_dataset(root):
    """Collect paths of PNG files directly under *root*.

    *root* may be a single directory path or a tuple/list of them; the
    results are flattened into one list. A non-existent directory simply
    contributes nothing (no error).
    """
    result = []
    if isinstance(root, (tuple, list)):
        # Multiple roots: recurse and flatten.
        for r in root:
            result += make_dataset(r)
    elif os.path.exists(root):
        # NOTE: matches any name ending in 'png' (no dot), as before.
        result = [os.path.join(root, name)
                  for name in os.listdir(root) if name.endswith('png')]
    return result


def load_data(image_path, unknown_range):
    """Load a merged sample image and derive alpha matte and trimaps.

    The file at *image_path* packs [image | alpha | ignore] side by side
    along the width axis. *unknown_range* is a (low, high) pair of alpha
    thresholds delimiting the "unknown" trimap band.

    Returns four PIL images: RGB image, alpha scaled to [0, 1] (float),
    trimap, and a randomly-dilated trimap.

    Fix: ``np.bool``/``np.float`` aliases were removed in NumPy 1.24 —
    replaced with the builtin ``bool``/``float``.
    """
    from infrastructure.photo_dl.infrastructure import utils
    merge = utils.read(image_path)

    # Split the packed [image | alpha | ignore] strip along the width.
    image, alpha, ignore = np.split(merge, 3, 1)
    alpha = cv2.cvtColor(alpha, cv2.COLOR_BGR2GRAY)
    ignore = cv2.cvtColor(ignore, cv2.COLOR_BGR2GRAY)

    image = utils.resize(image, target_h=672)[:576, ...]
    alpha = utils.resize(alpha, target_h=672)[:576, ...]
    # Nearest-neighbor keeps the ignore mask binary under resizing.
    ignore = utils.resize(ignore, target_h=672, interpolation=cv2.INTER_NEAREST)[:576, ...]

    ignore[...] = 0  # todo no ignore

    # Trimap labels: back 0, front 1, unknown 2, ignore 255.
    # NOTE: trimap aliases the `ignore` buffer, which is all zeros here.
    trimap = ignore
    trimap[alpha > min(unknown_range)] = 2
    trimap[alpha > max(unknown_range)] = 1
    # image = utils.merge(image, (trimap != 255).astype(float), (128, 128, 128))

    # Dilate the ignore region so its immediate border is excluded too.
    ignore_dilated = cv2.dilate((trimap == 255).astype(np.uint8), None, iterations=2)
    trimap[ignore_dilated.astype(bool)] = 255
    alpha[trimap == 255] = 0

    # Randomly grow the unknown band into background and foreground so
    # the trimap is not perfectly tight around the matte.
    trimap_random = trimap.copy()
    unsure = ((alpha > 0) & (alpha < 255)).astype(np.uint8)

    r = random.randint(0, 6)
    if r > 0:
        unsure_dilated = cv2.dilate(unsure, None, iterations=r)
        trimap_random[unsure_dilated.astype(bool) & (trimap_random == 0)] = 2
    r = random.randint(0, 6)
    if r > 0:
        unsure_dilated = cv2.dilate(unsure, None, iterations=r)
        trimap_random[unsure_dilated.astype(bool) & (trimap_random == 1)] = 2

    # image[..., ::-1] converts BGR (OpenCV) to RGB for PIL.
    return Image.fromarray(image[..., ::-1]), Image.fromarray(alpha / 255.), \
           Image.fromarray(trimap), Image.fromarray(trimap_random)


class SegmentPPAlignImage(Dataset):
    """Dataset of merged matting samples discovered via ``make_dataset``.

    Each item is (image, trimap, trimap_random, alpha), optionally followed
    by the source path when *path_info* is true.
    """

    def __init__(self, root, joint_transform=None, path_info=False, unknown_range=(10, 240)):
        self.data = make_dataset(root)
        self.joint_transform = joint_transform
        self.path_info = path_info
        self.unknown_range = unknown_range

    def __getitem__(self, item):
        path = self.data[item]
        image, alpha, trimap, trimap_random = load_data(path, self.unknown_range)

        # The joint transform acts on (image, alpha) and (trimap, trimap_random)
        # pairs together so spatial augmentations stay aligned.
        if self.joint_transform is not None:
            (image, alpha), (trimap, trimap_random) = self.joint_transform(
                (image, alpha), (trimap, trimap_random))

        if self.path_info:
            return (image, trimap, trimap_random, alpha, path)
        return (image, trimap, trimap_random, alpha)

    def __len__(self):
        return len(self.data)

def center_crop(img):
    """Crop the largest centered square from a PIL image.

    Fix: the previous box arithmetic actually *padded* out to the long
    side (using negative crop coordinates) and produced a non-square box
    when the side difference was odd. Now always returns a square of the
    short side, centered.
    """
    w, h = img.size
    side = min(w, h)
    left = (w - side) // 2
    top = (h - side) // 2
    return img.crop((left, top, left + side, top + side))

detector = dlib.get_frontal_face_detector()
def face_bnd_box(img):
    """Detect faces in *img* and return the first one's bounding box.

    Returns (left, top, right, bottom) in pixels, or None when no face
    is found.
    """
    detections = detector(img)
    if not detections:
        return None
    first = detections[0]
    return (first.left(), first.top(), first.right(), first.bottom())


def area(bnd):
    """Area of an (x1, y1, x2, y2) box; 0 for empty/inverted boxes.

    Fix: the previous ``max(0, w * h)`` returned a spurious positive area
    when *both* extents were negative (e.g. the "intersection" of two
    disjoint boxes); each extent is now clamped individually.
    """
    return max(0, bnd[2] - bnd[0]) * max(0, bnd[3] - bnd[1])

def overlap(bnd_face, bnd_crop):
    """Fraction of *bnd_face* covered by *bnd_crop* (boxes are x1,y1,x2,y2).

    Fixes: when the boxes were disjoint, both intersection extents went
    negative and their product came out positive, yielding a bogus
    overlap; and a degenerate face box raised ZeroDivisionError. The
    intersection extents are now clamped individually and an empty face
    box reports 0.0 overlap.
    """
    iw = max(0, min(bnd_face[2], bnd_crop[2]) - max(bnd_face[0], bnd_crop[0]))
    ih = max(0, min(bnd_face[3], bnd_crop[3]) - max(bnd_face[1], bnd_crop[1]))
    face_area = max(0, bnd_face[2] - bnd_face[0]) * max(0, bnd_face[3] - bnd_face[1])
    if face_area == 0:
        return 0.0
    return (iw * ih) / face_area

def random_crop_params(shape, percentage=0.5):
    """Sample a random square crop box for an image of (h, w) *shape*.

    The square's side is uniformly drawn from [percentage, 1) of the
    longer image side; along the shorter side the box may extend past the
    image bounds (negative offset), which PIL's ``crop`` pads. Returns an
    (x1, y1, x2, y2) tuple.
    """
    h, w = shape
    long_side, short_side = max(h, w), min(h, w)

    frac = percentage + (1 - percentage) * random.random()
    side = int(frac * long_side)
    offset_long = random.randint(0, long_side - side)

    # When the square is larger than the short side, shift it outward
    # (negative offset) instead of inward.
    sign = 1 if side <= short_side else -1
    offset_short = sign * random.randint(0, abs(side - short_side))

    # Assemble (x, y, x, y) depending on which axis is the short one.
    if h == short_side:
        box = (offset_long, offset_short, offset_long + side, offset_short + side)
    else:
        box = (offset_short, offset_long, offset_short + side, offset_long + side)

    assert (box[3] - box[1]) == (box[2] - box[0])
    return box

class DatasetMatting(Dataset):
    """Matting dataset pairing images under *root* with annotations under
    *root_anno* by filename stem.

    Each item is a center-cropped, square-resized, normalized image tensor
    plus a single-channel alpha tensor in [0, 1] (and the annotation path
    when *return_fn* is true).
    """

    def __init__(self, root, root_anno, target_size=512, return_fn=False):
        super().__init__()
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
        ])
        self.target_size = target_size
        self.return_fn = return_fn

        # Index both directories by filename stem (name without extension).
        stems_img = {f.rsplit('.', 1)[0]: f for f in os.listdir(root)}
        stems_anno = {f.rsplit('.', 1)[0]: f for f in os.listdir(root_anno)}
        # Keep only stems present in both the image and annotation folders.
        common = set(stems_img.keys()) & set(stems_anno.keys())
        print(len(common))
        self.data = [{'img': os.path.join(root, stems_img[key]),
                      'anno': os.path.join(root_anno, stems_anno[key])}
                     for key in common]
        # self.check()

    def check(self):
        """Print pairs whose image/annotation aspect ratios disagree."""
        for pair in self.data:
            size_img = Image.open(pair['img']).size
            size_anno = Image.open(pair['anno']).size
            # Scale the annotation's first dimension onto the image and
            # compare second dimensions; >= 2 px drift is suspicious.
            if abs(int(size_anno[0] / size_img[0] * size_img[1] - size_anno[1])) >= 2:
                print(pair)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        pair = self.data[i]
        img = Image.open(pair['img']).convert('RGB')
        anno = Image.open(pair['anno'])

        # Bring both to a common resolution (upscale the smaller one).
        if img.size != anno.size:
            if img.size[0] > anno.size[0]:
                anno = anno.resize(img.size)
            else:
                img = img.resize(anno.size)
        assert anno.size == img.size

        target = (self.target_size, self.target_size)
        img = center_crop(img).resize(target)
        anno = center_crop(anno).resize(target)
        assert img.size[0] == img.size[1]

        img = self.transform(img)
        # Last channel of the annotation holds the alpha matte; scale to [0, 1].
        alpha = np.array(anno)[..., -1].astype(np.float32) / 255
        alpha = torch.tensor(alpha).unsqueeze(0)

        if self.return_fn:
            return img, alpha, pair['anno']
        return img, alpha
        

class DatasetPPM(DatasetMatting):
    """DatasetMatting variant that keeps only samples with a detectable
    face, and samples random square crops that retain most of the face.
    """

    def __init__(self, root, root_anno, target_size=512, return_fn=False):
        # Fix: previously forwarded hard-coded target_size=512, return_fn=False,
        # silently discarding the caller's arguments.
        super().__init__(root, root_anno, target_size=target_size, return_fn=return_fn)
        # Detect a face per sample; drop samples where none is found.
        invalid_img = []
        for item in self.data:
            img = cv2.imread(item['img'])
            face_box = face_bnd_box(img)
            if face_box is None:
                invalid_img.append(item)
            else:
                item['face'] = face_box
        for item in invalid_img:
            self.data.remove(item)

    def __getitem__(self, i):
        img = cv2.imread(self.data[i]['img'])
        anno = cv2.imread(self.data[i]['anno'])
        face_box = self.data[i]['face']

        # Rejection-sample a crop covering at least half of the face box.
        # Fix: the loop is now bounded so a pathological face box cannot
        # hang the data loader; after the cap the last candidate is used.
        crop_box = random_crop_params(img.shape[:2])
        for _ in range(100):
            if overlap(face_box, crop_box) >= 0.5:
                break
            crop_box = random_crop_params(img.shape[:2])

        size = (self.target_size, self.target_size)
        img = Image.fromarray(img).crop(crop_box).resize(size)
        anno = Image.fromarray(anno).crop(crop_box).resize(size)
        img = self.transform(img)
        # Last channel of the annotation holds the alpha matte; scale to [0, 1].
        anno = np.array(anno)[..., -1].astype(np.float32) / 255
        # (Removed a dead `trimap = anno.copy(); trimap[trimap > 0.95]`
        # fragment: the indexing expression had no effect and trimap was
        # never used.)
        anno = torch.tensor(anno).unsqueeze(0)

        if self.return_fn:
            return img, anno, self.data[i]['anno']
        else:
            return img, anno

if __name__ == '__main__':
    from tensorboardX import SummaryWriter

    # Smoke test: stream image/alpha pairs to TensorBoard for visual inspection.
    tb_logger = SummaryWriter('./logs/tb_test')
    dataset = DatasetMatting(root='/home/chengk/chk/data/removebg-align/normal3',
                        root_anno='/home/chengk/chk/data/removebg-align/normal3_removebg')
    dataloader = DataLoader(dataset, 1, num_workers=1, drop_last=False, shuffle=False)

    for step, (img, alpha) in enumerate(dataloader):
        # Un-normalize the image from [-1, 1] back to [0, 1] and tile the
        # single-channel alpha to 3 channels so both plot side by side.
        img_plot = np.concatenate([(img.numpy() + 1) / 2,
                                   np.concatenate([alpha.numpy()] * 3, 1),
                                   ], 0)
        tb_logger.add_images('image pairs', img_plot, step, dataformats='NCHW')

    # Fix: close the writer so pending events are flushed to disk.
    tb_logger.close()
        