import os,shutil
import numpy as np
import torch
import torch.utils.data
from PIL import Image
import sys,glob
import xml.etree.ElementTree as ET
import albumentations as albu
import cv2, tqdm
from albumentations.pytorch import ToTensorV2
import matplotlib.pyplot as plt
from torchvision.transforms import *
import matplotlib.patches as patches
from PIL import Image, ImageOps
import random
from skimage.measure import label, regionprops
from torchvision.transforms import ToTensor


class pad_to_square(object):
    """Pad a PIL image on the right/bottom edge so it becomes square.

    The image content stays anchored at the top-left corner; only the
    short side is extended (with the default black fill of ImageOps.expand).
    """

    def __call__(self, image):
        w, h = image.size
        side = max(w, h)
        # expand() takes (left, top, right, bottom); exactly one of the
        # two deltas below is non-zero (both zero for an already-square image)
        return ImageOps.expand(image, (0, 0, side - w, side - h))


class random_black_center_patch(object):
    """Occlusion augmentation: black out a random central patch of some boxes.

    With probability 0.6, picks a random subset of the ground-truth boxes in
    ``targets["boxes"]`` and fills a centered sub-rectangle (10%-60% of the
    box size per axis) with black, in place on the PIL image.

    Args (of __call__):
        image: PIL image, modified in place and also returned.
        targets: dict with key "boxes" -> float tensor of [xmin, ymin, xmax, ymax].

    Returns:
        The (possibly modified) image.
    """

    def __call__(self, image, targets):
        boxes = targets["boxes"].numpy()
        # Nothing to occlude for images without annotations; the original
        # crashed here with random.randint(1, 0) on an empty box list.
        if len(boxes) == 0:
            return image

        if random.random() < 0.6:
            # sample only after the probability gate so RNG draws aren't wasted
            indices = random.sample(range(len(boxes)), k=random.randint(1, len(boxes)))
            for i in indices:
                xmin, ymin, xmax, ymax = boxes[i]
                width = xmax - xmin
                height = ymax - ymin
                center_x = (xmax + xmin) / 2
                center_y = (ymax + ymin) / 2

                # patch covers `ratio` of the box size, centered on the box
                ratio = random.uniform(0.1, 0.6)
                start_x = int(center_x - width * ratio / 2)
                end_x = int(center_x + width * ratio / 2)
                start_y = int(center_y - height * ratio / 2)
                end_y = int(center_y + height * ratio / 2)

                # paste with a color fills the box region with black directly
                # (equivalent to pasting an all-black image of that size)
                image.paste((0, 0, 0), (start_x, start_y, end_x, end_y))
        return image
                
        
        
def albu_transform(image, boxes, labels):
    """Apply the heavy geometric + photometric training pipeline to one sample.

    Args:
        image: numpy HWC uint8 image.
        boxes: array of absolute [xmin, ymin, xmax, ymax] boxes.
        labels: per-box category ids (must match len(boxes) one-to-one).

    Returns:
        (tensor image, numpy boxes, numpy category ids) after augmentation;
        boxes that get cropped away are dropped together with their labels.
    """
    pipeline = [
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        albu.RandomScale(scale_limit=0.2, p=0.5),  ## scales in [1-scale, 1+scale]
        albu.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=30, val_shift_limit=20, always_apply=False, p=0.5),
        albu.Rotate(limit=20, interpolation=cv2.INTER_LINEAR, border_mode=cv2.BORDER_CONSTANT, always_apply=False, p=0.5, ),
        albu.RandomSizedBBoxSafeCrop(width=1000, height=1000, erosion_rate=0.2),
        albu.RandomShadow(shadow_roi=(0, 0.5, 1, 1), num_shadows_lower=1, num_shadows_upper=2, shadow_dimension=5, always_apply=False,p=0.5,),
        albu.RGBShift(p=0.5),
        albu.RandomGamma(gamma_limit=(80, 120), eps=1e-7, always_apply=False, p=0.5),
        albu.RandomRotate90(),
        albu.CoarseDropout(max_holes=8, max_height=200, max_width=200, min_holes=1, min_height=50, min_width=50, fill_value=0, always_apply=False, p=1,),
        albu.RandomRain(),
        albu.RandomCrop(height=1000,width=1000),
        albu.Blur(blur_limit=6, p=0.5),
        albu.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, brightness_by_max=True,  always_apply=False, p=1),
        albu.CLAHE(p=0.5),

        ## divide by 255 then subtract the ImageNet mean
        albu.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0, always_apply=False, p=1.0),
        ToTensorV2(),
    ]

    ## bbox_params: pay close attention to the `format` choice
    # "coco": [xmin, ymin, width, height], absolute coordinates
    # "pascal_voc": [xmin, ymin, xmax, ymax], absolute coordinates
    # "yolo": [x, y, width, height], e.g. [0.1, 0.2, 0.3, 0.4]; normalized coordinates
    #     `x`, `y` - normalized bbox center; `width`, `height` - normalized bbox width and height.
    # "albumentations": [xmin, ymin, xmax, ymax], normalized; e.g. [0.2, 0.3, 0.4, 0.5].
    bbox_cfg = {'format': 'pascal_voc', 'min_area': 0, 'min_visibility': 0, 'label_fields': ['category_id']}

    aug = albu.Compose(transforms=pipeline, bbox_params=bbox_cfg)

    # run the pipeline; label count must correspond one-to-one with boxes
    out = aug(image=image, bboxes=boxes.copy().tolist(), category_id=labels.copy())
    return out["image"], np.array(out["bboxes"]), np.array(out["category_id"])


class FoodDataset(torch.utils.data.Dataset):
    """Pascal-VOC style food detection dataset (boxes + labels, no masks).

    Expects ``data_dir/JPEGImages/*.jpg`` and, for the "train" split,
    matching ``data_dir/Annotations/*.xml`` files.
    """

    CLASSES = (
        "__background__ ",
        "food"
    )

    def __init__(self, data_dir, split, size=512, transform=None, use_difficult=False):
        """
        Args:
            data_dir: dataset root containing JPEGImages/ (and Annotations/ for train).
            split: "train" enables annotation loading and augmentation.
            size: target size hint (not used by this class itself).
            transform: optional callable ``(image, target) -> (image, target)``.
            use_difficult: keep objects marked ``<difficult>1</difficult>`` when True.
        """
        self.root = data_dir
        self.image_set = split
        self.transform = transform
        self.split = split
        self.size = size
        # _preprocess_annotation reads this flag; the original never set it
        # and crashed with AttributeError on the first parsed <object>.
        self.keep_difficult = use_difficult

        self.images = glob.glob(os.path.join(self.root, "JPEGImages/*.jpg"))
        if self.split == "train":
            self.annos = [p.replace(".jpg", ".xml").replace("JPEGImages", "Annotations") for p in self.images]

        cls = FoodDataset.CLASSES  # all class names; index 0 is __background__
        self.class_to_ind = dict(zip(cls, range(len(cls))))
        self.categories = dict(zip(range(len(cls)), cls))

    def __getitem__(self, index):
        src = Image.open(self.images[index], mode="r").convert("RGB")

        if self.split == "train":
            target = self.get_groundtruth(index)

            # occlusion augmentation: black out random patches inside GT boxes
            src = random_black_center_patch()(src, target)

            # pixel-level augmentation (noise / blur / contrast)
            src = self.albu_transform_tr(src)

            # NOTE(review): removed leftover matplotlib debug plotting here —
            # plt.show() blocked every __getitem__ call during training.

            # second transform; the original called self.transform(target)
            # without the image and returned an unbound `img` when transform
            # was None
            img = src
            if self.transform:
                img, target = self.transform(src, target)
            return img, target

        else:
            target = None
            img = src
            if self.transform:
                img, target = self.transform(src, target)
            return src, img, os.path.basename(self.images[index])

    def __len__(self):
        return len(self.images)

    def albu_transform_tr(self, image):
        """Pixel-level training augmentation (no geometry, so boxes stay valid).

        Args:
            image: PIL image.

        Returns:
            Augmented PIL image of the same size.
        """
        image = np.asarray(image)
        # NOTE(review): the IAA* transforms were removed in albumentations
        # >= 1.0 — confirm the pinned version, or switch to GaussNoise/Sharpen.
        augument = albu.Compose(
            [
                albu.OneOf([
                    albu.IAAAdditiveGaussianNoise(loc=0, scale=(0.01 * 255, 0.05 * 255), per_channel=False, always_apply=False, p=0.5),
                    albu.GaussNoise(var_limit=(10.0, 20.0), mean=0, always_apply=False, p=0.5),
                ], p=0.2),

                albu.OneOf([
                    albu.MotionBlur(blur_limit=(3), p=.2),
                    albu.MedianBlur(blur_limit=3, p=0.1),
                    albu.Blur(blur_limit=3, p=0.1),
                ], p=0.2),

                albu.OneOf([
                    albu.IAASharpen(),
                    albu.IAAEmboss(),  # emboss effect
                ], p=0.3),

                albu.RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.1, brightness_by_max=True, always_apply=False, p=0.5),
            ]
        )
        aug_image = augument(image=image)['image']
        return Image.fromarray(aug_image)

    def get_groundtruth(self, index):
        """Parse the VOC xml for `index` into {"boxes", "labels"} tensors."""
        anno = ET.parse(self.annos[index]).getroot()
        img_info = self.get_img_info(index)
        return self._preprocess_annotation(anno, img_info["height"], img_info["width"])

    def _preprocess_annotation(self, target, height, width):
        """Collect boxes (expanded by 3 px, clamped to the image) and labels.

        Handles images with zero <object> entries by returning empty tensors;
        the original crashed indexing an empty array.
        """
        boxes = []
        gt_classes = []

        for obj in target.iter("object"):
            difficult = int(obj.find("difficult").text) == 1
            if not self.keep_difficult and difficult:
                continue
            bb = obj.find("bndbox")
            xmin = int(bb.find("xmin").text)
            ymin = int(bb.find("ymin").text)
            xmax = int(bb.find("xmax").text)
            ymax = int(bb.find("ymax").text)

            # expand every box by 3 px on each side (clamped below)
            boxes.append([xmin - 3, ymin - 3, xmax + 3, ymax + 3])
            gt_classes.append(1)  # single foreground class "food"

        # reshape(-1, 4) keeps the clamping slices valid for zero boxes
        boxes = np.array(boxes, dtype=np.float64).reshape(-1, 4)
        boxes[boxes[:, 0] < 0, 0] = 0
        boxes[boxes[:, 1] < 0, 1] = 0
        boxes[boxes[:, 2] >= width, 2] = width - 1
        boxes[boxes[:, 3] >= height, 3] = height - 1

        return {"boxes": torch.tensor(boxes, dtype=torch.float32),
                "labels": torch.tensor(gt_classes, dtype=torch.int64),
                }

    def get_img_info(self, index):
        """Return {"height", "width"} without decoding pixel data.

        Uses a context manager so the underlying file handle is closed
        (`del src` in the original did not reliably close it).
        """
        with Image.open(self.images[index], mode="r") as src:
            width, height = src.size
        return {"height": height, "width": width}

    def map_class_id_to_class_name(self, class_id):
        """Map an integer class id back to its name string."""
        return FoodDataset.CLASSES[class_id]


class FoodDataset_with_mask(torch.utils.data.Dataset):
    """Food dataset variant that also builds instance masks from <polygen> polygons.

    Expects ``data_dir/JPEGImages/*.jpg`` and, for the "train" split, matching
    ``data_dir/Annotations/*.xml`` files whose objects carry a <polygen> node
    with comma-separated <points_x>/<points_y> lists.
    """

    CLASSES = (
        "__background__ ",
        "food"
    )

    def __init__(self, data_dir, split, size=512, use_difficult=False, transforms=None):
        """
        Args:
            data_dir: dataset root containing JPEGImages/ (and Annotations/ for train).
            split: "train" enables annotation loading and augmentation.
            size: target size hint (not used by this class itself).
            use_difficult: keep objects flagged difficult (polygons carry no flag,
                so this currently has no effect).
            transforms: optional callable ``(image, target) -> (image, target)``.
        """
        self.root = data_dir
        self.image_set = split
        self.keep_difficult = use_difficult
        self.transforms = transforms
        self.split = split
        self.size = size

        self.images = glob.glob(os.path.join(self.root, "JPEGImages/*.jpg"))
        if self.split == "train":
            self.annos = [p.replace(".jpg", ".xml").replace("JPEGImages", "Annotations") for p in self.images]

        # use this class's own table (the original referenced FoodDataset.CLASSES,
        # an identical copy-paste dependency on the sibling class)
        cls = FoodDataset_with_mask.CLASSES  # index 0 is __background__
        self.class_to_ind = dict(zip(cls, range(len(cls))))
        self.categories = dict(zip(range(len(cls)), cls))

    def transform_train(self, image):
        """Optional color-jitter augmentation for a PIL training image."""
        trans = Compose([ColorJitter(brightness=0.4, contrast=0.2, saturation=0.2, hue=0)])
        return trans(image)

    def __getitem__(self, index):
        src = Image.open(self.images[index], mode="r").convert("RGB")

        if self.split == "train":
            target = self.get_groundtruth(index)

            # occlusion augmentation: black out random patches inside GT boxes
            src = random_black_center_patch()(src, target)

            # pixel-level augmentation (noise / blur / contrast)
            src = self.albu_transform_tr(src)

            # second transform; guard against the default transforms=None
            # (the original called None(...) and raised TypeError)
            img = src
            if self.transforms:
                img, target = self.transforms(src, target)
            return img, target

        else:
            target = None
            img = src
            if self.transforms:
                img, target = self.transforms(src, target)
            return src, img, os.path.basename(self.images[index])

    def __len__(self):
        return len(self.images)

    def albu_transform_tr(self, image):
        """Pixel-level training augmentation (no geometry, so boxes/masks stay valid).

        Args:
            image: PIL image.

        Returns:
            Augmented PIL image of the same size.
        """
        image = np.asarray(image)
        # NOTE(review): the IAA* transforms were removed in albumentations
        # >= 1.0 — confirm the pinned version, or switch to GaussNoise/Sharpen.
        augument = albu.Compose(
            [
                albu.OneOf([
                    albu.IAAAdditiveGaussianNoise(loc=0, scale=(0.01 * 255, 0.05 * 255), per_channel=False, always_apply=False, p=0.5),
                    albu.GaussNoise(var_limit=(10.0, 20.0), mean=0, always_apply=False, p=0.5),
                ], p=0.2),

                albu.OneOf([
                    albu.MotionBlur(blur_limit=(3), p=.2),
                    albu.MedianBlur(blur_limit=3, p=0.1),
                    albu.Blur(blur_limit=3, p=0.1),
                ], p=0.2),

                albu.OneOf([
                    albu.IAASharpen(),
                    albu.IAAEmboss(),  # emboss effect
                ], p=0.3),

                albu.RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.1, brightness_by_max=True, always_apply=False, p=0.5),
            ]
        )
        aug_image = augument(image=image)['image']
        return Image.fromarray(aug_image)

    def get_groundtruth(self, index):
        """Parse the xml for `index` into {"boxes", "labels", "masks"} tensors."""
        anno = ET.parse(self.annos[index]).getroot()
        img_info = self.get_img_info(index)
        return self._preprocess_annotation(anno, img_info["height"], img_info["width"])

    def _preprocess_annotation(self, target, height, width):
        """Turn <polygen> polygons into clamped boxes, labels and binary masks.

        Handles images with zero <object> entries by returning empty tensors;
        the original crashed in np.stack / empty-array indexing.
        """
        boxes = []
        gt_classes = []
        masks = []

        for obj in target.iter("object"):
            bb = obj.find("polygen")  # NOTE: tag name is misspelled in the data
            points_x = bb.find("points_x").text
            points_y = bb.find("points_y").text

            # the annotation tool writes a trailing comma; strip whitespace
            # first so "1,2,\n" is handled too
            points_x = points_x.strip().rstrip(",")
            points_y = points_y.strip().rstrip(",")
            xs = [float(v) for v in points_x.split(",")]
            ys = [float(v) for v in points_y.split(",")]

            # bounding box of the polygon, expanded by 3 px (clamped below)
            box = [min(xs) - 3, min(ys) - 3, max(xs) + 3, max(ys) + 3]

            # rasterize the polygon into a binary mask
            # (np.int32 replaces np.int, which was removed in NumPy >= 1.24)
            pts = np.vstack((xs, ys)).transpose().astype(np.int32)
            mask = np.zeros((height, width), dtype=np.uint8)
            cv2.fillPoly(mask, [pts], 1)

            masks.append(mask)
            boxes.append(box)
            gt_classes.append(1)  # single foreground class "food"

        # reshape(-1, 4) keeps the clamping slices valid for zero boxes
        boxes = np.array(boxes, dtype=np.float64).reshape(-1, 4)
        boxes[boxes[:, 0] < 0, 0] = 0
        boxes[boxes[:, 1] < 0, 1] = 0
        boxes[boxes[:, 2] >= width, 2] = width - 1
        boxes[boxes[:, 3] >= height, 3] = height - 1

        masks = np.stack(masks, axis=0) if masks else np.zeros((0, height, width), dtype=np.uint8)
        return {"boxes": torch.tensor(boxes, dtype=torch.float32),
                "labels": torch.tensor(gt_classes, dtype=torch.int64),
                "masks": torch.tensor(masks, dtype=torch.uint8)
                }

    def get_img_info(self, index):
        """Return {"height", "width"} without decoding pixel data.

        Uses a context manager so the underlying file handle is closed
        (`del src` in the original did not reliably close it).
        """
        with Image.open(self.images[index], mode="r") as src:
            width, height = src.size
        return {"height": height, "width": width}

    def map_class_id_to_class_name(self, class_id):
        """Map an integer class id back to its name string."""
        return FoodDataset_with_mask.CLASSES[class_id]

if __name__ == "__main__":
    import tqdm

    # Dataset root with JPEGImages/ and Annotations/ subdirectories.
    path = "/media/retoo/RetooDisk/wanghui/Data/Food/0624_U2NET_TEST_DATA"

    # Smoke test: walk the whole training split once so that any broken
    # image / annotation pair raises immediately.
    dataset = FoodDataset_with_mask(path, "train")
    for _ in tqdm.tqdm(dataset, total=len(dataset)):
        pass




