import torch
from PIL import Image
import numpy as np
from pycocotools.coco import COCO


class CocoForMaskRCNNDataset(torch.utils.data.Dataset):
    """COCO-format dataset yielding ``(image, target)`` pairs in the dict
    layout expected by torchvision Mask R-CNN style models.

    Parameters
    ----------
    images_root : str
        Directory containing the image files named by the annotation file.
    annotations : str
        Path to a COCO-style JSON annotation file.
    transforms : callable, optional
        Called as ``transforms(image, target)`` and must return the pair.
    entry_type : sequence of str, optional
        Category names to keep. ``None`` (or an empty sequence) keeps all
        categories, matching pycocotools' ``getCatIds(catNms=[])`` behavior.
    """

    def __init__(self, images_root, annotations, transforms=None, entry_type=None):
        self.imDir = images_root
        self.annFile = annotations
        # None sentinel instead of a mutable default argument ([] would be
        # shared across every instance that relies on the default).
        self.entry_type = [] if entry_type is None else entry_type

        self.transforms = transforms

        self.coco = COCO(self.annFile)
        self.catIds = self.coco.getCatIds(catNms=self.entry_type)
        self.imgIds = self.coco.getImgIds(catIds=self.catIds)

    def __getitem__(self, idx):
        """Return ``(image, target)`` for the image at position *idx*.

        ``target`` follows the torchvision detection reference format:
        ``boxes`` (FloatTensor[N, 4], xyxy), ``labels`` (Int64Tensor[N]),
        ``masks`` (UInt8Tensor[N, H, W]), ``image_id`` (Int64Tensor[1]),
        ``area`` (FloatTensor[N]) and ``iscrowd`` (Int64Tensor[N]).
        """
        img = self.coco.loadImgs(self.imgIds[idx])[0]
        im_path = f'{self.imDir}/{img["file_name"]}'
        image = Image.open(im_path).convert("RGB")

        # iscrowd=None keeps both crowd and non-crowd annotations.
        annIds = self.coco.getAnnIds(imgIds=img['id'], catIds=self.catIds, iscrowd=None)
        anns = self.coco.loadAnns(annIds)
        num_objs = len(anns)

        # One binary mask per instance, stacked into (N, H, W) uint8.
        masks = [self.coco.annToMask(ann) for ann in anns]
        torch_masks = torch.as_tensor(np.array(masks, dtype=np.uint8), dtype=torch.uint8)

        boxes = []
        area = []
        iscrowd = []
        for ann in anns:
            # COCO stores "bbox" as [x, y, width, height]; torchvision
            # models expect [x0, y0, x1, y1].
            x, y, w, h = ann['bbox']
            boxes.append([x, y, x + w, y + h])
            area.append(ann['area'])
            # Read the real crowd flag (the original hard-coded zeros), so
            # crowd instances are actually ignored during COCO evaluation.
            iscrowd.append(ann.get('iscrowd', 0))

        target = {
            # reshape keeps the (0, 4) shape for images with no annotations,
            # where torch.as_tensor([]) alone would give shape (0,).
            "boxes": torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4),
            # Single-class setup: every instance is foreground class 1
            # (0 is always the background class).
            "labels": torch.ones((num_objs,), dtype=torch.int64),
            "masks": torch_masks,
            # Unique per image; used during evaluation.
            "image_id": torch.tensor([self.imgIds[idx]]),
            # BUG FIX: the original assigned the boxes tensor here. "area"
            # must hold the annotation areas, which the COCO metric uses to
            # split scores between small/medium/large objects.
            "area": torch.as_tensor(area, dtype=torch.float32),
            "iscrowd": torch.as_tensor(iscrowd, dtype=torch.int64),
        }

        if self.transforms is not None:
            image, target = self.transforms(image, target)

        return image, target

    def get_categories(self):
        """Return the COCO category dict (category id -> category record)."""
        return self.coco.cats

    def __len__(self):
        """Number of images that contain at least one selected category."""
        return len(self.imgIds)
