from mxnet.gluon.data import Dataset
from .pycococreatortools import create_annotation_info,create_image_info
import datetime,os
import json
import numpy as np
import cv2
from tqdm import tqdm
from PIL import Image
class DetectionDataset(Dataset):
    """Abstract base class for bounding-box detection datasets.

    Subclasses must implement ``__getitem__``/``__len__`` and expose a
    ``classes`` attribute (sequence of class names).  ``at_with_image_path``
    must also be implemented to use the export helpers ``to_roidb`` and
    ``to_coco``.  Label arrays are expected in the layout
    ``[x0, y0, x1, y1, class_id, ...]`` per row.
    """

    def __init__(self, *args, **kwargs):
        super(DetectionDataset, self).__init__()

    def __getitem__(self, item):
        raise NotImplementedError()

    def __len__(self):
        raise NotImplementedError()

    def viz(self, indexes=None):
        """Show samples one by one with gluoncv's ``plot_bbox``.

        :param indexes: iterable of sample indices; defaults to the whole
            dataset.
        """
        from gluoncv.utils.viz import plot_bbox
        import matplotlib.pyplot as plt
        if indexes is None:
            indexes = range(len(self))
        for index in indexes:
            sample = self[index]
            img, label = sample[0], sample[1]
            plot_bbox(img, label[:, :4], labels=label[:, 4],
                      class_names=self.classes)
            plt.show()

    @property
    def num_classes(self):
        """Number of entries in ``self.classes``."""
        return len(self.classes)

    @property
    def num_class(self):
        # Alias kept for compatibility with gluoncv-style naming.
        return self.num_classes

    def at_with_image_path(self, idx):
        '''
        Return ``(img_path, bbox)`` for sample ``idx``; implement this if you
        want to use to_coco/to_roidb.
        Note the bbox in this case cannot be transformed.
        '''
        raise NotImplementedError()

    def to_roidb(self, db_path):
        '''
        Compressing this dataset to a pickeled roidb format file, which may
        be helpful if you want to train your own Deformable Convnets Model.

        :param db_path: output path of the pickle file.
        '''
        import pickle
        roidb = []
        for i in tqdm(range(len(self))):
            img_path_ori, bbox = self.at_with_image_path(i)
            img = np.array(Image.open(img_path_ori))

            num_objs = bbox.shape[0]
            # One column per class plus column 0 for background.
            overlaps = np.zeros(shape=(num_objs, self.num_classes + 1),
                                dtype=np.float32)
            for obj_idx in range(num_objs):
                # BUGFIX: the one-hot column must use the background-shifted
                # id (cls + 1) so max_classes agrees with gt_classes below;
                # int() because numpy rejects float indices.
                cls = int(bbox[obj_idx, 4])
                overlaps[obj_idx, cls + 1] = 1.0

            roidb.append({
                "boxes": bbox[:, :4].astype(np.int32),
                "height": img.shape[0],
                "width": img.shape[1],
                "image": img_path_ori,
                "flipped": False,
                "gt_classes": bbox[:, 4].astype(np.int32) + 1,  # 0 = background
                "gt_overlaps": overlaps,
                "max_classes": overlaps.argmax(axis=1),
                "max_overlaps": overlaps.max(axis=1),
            })
        # protocol 0 keeps the dump readable by the (python2-era) Deformable
        # ConvNets loaders; 'with' guarantees the handle is closed.
        with open(db_path, "wb") as f:
            pickle.dump(roidb, f, protocol=0)

    def to_coco(self, anno_path, INFO=None, LICENSES=None):
        '''
        Just convert this data set to coco detection format.
        Currently segmentation is not supported (each instance mask is just
        the filled bounding-box rectangle).

        :param anno_path: path of the output annotation json file.
        :param INFO: optional coco "info" section; a default is supplied.
        :param LICENSES: optional coco "licenses" section; a default is
            supplied.
        '''
        classes = self.classes
        if INFO is None:
            INFO = {
                "description": "Example Dataset",
                "url": "https://github.com/waspinator/pycococreator",
                "version": "0.1.0",
                "year": 2018,
                "contributor": "kohill",
                "date_created": datetime.datetime.utcnow().isoformat(' ')
            }
        if LICENSES is None:
            LICENSES = [
                {
                    "id": 1,
                    "name": "Attribution-NonCommercial-ShareAlike License",
                    "url": "http://creativecommons.org/licenses/by-nc-sa/2.0/"
                }
            ]
        # NOTE(review): category ids start at 1 and classes[0] is skipped —
        # this presumes classes[0] is a background entry; confirm for your
        # dataset.
        CATEGORIES = [
            {
                'id': cat_id,
                'name': classes[cat_id],
                'supercategory': 'root',
            }
            for cat_id in range(1, len(classes))
        ]
        coco_output = {
            "info": INFO,
            "licenses": LICENSES,
            "categories": CATEGORIES,
            "images": [],
            "annotations": []
        }

        # Group boxes by image path; if two samples share a path the later
        # index wins as the image id.
        objs = {}
        objs_id = {}
        for idx in range(len(self)):
            img_path, bbox = self.at_with_image_path(idx)
            objs_id[img_path] = idx
            for x0, y0, x1, y1, cls in bbox[:, :5]:
                objs.setdefault(img_path, []).append([x0, y0, x1, y1, cls])

        segmentation_id = 1
        for image_path in tqdm(objs.keys()):
            image_id = objs_id[image_path]
            image = cv2.imread(image_path)
            image_info = create_image_info(
                image_id, os.path.basename(image_path),
                [image.shape[1], image.shape[0]])
            coco_output["images"].append(image_info)

            for x0, y0, x1, y1, class_id in objs[image_path]:
                assert x1 - x0 >= 0, x0
                assert y1 - y0 >= 0, y0
                category_info = {'id': int(class_id), 'is_crowd': 0}
                # Rectangular mask covering the box.  BUGFIX: coordinates are
                # cast to int — numpy raises on float slice indices.
                binary_mask = np.zeros(shape=image.shape[:2])
                binary_mask[int(y0):int(y1), int(x0):int(x1)] = 1
                annotation_info = create_annotation_info(
                    segmentation_id, image_id, category_info,
                    binary_mask=binary_mask,
                    image_size=[image.shape[1], image.shape[0]], tolerance=2,
                    bounding_box=np.array([x0, y0, x1 - x0, y1 - y0]))
                # BUGFIX: the original asserted annotation_info is not None
                # right before this check, which made the fallback branch
                # unreachable; create_annotation_info can return None for
                # degenerate masks, and such boxes are now skipped with a
                # diagnostic print instead of crashing.
                if annotation_info is not None:
                    coco_output["annotations"].append(annotation_info)
                else:
                    print(x0, y0, x1, y1, binary_mask.shape)
                segmentation_id = segmentation_id + 1

        with open(anno_path, 'w') as output_json_file:
            json.dump(coco_output, output_json_file)

    def coco_detection_evaluate(self, bbox_result, anno_path=None):
        """
        :param anno_path: The path of coco annotation file.  If None, the
            dataset's own loaded ground truth (self._coco[0]) is used.
        :param bbox_result: A list restoring the detection result.
            Each item is a tuple, like (image_path, x0, y0, x1, y1, class id, cls)
        class id is an int number in [0, num_classes-1], where num_classes is 80.
        :return: return coco scoring result (the evaluated COCOeval object).
        """
        from lib.pycocotools.coco import COCO
        from lib.pycocotools.cocoeval import COCOeval
        if anno_path is None:
            assert len(self._coco) == 1
            cocoGt = self._coco[0]
        else:
            cocoGt = COCO(anno_path)
        image2id = {img["file_name"]: img["id"]
                    for img in cocoGt.dataset["images"]}
        catids = cocoGt.getCatIds()
        coco_result_json = []
        imgIds = []
        for image_path, x0, y0, x1, y1, classid, cls in bbox_result:
            imgIds.append(image2id[image_path])
            coco_result_json.append({
                "image_id": int(image2id[image_path]),
                "category_id": catids[classid],
                # COCO boxes are [x, y, width, height].
                "bbox": [float(x0), float(y0), float(x1 - x0), float(y1 - y0)],
                "score": float(cls),
            })
        # BUGFIX: result.json used to be opened but never written nor closed
        # (leaked handle, empty file); dump the detections and close properly.
        with open("result.json", "wt") as f_coco_json:
            json.dump(coco_result_json, f_coco_json)

        cocoDt = cocoGt.loadRes(coco_result_json)
        cocoEval = COCOeval(cocoGt, cocoDt, "bbox")
        cocoEval.params.imgIds = imgIds
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        return cocoEval
# Backward-compatible alias for code that still imports the old class name.
BBOX_Base_Dataset = DetectionDataset