import numpy as np
import torch
import pycocotools.mask as mask_util
from pycocotools.cocoeval import COCOeval
from pycocotools.coco import COCO
import contextlib
import io


def prepare_results_segm(segmPredicted, target, labels, scores, masks):
    """Append COCO-format segmentation results for one image to *segmPredicted*.

    Each mask is RLE-encoded with pycocotools and stored as a result dict of
    {image_id, category_id, segmentation, score}. The RLE "counts" bytes are
    decoded to str so the results are JSON-serializable.

    Args:
        segmPredicted: list accumulating result dicts across images (mutated).
        target: dict with an 'image_id' tensor for this image.
        labels: per-detection category ids (list).
        scores: per-detection confidence scores (list).
        masks: per-detection binary masks; each indexed as mask[0, :, :]
               (assumes a leading channel dim of size 1 — TODO confirm shape).
    """
    image_id = target['image_id'].item()
    results = []
    for idx, mask in enumerate(masks):
        # pycocotools expects a Fortran-ordered uint8 HxWx1 array.
        rle = mask_util.encode(
            np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F")
        )[0]
        rle["counts"] = rle["counts"].decode("utf-8")
        results.append({
            "image_id": image_id,
            "category_id": labels[idx],
            "segmentation": rle,
            "score": scores[idx],
        })
    segmPredicted.extend(results)


def prepare_results_bbox(bboxPredicted, target, labels, scores, boxes):
    """Append COCO-format bounding-box results for one image to *bboxPredicted*.

    Boxes arrive as (xmin, ymin, xmax, ymax) and are converted to the COCO
    (x, y, width, height) convention before being stored as result dicts of
    {image_id, category_id, bbox, score}.

    Args:
        bboxPredicted: list accumulating result dicts across images (mutated).
        target: dict with an 'image_id' tensor for this image.
        labels: per-detection category ids (list).
        scores: per-detection confidence scores (list).
        boxes: (N, 4) tensor of xyxy boxes.
    """
    image_id = target['image_id'].item()
    # xyxy -> xywh, done once for the whole tensor.
    xmin, ymin, xmax, ymax = boxes.unbind(1)
    xywh = torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)
    bboxPredicted.extend(
        {
            "image_id": image_id,
            "category_id": labels[idx],
            "bbox": box,
            "score": scores[idx],
        }
        for idx, box in enumerate(xywh.tolist())
    )


# def prepare_results_keypoints(coco, target, labels, scores, keypoints):
#     cocovalPrediction = []
#     for k, keypoint in enumerate(keypoints.flatten(start_dim=1).tolist()):
#         cocovalPrediction.append( {
#             "image_id": target['image_id'].item(),
#             "category_id": labels[k],
#             "keypoints": keypoint,
#             "score": scores[k],
#         })
#     return COCO.loadRes(coco, cocovalPrediction)


def accumulate_metrics(segmPredicted, bboxPredicted, targets, predictions):
    """Convert a batch of model predictions into COCO result dicts.

    For each (target, prediction) pair, thresholds the predicted soft masks at
    0.5 and delegates to prepare_results_segm / prepare_results_bbox, which
    append to the shared accumulator lists.

    Args:
        segmPredicted: accumulator for segmentation results (mutated).
        bboxPredicted: accumulator for bbox results (mutated).
        targets: per-image target dicts, each with an 'image_id' tensor.
        predictions: per-image prediction dicts with 'masks', 'scores',
            'labels', and 'boxes' tensors.
    """
    for tgt, pred in zip(targets, predictions):
        binary_masks = pred["masks"].cpu() > 0.5
        label_list = pred["labels"].tolist()
        score_list = pred["scores"].tolist()

        prepare_results_segm(segmPredicted, tgt, label_list, score_list, binary_masks)
        prepare_results_bbox(bboxPredicted, tgt, label_list, score_list, pred["boxes"])
        # if "keypoints" in prediction:
        #     keypoints = prediction["keypoints"]
        #     keypointsPredictedCoco = prepare_results_keypoints(data_loader_test.dataset.coco, target, labels, scores, keypoints)


# Mapping from human-readable metric names to their index in COCOeval.stats
# (the 12-element summary vector produced by COCOeval.summarize()).
# NOTE(review): with pycocotools' default params (maxDets=[1, 10, 100]),
# stats[6:9] are AR@1 / AR@10 / AR@100 — the AR@100/300/1000 labels below
# assume maxDets=[100, 300, 1000]; confirm against the COCOeval params used.
coco_metric_names = {
    'mAP': 0,
    'mAP_50': 1,
    'mAP_75': 2,
    'mAP_s': 3,
    'mAP_m': 4,
    'mAP_l': 5,
    'AR@100': 6,
    'AR@300': 7,
    'AR@1000': 8,
    'AR_s@1000': 9,
    'AR_m@1000': 10,
    'AR_l@1000': 11
}


def compute_metrics(coco, segmPredicted, bboxPredicted):
    """Evaluate accumulated COCO results and return summary metrics.

    Loads the segmentation and bbox result lists into the ground-truth COCO
    object, runs COCOeval for each IoU type, and maps the stats vector to
    named metrics via coco_metric_names. All COCOeval console output is
    swallowed by redirecting stdout.

    Args:
        coco: ground-truth pycocotools COCO object.
        segmPredicted: list of COCO-format segmentation result dicts.
        bboxPredicted: list of COCO-format bbox result dicts.

    Returns:
        dict of {'segm': {...}, 'bbox': {...}} where the inner dicts map
        metric names (see coco_metric_names) to float values.
    """
    sink = io.StringIO()
    with contextlib.redirect_stdout(sink):
        # Insertion order fixes evaluation order: segm first, then bbox.
        loaded = {
            'segm': COCO.loadRes(coco, segmPredicted),
            'bbox': COCO.loadRes(coco, bboxPredicted),
        }

        metrics = {}
        for iou_type, detections in loaded.items():
            evaluator = COCOeval(coco, detections, iou_type)
            evaluator.evaluate()
            evaluator.accumulate()
            evaluator.summarize()

            # summarize() fills evaluator.stats with the 12 standard
            # AP/AR summary values; pick them out by name.
            metrics[iou_type] = {
                name: evaluator.stats[idx]
                for name, idx in coco_metric_names.items()
            }

        return metrics