# torch
import torch
import torch.utils.data as data
# coco
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
# utility
import numpy as np
import cv2
import matplotlib.pyplot as plt
import json
import pickle
# system
import os.path as osp

COCO_ROOT = '/media/slytheringe/Data/develop/Deep Learning/DataSet/coco/'
ANNOTATIONS = 'annotations'
INSTANCES_SET = 'instances_{}.json'
COCO_API = 'PythonAPI'

# The 80 COCO detection category names, in official category order.
# BUGFIX: 'fire hydrant' is a single COCO category (it was split into
# 'fire' and 'hydrant'), and 'microwave' / 'oven' are two separate
# categories (they were merged into 'microwave oven').  The total count
# stays at 80, but per-category names now line up with COCO's ordering.
COCO_CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
                'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
                'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
                'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra',
                'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
                'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
                'kite', 'baseball bat', 'baseball glove', 'skateboard',
                'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
                'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
                'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
                'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
                'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
                'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
                'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
                'teddy bear', 'hair drier', 'toothbrush')

def get_label_map(label_map_file):
    """Parse a ``coco_category_id,contiguous_label`` CSV file.

    Args:
        label_map_file (str): path to a text file with one
            ``<coco_cat_id>,<label>`` pair per line.
    Returns:
        dict: COCO category id (int) -> contiguous label (int).
    """
    label_map = {}
    # BUGFIX: the file handle was never closed; use a context manager.
    with open(label_map_file, 'r') as labels:
        for line in labels:
            ids = line.split(',')
            label_map[int(ids[0])] = int(ids[1])
    return label_map

def get_cat_map(cat_map_file):
    """Parse the inverse mapping of :func:`get_label_map`.

    Args:
        cat_map_file (str): path to a text file with one
            ``<coco_cat_id>,<label>`` pair per line.
    Returns:
        dict: contiguous label (int) -> COCO category id (int).
    """
    cat_map = {}
    # BUGFIX: the file handle was never closed; use a context manager.
    with open(cat_map_file, 'r') as categories:
        for line in categories:
            cat = line.split(',')
            cat_map[int(cat[1])] = int(cat[0])
    return cat_map

class COCOAnnotationTransform(object):
    """Transforms a COCO annotation into an array of bbox coords and label index.

    Initialized with a dictionary lookup of COCO category ids to contiguous
    labels, read from ``./data/coco_labels.txt``.
    """
    def __init__(self):
        self.label_map = get_label_map(osp.join('./data', 'coco_labels.txt'))

    def __call__(self, target, width, height):
        """
        Args:
            target (list): COCO annotation dicts for one image
                (each with 'bbox' as [x, y, w, h] and 'category_id')
            width (int): image width, used to normalize x coordinates
            height (int): image height, used to normalize y coordinates
        Returns:
            np.ndarray of shape (n, 5): rows are
            [xmin, ymin, xmax, ymax, label_idx] with coords scaled to [0, 1]
        """
        scale = np.array([width, height, width, height])
        res = []
        for obj in target:
            if 'bbox' not in obj:
                print("no bbox in annotation!")
                continue
            # BUGFIX: copy before converting xywh -> xyxy; the original
            # mutated obj['bbox'] in place, corrupting the cached COCO
            # annotations on every subsequent call for the same image.
            bbox = list(obj['bbox'])
            bbox[2] += bbox[0]
            bbox[3] += bbox[1]
            label_idx = self.label_map[obj['category_id']] - 1  # labels start from zero
            final_box = list(np.array(bbox) / scale)
            final_box.append(label_idx)
            res += [final_box]       # [xmin, ymin, xmax, ymax, label_idx]
        return np.array(res)   # [[xmin, ymin, xmax, ymax, label_idx], ... ]
    

class COCODetectionDataset(data.Dataset):
    """MS COCO object-detection dataset.

    Args:
        root (str): COCO root directory; must contain the ``annotations``
            folder and one folder per image set (e.g. ``val2014``).
        image_set (str): annotation split name, e.g. ``'train2014'``.  The
            ``minival2014`` / ``valminusminival2014`` splits read their
            images from the ``val2014`` folder.
        transform (callable, optional): image augmentation, called as
            ``transform(img, boxes, labels)``.
        target_transform (callable, optional): annotation transform, called
            as ``target_transform(target, width, height)``.  Defaults to a
            lazily constructed ``COCOAnnotationTransform``; pass ``None``
            explicitly to keep the raw ``coco.imgToAnns`` entries.
        dataset_name (str): name used in the results-file path and ``repr``.
    """

    # Sentinel so the default COCOAnnotationTransform (which opens
    # ./data/coco_labels.txt) is built lazily in __init__ rather than at
    # import time, while an explicit ``target_transform=None`` still means
    # "no transform", as before.
    _DEFAULT_TARGET_TRANSFORM = object()

    def __init__(self, root, image_set, transform=None,
                 target_transform=_DEFAULT_TARGET_TRANSFORM,
                 dataset_name='MS COCO'):
        self.coco = COCO(osp.join(root, ANNOTATIONS, INSTANCES_SET.format(image_set)))
        # Only images that actually carry annotations.
        self.ids = list(self.coco.imgToAnns.keys())
        self.transform = transform
        if target_transform is COCODetectionDataset._DEFAULT_TARGET_TRANSFORM:
            target_transform = COCOAnnotationTransform()
        self.target_transform = target_transform
        self.name = dataset_name
        self.root = root
        self.num_classes = len(COCO_CLASSES)
        # The minival splits are annotation subsets of val2014, so their
        # images live in the val2014 folder.
        if image_set == 'minival2014' or image_set == 'valminusminival2014':
            self.image_set = 'val2014'
        else:
            self.image_set = image_set

    def __len__(self):
        return len(self.ids)

    def __getitem__(self, index):
        im, gt, h, w = self.pull_item(index)
        return im, gt

    def pull_item(self, index):
        """Load one image and its (optionally transformed) annotations.

        Args:
            index (int): Index
        Returns:
            tuple: Tuple (image, target, height, width).
                   image is a CHW RGB tensor; target is whatever the target
                   transform returns (or the raw annotation list when no
                   target transform is set).
        """
        img_id = self.ids[index]
        # imgToAnns maps an image id to the list of annotation dicts (one
        # per object) from the "annotations" section of the json file.
        target = self.coco.imgToAnns[img_id]

        path = osp.join(self.root, self.image_set,
                        self.coco.loadImgs(img_id)[0]['file_name'])
        assert osp.exists(path), 'Image path does not exist: {}'.format(path)
        img = cv2.imread(path)
        # BGR -> RGB
        img = img[:, :, (2, 1, 0)]

        height, width, _ = img.shape
        if self.target_transform is not None:
            target = self.target_transform(target, width, height)
        if self.transform is not None:
            target = np.array(target)
            img, boxes, labels = self.transform(img, target[:, :4], target[:, 4])
            # Re-assemble target as an (n, 5) array: the first four columns
            # are the box corner coordinates, the last one is the label.
            target = np.hstack((boxes, np.expand_dims(labels, axis=1)))

        return torch.from_numpy(img).permute(2, 0, 1), target, height, width

    def pull_image(self, index):
        '''Returns the original image at index as an OpenCV (BGR) array

        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.

        Argument:
            index (int): index of img to show
        Return:
            cv2 img
        '''
        img_id = self.ids[index]
        file_name = self.coco.loadImgs(img_id)[0]['file_name']
        # BUGFIX: images live under <root>/<image_set>/ (see pull_item);
        # joining root and file name directly pointed at a missing path.
        return cv2.imread(osp.join(self.root, self.image_set, file_name),
                          cv2.IMREAD_COLOR)

    def pull_anno(self, index):
        '''Returns the original annotation of image at index

        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.

        Argument:
            index (int): index of img to get annotation of
        Return:
            list: the raw ``coco.imgToAnns`` entry — one annotation dict per
                object (each with 'bbox', 'category_id', ... keys).
        '''
        img_id = self.ids[index]
        return self.coco.imgToAnns[img_id]

    def __repr__(self):
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        fmt_str += '    Root Location: {}\n'.format(self.root)
        tmp = '    Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        tmp = '    Target Transforms (if any): '
        fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        return fmt_str

    # ------------------------------------------------------------------
    # Model evaluation
    # ------------------------------------------------------------------
    def __get_thr_ind(self, coco_eval, thr):
        """Return the index of IoU threshold ``thr`` in
        ``coco_eval.params.iouThrs`` ([0.5, 0.55, ..., 0.95])."""
        # np.where returns (array([x]),); [0][0] extracts x
        ind = np.where((coco_eval.params.iouThrs > thr - 1e-5) &
                       (coco_eval.params.iouThrs < thr + 1e-5))[0][0]
        iou_thr = coco_eval.params.iouThrs[ind]
        assert np.isclose(iou_thr, thr)
        return ind

    def _print_detection_eval_metrics(self, coco_eval):
        """Print mean and per-category AP averaged over IoU in [0.5, 0.95]."""
        IoU_lo_thresh = 0.5
        IoU_hi_thresh = 0.95

        ind_lo = self.__get_thr_ind(coco_eval, IoU_lo_thresh)
        ind_hi = self.__get_thr_ind(coco_eval, IoU_hi_thresh)
        # precision has dims (iou, recall, cls, area range, max dets)
        # area range index 0: all area ranges
        # max dets index 2: 100 per image
        precision = coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, :, 0, 2]
        ap_default = np.mean(precision[precision > -1])
        print('~~~~ Mean and per-category AP @ IoU=[{:.2f},{:.2f}] ~~~~'.format(IoU_lo_thresh, IoU_hi_thresh))
        print('{:.1f}'.format(100 * ap_default))
        # TODO: print result of different target size
        for cls_idx, cls in enumerate(COCO_CLASSES):
            precision = coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, cls_idx, 0, 2]
            ap = np.mean(precision[precision > -1])
            print('Category:{}:{:.1f}'.format(cls, 100 * ap))

        print('~~~~ Summary metrics ~~~~')
        coco_eval.summarize()

    def _do_detection_eval(self, res_file, output_dir):
        """Run COCO bbox evaluation on a results json file, print the
        metrics, and pickle the COCOeval object to ``output_dir``."""
        ann_type = 'bbox'
        coco_dt = self.coco.loadRes(res_file)
        # BUGFIX: COCOeval defaults to iouType='segm'; pass the annotation
        # type explicitly instead of relying on the deprecated useSegm flag.
        coco_eval = COCOeval(self.coco, coco_dt, ann_type)
        coco_eval.evaluate()
        coco_eval.accumulate()
        self._print_detection_eval_metrics(coco_eval)
        eval_file = osp.join(output_dir, 'detection_result.pkl')
        with open(eval_file, 'wb') as fid:
            pickle.dump(coco_eval, fid, pickle.HIGHEST_PROTOCOL)
        print('Wrote COCO eval results to: {}'.format(eval_file))

    def _coco_result_one_category(self, boxes, cat_id):
        """Convert one category's per-image detections to COCO result dicts.

        Args:
            boxes: per-image detections for this category; ``boxes[i]`` is an
                (n_i, 5) array ``[x1, y1, x2, y2, score]`` (possibly empty)
                for the image with id ``self.ids[i]``.
            cat_id (int): COCO category id of these detections.
        Returns:
            list of result dicts with xywh boxes, as expected by
            ``COCO.loadRes``.
        """
        results = []
        for im_idx, img_id in enumerate(self.ids):
            # BUGFIX: np.float was removed from NumPy, and ``dets == []`` is
            # an element-wise ndarray comparison, not an emptiness test.
            dets = np.asarray(boxes[im_idx], dtype=np.float64)
            if dets.size == 0:
                continue
            scores = dets[:, -1]
            xs = dets[:, 0]
            ys = dets[:, 1]
            ws = dets[:, 2] - xs + 1
            hs = dets[:, 3] - ys + 1
            results.extend(
                [{
                    'image_id': img_id,
                    'category_id': cat_id,
                    'bbox': [xs[k], ys[k], ws[k], hs[k]],
                    'score': scores[k]
                } for k in range(dets.shape[0])]
            )
        return results

    def _write_coco_results_file(self, all_boxes, res_file):
        """Dump all detections to a COCO results json of the form:
        [{"image_id": 42, "category_id": 18,
          "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...]
        """
        cat_map = get_cat_map(osp.join('./data', 'coco_labels.txt'))
        results = []
        for cls_idx, cls_name in enumerate(COCO_CLASSES):
            print('Collecting {} results ({:d}/{:d})'.format(cls_name, cls_idx + 1,
                                                             self.num_classes))
            coco_cat_id = cat_map[cls_idx + 1]
            results.extend(self._coco_result_one_category(all_boxes[cls_idx], coco_cat_id))
        print('Writing results json to {}'.format(res_file))
        with open(res_file, 'w') as fid:
            json.dump(results, fid)

    def evaluate_detections(self, all_boxes, output_dir):
        """Write the results json for ``all_boxes`` (indexed
        ``all_boxes[class][image]``) and run COCO bbox evaluation on it."""
        res_file = osp.join(output_dir, ('detections_' + self.name + '_results'))
        res_file += '.json'
        self._write_coco_results_file(all_boxes, res_file)
        # do evaluation
        self._do_detection_eval(res_file, output_dir)
        # optionally clean up results json file
        # optionally clean up results json file



if __name__ == "__main__":
    # Quick sanity check: print the number of COCO detection categories.
    num_categories = len(COCO_CLASSES)
    print(num_categories)