import os
import cv2
import json
import math
import numpy as np

import torch
import torch.utils.data as data
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval

from utils.image import get_border, get_affine_transform, affine_transform, color_aug
from utils.image import draw_umich_gaussian, gaussian_radius,draw_truncate_gaussian

# DIOR class names; index 0 is the background slot, followed by the 20
# foreground categories (indices 1..20).
COCO_NAMES = ['__background__', "windmill","vehicle","trainstation","tenniscourt","storagetank","stadium","ship","harbor","groundtrackfield",
               "golffield","Expressway-toll-station","Expressway-Service-area","dam","chimney","bridge","overpass","basketballcourt","baseballfield","airport","airplane"]
# Valid annotation category ids, mapped to contiguous labels via cat_ids.
# NOTE(review): this is the 80-entry MS-COCO id list; for DIOR only the first
# 20 ids (1..20) are ever looked up — presumably leftover from the original
# COCO loader, TODO confirm and trim.
COCO_IDS = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,
            14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
            24, 25, 27, 28, 31, 32, 33, 34, 35, 36,
            37, 38, 39, 40, 41, 42, 43, 44, 46, 47,
            48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
            58, 59, 60, 61, 62, 63, 64, 65, 67, 70,
            72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
            82, 84, 85, 86, 87, 88, 89, 90]

# Per-channel mean/std used to normalise inputs (channel order follows
# cv2.imread, i.e. presumably BGR — verify against utils.image.color_aug).
COCO_MEAN = [0.40789654, 0.44719302, 0.47026115]
COCO_STD = [0.28863828, 0.27408164, 0.27809835]
# PCA eigen values/vectors of pixel colors, consumed by color_aug for
# AlexNet-style lighting augmentation.
COCO_EIGEN_VALUES = [0.2141788, 0.01817699, 0.00341571]
COCO_EIGEN_VECTORS = [[-0.58752847, -0.69563484, 0.41340352],
                      [-0.5832747, 0.00994535, -0.81221408],
                      [-0.56089297, 0.71832671, 0.41158938]]


class COCO(data.Dataset):
    """DIOR detection dataset (COCO-format annotations).

    For each image, produces CenterNet-style training targets: a per-class
    center heatmap, center offsets and box sizes, discretised
    top/bottom/left/right side-distance labels (tblr_*), and corner-point
    attention maps (cor).
    """

    def __init__(self,args, split, split_ratio=1.0):
        """Build the image index for `split` ('train' or 'val').

        args must provide: points, hm_kernel, reg_max, alpha, omega,
        data_dir, img_size and split_ratio.
        """
        super(COCO, self).__init__()
        self.num_classes = 20  # NOTE(review): hard-coded DIOR class count; keep in sync with COCO_NAMES
        self.class_name = COCO_NAMES
        self.valid_ids = COCO_IDS
        # annotation category id -> contiguous 0-based training label
        self.cat_ids = {v: i for i, v in enumerate(self.valid_ids)}

        self.data_rng = np.random.RandomState(123)  # fixed seed for reproducible color aug
        self.eig_val = np.array(COCO_EIGEN_VALUES, dtype=np.float32)
        self.eig_vec = np.array(COCO_EIGEN_VECTORS, dtype=np.float32)
        self.mean = np.array(COCO_MEAN, dtype=np.float32)[None, None, :]
        self.std = np.array(COCO_STD, dtype=np.float32)[None, None, :]

        self.split = split
#         self.split_ratio = args.split_ratio
        # dataset paths are (re)written below
#         print(args.data_dir)
        ########################################################################
        self.points = args.points        # number of corner attention points (4/8/16/24)
        self.hm_kernel = args.hm_kernel  # heatmap kernel type: "CGK" or "EGK"
        self.reg_max = args.reg_max      # largest side distance covered by the bins
        self.alpha = args.alpha          # EGK radius scale factor
        self.omega = args.omega          # bin width for the side-distance classification
        self.data_dir = os.path.join(args.data_dir, 'Dior')

        if self.split=='val':
            self.img_dir = os.path.join(self.data_dir, 'JPEGImages-test')
            self.annot_path = os.path.join(self.data_dir, 'coco','instances_val2017.json' )
        else:
            self.img_dir = os.path.join(self.data_dir, 'JPEGImages-trainval')
            self.annot_path = os.path.join(self.data_dir, 'coco','instances_trainval2017.json')

# train/val images live in separate folders (they are not split from one pool)
        ########################################################################

        self.max_objs = 800
        self.padding = 31  # 31 for resnet/resdcn
        self.down_ratio = 4  # stride between input image and output feature map
        self.img_size = {'h': args.img_size, 'w': args.img_size}
        self.fmap_size = {'h': args.img_size // self.down_ratio, 'w': args.img_size // self.down_ratio}
        self.rand_scales = np.arange(0.6, 1.3, 0.1)  # random rescale range for train aug
        self.gaussian_iou = 0.7  # min IoU preserved when deriving the gaussian radius


        print('==> initializing coco 2017 %s data.' % split)
        self.coco = coco.COCO(self.annot_path)
        self.images = self.coco.getImgIds()

        # optionally keep only a leading fraction of the image list
        if 0 < args.split_ratio < 1:
            split_size = int(np.clip(args.split_ratio * len(self.images), 1, len(self.images)))
            self.images = self.images[:split_size]

        self.num_samples = len(self.images)

        print('Loaded %d %s samples' % (self.num_samples, split))

    def __getitem__(self, index):
        """Load one image, apply train-time augmentation, and build every
        regression/classification target. Returns a dict of numpy arrays."""
        img_id = self.images[index]
        img_path = os.path.join(self.img_dir, self.coco.loadImgs(ids=[img_id])[0]['file_name'])
        ann_ids = self.coco.getAnnIds(imgIds=[img_id])
        annotations = self.coco.loadAnns(ids=ann_ids)
        labels = np.array([self.cat_ids[anno['category_id']] for anno in annotations])
        bboxes = np.array([anno['bbox'] for anno in annotations], dtype=np.float32)
        if len(bboxes) == 0:
            # no annotations: insert one degenerate box so the target loop is a no-op
            bboxes = np.array([[0., 0., 0., 0.]], dtype=np.float32)
            labels = np.array([[0]])  # NOTE(review): 2-D here but 1-D above — likely meant np.array([0]); confirm
        bboxes[:, 2:] += bboxes[:, :2]  # xywh to xyxy

        img = cv2.imread(img_path)
#         print(img_path)
        height, width = img.shape[0], img.shape[1]
        center = np.array([width / 2., height / 2.], dtype=np.float32)  # center of image
        scale = max(height, width) * 1.0

        flipped_w = False
        flipped_h = False
        if self.split == 'train':
            # train augmentation: random rescale, random crop center, random flips
            scale = scale * np.random.choice(self.rand_scales)
            w_border = get_border(128, width)
            h_border = get_border(128, height)
            center[0] = np.random.randint(low=w_border, high=width - w_border)
            center[1] = np.random.randint(low=h_border, high=height - h_border)

            if np.random.random() < 0.5:
                flipped_w = True
                img = img[:, ::-1, :]  # horizontal (left-right) flip
                center[0] = width - center[0] - 1
            if np.random.random() < 0.5:  # 0.5
                flipped_h = True
                img = img[::-1, :, :]  # vertical (up-down) flip
                center[1] = height - center[1] - 1

        trans_img = get_affine_transform(center, scale, 0, [self.img_size['w'], self.img_size['h']])
        img = cv2.warpAffine(img, trans_img, (self.img_size['w'], self.img_size['h']))



        img = img.astype(np.float32) / 255.

        if self.split == 'train':
            color_aug(self.data_rng, img, self.eig_val, self.eig_vec)

        img -= self.mean
        img /= self.std
        img = img.transpose(2, 0, 1)  # from [H, W, C] to [C, H, W]

        trans_fmap = get_affine_transform(center, scale, 0, [self.fmap_size['w'], self.fmap_size['h']])
        ##########################################
        # number of discrete bins each side distance is classified into
        cls_num = int(self.reg_max/self.omega)+1
        ##########################################
        hmap = np.zeros((self.num_classes, self.fmap_size['h'], self.fmap_size['w']), dtype=np.float32)  # heatmap
        cor_att = np.zeros((self.points, self.fmap_size['h'],  self.fmap_size['w']), dtype=np.float32)  # corner attention maps
        w_h_ = np.zeros((self.max_objs, 2), dtype=np.float32)  # width and height
        regs = np.zeros((self.max_objs, 2), dtype=np.float32)  # regression
        inds = np.zeros((self.max_objs,), dtype=np.int64)
        ind_masks = np.zeros((self.max_objs,), dtype=np.uint8)
        tblr_high = np.zeros((self.max_objs, cls_num*4), dtype=np.float32)  # side distances, soft label rounded up (ceil)
        tblr_low = np.zeros((self.max_objs, cls_num*4), dtype=np.float32)  # side distances, soft label rounded down (floor)
        tblr_cls = np.zeros((self.max_objs, cls_num*4), dtype=np.float32)  # element-wise max of the two soft labels
        tblr_reg = np.zeros((self.max_objs, 4), dtype=np.float32)  # raw distances to the four sides
        boxes = np.zeros((self.max_objs, 4), dtype=np.float32)  # clipped fmap-space boxes

#         boxs = np.zerop((self.max_objs, 4), dtype=np.float32)
        alpha = self.alpha  # 0.54,0.7,1.0,1.2,1.6,3,0.4,0.2


        # detections = []
        for k, (bbox, label) in enumerate(zip(bboxes, labels)):
            # mirror boxes to match the flipped image
            if flipped_w:
                bbox[[0, 2]] = width - bbox[[2, 0]] - 1
            if flipped_h:
                bbox[[1, 3]] = height - bbox[[3, 1]] - 1
            # map the box into feature-map coordinates and clip to its bounds
            bbox[:2] = affine_transform(bbox[:2], trans_fmap)
            bbox[2:] = affine_transform(bbox[2:], trans_fmap)
            bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, self.fmap_size['w'] - 1)
            bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, self.fmap_size['h'] - 1)
            h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]

            if h > 0 and w > 0:

                obj_c = np.array([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
                obj_c_int = obj_c.astype(np.int32)


                # distances from the (integer) center to the four box sides
                t = obj_c_int [1] - bbox[1]
                b = bbox[3]-obj_c_int [1]
                l = obj_c_int [0] - bbox[0]
                r = bbox[2]-obj_c_int [0]

                if t>0 and b>0 and l>0 and r>0:
                    #---------------------------------------------------------------------

                    # soft classification labels for each side distance: the
                    # distance is binned (bin width = omega) and snapped up/down
                    tblr_high[k][0:cls_num*1] = angle_smooth_label(1. * t,self.reg_max,"high",self.omega)
                    tblr_high[k][cls_num*1:cls_num*2]= angle_smooth_label(1. * b,self.reg_max,"high",self.omega)
                    tblr_high[k][cls_num*2:cls_num*3] = angle_smooth_label(1. * l,self.reg_max,"high",self.omega)
                    tblr_high[k][cls_num*3:cls_num*4] = angle_smooth_label(1. * r,self.reg_max,"high",self.omega)

                    tblr_low[k][0:cls_num*1] = angle_smooth_label(1. * t,self.reg_max,"low",self.omega)
                    tblr_low[k][cls_num*1:cls_num*2]= angle_smooth_label(1. * b,self.reg_max,"low",self.omega)
                    tblr_low[k][cls_num*2:cls_num*3] = angle_smooth_label(1. * l,self.reg_max,"low",self.omega)
                    tblr_low[k][cls_num*3:cls_num*4] = angle_smooth_label(1. * r,self.reg_max,"low",self.omega)


                    tblr_cls[k] = np.maximum(tblr_high[k],tblr_low[k])
                    tblr_reg[k] = 1. *t, 1. *b, 1. *l, 1. *r

                    radius = max(0, int(gaussian_radius((math.ceil(h), math.ceil(w)), self.gaussian_iou)))

                    if self.hm_kernel == "CGK":
                        # circular gaussian kernel
                        draw_umich_gaussian(hmap[label], obj_c_int, radius)

                    elif self.hm_kernel == "EGK":
                        # elliptic gaussian kernel: radii proportional to box sides
                        h_radiuses_alpha = math.ceil(h / 2. * alpha)
                        w_radiuses_alpha = math.ceil(w / 2. * alpha)
                        draw_truncate_gaussian(hmap[label], obj_c_int, h_radiuses_alpha, w_radiuses_alpha)

                    # splat a gaussian at each corner/edge point on a regular
                    # grid over the box (the box center itself is skipped)
                    idx = 0
                    if self.points == 24:
                        for y in (bbox[1], bbox[1]/4*3+bbox[3]/4,(bbox[1]+bbox[3])/2, bbox[1]/4+bbox[3]/4*3,bbox[3]):
                            for x in (bbox[0],bbox[0]/4*3+bbox[2]/4,(bbox[0]+bbox[2])/2,bbox[0]/4+bbox[2]/4*3,  bbox[2]):
                                if x == (bbox[0]+bbox[2])/2 and y == (bbox[1]+bbox[3])/2:
                                    continue
                                cor = np.array([x, y])

                                cor_int = cor.astype(np.int32)
        #                         print(cor_int)
        #                         draw_truncate_gaussian(cor_att[idx], cor_int, h_radiuses_alpha, w_radiuses_alpha)
                                draw_umich_gaussian(cor_att[idx], cor_int, int(radius))
                                idx = idx + 1

                    elif self.points == 16:
                        for y in (bbox[1], bbox[1]/3*2+bbox[3]/3,bbox[1]/3+bbox[3]/3*2,bbox[3]):
                            for x in (bbox[0],bbox[0]/3*2+bbox[2]/3,bbox[0]/3+bbox[2]/3*2,  bbox[2]):
                                if x == (bbox[0]+bbox[2])/2 and y == (bbox[1]+bbox[3])/2:
                                    continue
                                cor = np.array([x, y])

                                cor_int = cor.astype(np.int32)
        #                         print(cor_int)
        #                         draw_truncate_gaussian(cor_att[idx], cor_int, h_radiuses_alpha, w_radiuses_alpha)
                                draw_umich_gaussian(cor_att[idx], cor_int, int(radius))
                                idx = idx + 1

                    elif self.points == 8:
                        for y in (bbox[1], (bbox[1]+bbox[3])/2, bbox[3]):
                            for x in (bbox[0],(bbox[0]+bbox[2])/2,  bbox[2]):
                                if x == (bbox[0]+bbox[2])/2 and y == (bbox[1]+bbox[3])/2:
                                    continue
                                cor = np.array([x, y])
                                cor_int = cor.astype(np.int32)
                                draw_umich_gaussian(cor_att[idx], cor_int, int(radius))
                                idx = idx + 1
                    elif self.points == 4:
                        for y in (bbox[1], bbox[3]):
                            for x in (bbox[0], bbox[2]):
                                cor = np.array([x, y])
                                cor_int = cor.astype(np.int32)
                                draw_umich_gaussian(cor_att[idx], cor_int, int(radius))
                                idx = idx + 1

                    w_h_[k] = 1. * w, 1. * h
                    regs[k] = obj_c - obj_c_int  # discretization error
                    inds[k] = obj_c_int[1] * self.fmap_size['w'] + obj_c_int[0]  # flattened fmap index of the center (row-major: left->right, top->bottom)

                    ind_masks[k] = 1

    #                 print(bbox)
                    boxes[k] = bbox[0],bbox[1],bbox[2],bbox[3]  # clipped fmap-space box

        return {'image': img,
                'hmap': hmap, 'w_h_': w_h_, 'regs': regs, 'inds': inds, 'ind_masks': ind_masks,
                'c': center, 's': scale, 'img_id': img_id,"bboxes":boxes, "tblr_high":tblr_high,"tblr_low":tblr_low,"tblr_cls":tblr_cls,"tblr_reg":tblr_reg,"cor":cor_att}

    def __len__(self):
        """Number of images in the (possibly sub-sampled) split."""
        return self.num_samples


class COCO_eval(COCO):
    """Evaluation wrapper around the DIOR/COCO dataset.

    Yields (img_id, per-scale preprocessed inputs) and scores predictions
    with the official pycocotools COCOeval API.
    """

    def __init__(self, args, split, img_size,test_scales=(1,), test_flip=False, fix_size=True):
        """
        img_size: square network input side (used when fix_size is True).
        test_scales: multi-scale test factors applied to the raw image.
        test_flip: if True, also feed the horizontally flipped image.
        fix_size: warp to img_size (True) or pad to a stride multiple (False).
        """
        super(COCO_eval, self).__init__(args, split)
        self.test_flip = test_flip
        self.test_scales = test_scales
        self.fix_size = fix_size
        self.img_size = {'h': img_size, 'w': img_size}
        self.split = split

    def __getitem__(self, index):
        """Return (img_id, {scale: {'image', 'center', 'scale', 'fmap_h', 'fmap_w'}})."""
        img_id = self.images[index]
        img_path = os.path.join(self.img_dir, self.coco.loadImgs(ids=[img_id])[0]['file_name'])
        image = cv2.imread(img_path)
        height, width = image.shape[0:2]

        out = {}
        for scale in self.test_scales:
            new_height = int(height * scale)
            new_width = int(width * scale)

            if self.fix_size:
                # warp everything into the fixed square network input
                img_height, img_width = self.img_size['h'], self.img_size['w']
                center = np.array([new_width / 2., new_height / 2.], dtype=np.float32)
                scaled_size = max(height, width) * 1.0
                scaled_size = np.array([scaled_size, scaled_size], dtype=np.float32)
            else:
                # round each side up to the next multiple of (padding + 1)
                img_height = (new_height | self.padding) + 1
                img_width = (new_width | self.padding) + 1
                center = np.array([new_width // 2, new_height // 2], dtype=np.float32)
                scaled_size = np.array([img_width, img_height], dtype=np.float32)

            img = cv2.resize(image, (new_width, new_height))
            trans_img = get_affine_transform(center, scaled_size, 0, [img_width, img_height])
            img = cv2.warpAffine(img, trans_img, (img_width, img_height))

            # normalise and convert [H, W, C] -> [1, C, H, W]
            img = img.astype(np.float32) / 255.
            img -= self.mean
            img /= self.std
            img = img.transpose(2, 0, 1)[None, :, :, :]

            if self.test_flip:
                img = np.concatenate((img, img[:, :, :, ::-1].copy()), axis=0)

            out[scale] = {'image': img,
                          'center': center,
                          'scale': scaled_size,
                          'fmap_h': img_height // self.down_ratio,
                          'fmap_w': img_width // self.down_ratio}

        return img_id, out

    def convert_eval_format(self, all_bboxes):
        """Convert {img_id: {cls: [[x1,y1,x2,y2,score], ...]}} into the flat
        COCO results-json list format (boxes become xywh)."""
        detections = []
        for image_id in all_bboxes:
            for cls_ind in all_bboxes[image_id]:
                category_id = self.valid_ids[cls_ind - 1]
                for bbox in all_bboxes[image_id][cls_ind]:
                    bbox[2] -= bbox[0]  # xyxy -> xywh (mutates the input box)
                    bbox[3] -= bbox[1]
                    score = bbox[4]
                    bbox_out = list(map(lambda x: float("{:.2f}".format(x)), bbox[0:4]))

                    detection = {"image_id": int(image_id),
                                 "category_id": int(category_id),
                                 "bbox": bbox_out,
                                 "score": float("{:.2f}".format(score))}
                    detections.append(detection)
        return detections

    def run_eval(self, results, save_dir=None):
        """Run COCOeval on `results`, optionally dumping them to
        save_dir/results.json. Returns the 12-element COCO stats vector."""
        detections = self.convert_eval_format(results)

        if save_dir is not None:
            result_json = os.path.join(save_dir, "results.json")
            # FIX: previously json.dump(detections, open(result_json, "w"))
            # leaked the file handle; close it deterministically.
            with open(result_json, "w") as f:
                json.dump(detections, f)

        coco_dets = self.coco.loadRes(detections)
        coco_eval = COCOeval(self.coco, coco_dets, "bbox")
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        return coco_eval.stats

    @staticmethod
    def collate_fn(batch):
        """Collate (img_id, sample) pairs; only 'image' becomes a torch tensor."""
        out = []
        for img_id, sample in batch:
            out.append((img_id, {s: {k: torch.from_numpy(sample[s][k]).float()
                                     if k == 'image' else np.array(sample[s][k])
                                     for k in sample[s]} for s in sample}))
        return out
class PascalVOC_eval(COCO):
    """Evaluation wrapper that preprocesses images exactly like COCO_eval but
    scores results with the PASCAL-VOC mAP protocol via eval_mAP.

    NOTE(review): __init__/__getitem__/collate_fn duplicate COCO_eval
    verbatim — candidates for a shared base class.
    """

    def __init__(self, args, split, img_size, test_scales=(1,), test_flip=False, fix_size=True, **kwargs):
        # img_size: square network input side (used when fix_size is True)
        # test_scales: multi-scale test factors applied to the raw image
        # test_flip: if True, also feed the horizontally flipped image
        # fix_size: warp to img_size (True) or pad to a stride multiple (False)
        super(PascalVOC_eval, self).__init__(args, split, **kwargs)
        self.test_flip = test_flip
        self.test_scales = test_scales
        self.fix_size = fix_size
        self.img_size = {'h': img_size, 'w': img_size}
        self.split = split

    def __getitem__(self, index):
        """Return (img_id, {scale: {'image', 'center', 'scale', 'fmap_h', 'fmap_w'}})."""
        img_id = self.images[index]
        img_path = os.path.join(self.img_dir, self.coco.loadImgs(ids=[img_id])[0]['file_name'])
        image = cv2.imread(img_path)
        height, width = image.shape[0:2]

        out = {}
        for scale in self.test_scales:
            new_height = int(height * scale)
            new_width = int(width * scale)

            if self.fix_size:
                # warp everything into the fixed square network input
                img_height, img_width = self.img_size['h'], self.img_size['w']
                center = np.array([new_width / 2., new_height / 2.], dtype=np.float32)
                scaled_size = max(height, width) * 1.0
                scaled_size = np.array([scaled_size, scaled_size], dtype=np.float32)
            else:
                # round each side up to the next multiple of (padding + 1)
                img_height = (new_height | self.padding) + 1
                img_width = (new_width | self.padding) + 1
                center = np.array([new_width // 2, new_height // 2], dtype=np.float32)
                scaled_size = np.array([img_width, img_height], dtype=np.float32)

            img = cv2.resize(image, (new_width, new_height))
            trans_img = get_affine_transform(center, scaled_size, 0, [img_width, img_height])
            img = cv2.warpAffine(img, trans_img, (img_width, img_height))

            img = img.astype(np.float32) / 255.
            img -= self.mean
            img /= self.std
            img = img.transpose(2, 0, 1)[None, :, :, :]  # from [H, W, C] to [1, C, H, W]

            if self.test_flip:
                img = np.concatenate((img, img[:, :, :, ::-1].copy()), axis=0)

            out[scale] = {'image': img,
                          'center': center,
                          'scale': scaled_size,
                          'fmap_h': img_height // self.down_ratio,
                          'fmap_w': img_width // self.down_ratio}

        return img_id, out

    def convert_eval_format(self, all_bboxes):
        """Regroup detections per class: {class_name: [(img_name, score, *box), ...]}."""
        # all_bboxes: num_samples x num_classes x 5
        detections = [[] for _ in self.class_name[1:]]
        for i in range(self.num_samples):
            img_id = self.images[i]
            img_name = self.coco.loadImgs(ids=[img_id])[0]['file_name'].split('.')[0]
            for j in range(1, self.num_classes + 1):
                if len(all_bboxes[img_id][j]) > 0:
                    for bbox in all_bboxes[img_id][j]:
                        # stored as (image basename, score, box coordinates)
                        detections[j - 1].append((img_name, bbox[-1], *bbox[:-1]))



        detections = {cls: det for cls, det in zip(self.class_name[1:], detections)}

        return detections


    def run_eval(self, results, save_dir=None):
        """Compute VOC-style mAP over `results`; returns (mAP, {class: AP})."""
        detections = self.convert_eval_format(results)
#         if save_dir is not None:
#             torch.save(detections, os.path.join(save_dir, 'results.t7'))
        eval_map = eval_mAP(self.split)
        # NOTE(review): `map` shadows the builtin of the same name
        aps, map = eval_map.do_python_eval(detections,use_07=False)
        return map, aps

    @staticmethod
    def collate_fn(batch):
        """Collate (img_id, sample) pairs; only 'image' becomes a torch tensor."""
        out = []
        for img_id, sample in batch:
            out.append((img_id, {s: {k: torch.from_numpy(sample[s][k]).float()
            if k == 'image' else np.array(sample[s][k]) for k in sample[s]} for s in sample}))
        return out


class eval_mAP():
    """PASCAL-VOC-style per-class AP / mAP evaluation on COCO-format DIOR
    annotations.

    Ground truth is read with pycocotools; predictions arrive as per-class
    lists of (img_name, score, x, y, w, h) tuples (see
    PascalVOC_eval.convert_eval_format).
    """

    def __init__(self, split):
        # NOTE(review): annotation path is hard-coded relative to the working
        # directory; keep it in sync with COCO.annot_path.
        self.annot_path = os.path.join('../_DATASET/Dior/coco', 'instances_%s2017.json' % split)
        self.coco = coco.COCO(self.annot_path)
        self.images = self.coco.getImgIds()  # ids of every image in the split
        self.valid_ids = COCO_IDS
        self.cat_ids = {v: i for i, v in enumerate(self.valid_ids)}

    def do_python_eval(self, detections, use_07=True):
        """Evaluate every foreground class and return ({class: AP}, mAP).

        detections: {class_name: [(img_name, score, *box), ...]}
        use_07: use the VOC07 11-point metric instead of the VOC12 area metric.
        """
        aps = []
        clses = []
        # The PASCAL VOC metric changed in 2010
        print('use VOC07 metric ' if use_07 else 'use VOC12 metric ')
        for cls in COCO_NAMES[1:]:
            rec, prec, ap = self.voc_eval(detections[cls], self.annot_path,
                                          cls, ovthresh=0.5, use_07_metric=use_07)
            clses.append(cls)
            aps.append(ap)
            print('AP for %s = %.2f%%' % (cls, ap * 100))

        print('Mean AP = %.2f%%' % (np.mean(aps) * 100))
        return dict(zip(clses, aps)), np.mean(aps)

    def voc_ap(self, recall, precision, use_07_metric=True):
        """Compute VOC AP from recall/precision arrays.

        If use_07_metric is true, uses the VOC07 11-point interpolation;
        otherwise the exact area under the monotone precision envelope.
        """
        if use_07_metric:
            # 11 point metric
            ap = 0.
            for t in np.arange(0., 1.1, 0.1):
                if np.sum(recall >= t) == 0:
                    p = 0
                else:
                    p = np.max(precision[recall >= t])
                ap = ap + p / 11.
        else:
            # append sentinel values, build the monotone precision envelope,
            # then integrate over the recall steps
            mrec = np.concatenate(([0.], recall, [1.]))
            mpre = np.concatenate(([0.], precision, [0.]))
            for i in range(mpre.size - 1, 0, -1):
                mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
            # points where the recall (X axis) changes value
            i = np.where(mrec[1:] != mrec[:-1])[0]
            ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
        return ap

    def coco_box_to_bbox(self, box):
        """Convert a COCO [x, y, w, h] box to [x1, y1, x2, y2] (float32)."""
        return np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]], dtype=np.float32)

    def _load_class_gt(self, classname):
        """Collect ground-truth boxes of `classname` for every image.

        Returns (class_recs, npos): class_recs maps image basename to
        {'bbox': (n,4) array, 'det': matched flags}; npos is the total
        number of ground-truth instances of this class.
        """
        imagenames = []
        for image_id in self.images:
            file_name = self.coco.loadImgs(ids=image_id)[0]['file_name']
            imagenames.append(file_name.split('.')[0])

        class_recs = {}
        npos = 0
        for i, imagename in enumerate(imagenames):
            ann_ids = self.coco.getAnnIds(imgIds=[self.images[i]])
            anns = self.coco.loadAnns(ids=ann_ids)
            gt_boxes = []
            for ann in anns:
                cls_id = int(self.cat_ids[ann['category_id']]) + 1  # +1 skips the background slot
                if COCO_NAMES[cls_id] == classname:
                    gt_boxes.append(self.coco_box_to_bbox(ann['bbox']))
            # FIX: the old code used .astype(np.bool) for a 'difficult' mask;
            # np.bool was removed in NumPy 1.24. DIOR has no difficult flag,
            # so every GT instance simply counts toward npos.
            npos = npos + len(gt_boxes)
            class_recs[imagename] = {'bbox': np.array(gt_boxes),
                                     'det': [False] * len(gt_boxes)}
        return class_recs, npos

    def voc_eval(self,
                 cls_detections,
                 annopath,
                 classname,
                 cachedir= None,
                 ovthresh=0.5,
                 use_07_metric=False,
                 use_difficult=False):
        """PASCAL VOC evaluation for one class.

        cls_detections: [(img_name, score, x, y, w, h), ...] predictions.
        annopath / cachedir / use_difficult are kept for signature
        compatibility and are unused (GT comes from self.coco).
        ovthresh: IoU threshold for a detection to count as a true positive.
        Returns (recall, precision, ap).
        """
        class_recs, npos = self._load_class_gt(classname)

        image_ids = [x[0] for x in cls_detections]
        confidence = np.array([float(x[1]) for x in cls_detections])
        BB = np.array([[float(z) for z in x[2:]] for x in cls_detections])

        nd = len(image_ids)
        tp = np.zeros(nd)
        fp = np.zeros(nd)

        if BB.shape[0] > 0:
            # process detections in descending confidence order
            sorted_ind = np.argsort(-confidence)
            BB = BB[sorted_ind, :]
            image_ids = [image_ids[x] for x in sorted_ind]

            # go down dets and mark TPs and FPs
            for d in range(nd):
                R = class_recs[image_ids[d]]

                bb = BB[d, :].astype(float)
                # predictions are stored as xywh: convert to xyxy
                bb = [bb[0], bb[1], bb[0] + bb[2], bb[1] + bb[3]]

                ovmax = -np.inf
                jmax = -1
                BBGT = R['bbox'].astype(float)

                if BBGT.size > 0:
                    # intersection
                    ixmin = np.maximum(BBGT[:, 0], bb[0])
                    iymin = np.maximum(BBGT[:, 1], bb[1])
                    ixmax = np.minimum(BBGT[:, 2], bb[2])
                    iymax = np.minimum(BBGT[:, 3], bb[3])
                    iw = np.maximum(ixmax - ixmin + 1., 0.)
                    ih = np.maximum(iymax - iymin + 1., 0.)
                    inters = iw * ih

                    # union
                    uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
                           (BBGT[:, 2] - BBGT[:, 0] + 1.) *
                           (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)

                    overlaps = inters / uni
                    ovmax = np.max(overlaps)
                    jmax = np.argmax(overlaps)

                if ovmax > ovthresh:
                    if not R['det'][jmax]:
                        tp[d] = 1.  # first match of this GT box
                        R['det'][jmax] = 1
                    else:
                        fp[d] = 1.  # duplicate detection of an already-matched box
                else:
                    fp[d] = 1.

        # compute precision/recall over the ranked detections
        fp = np.cumsum(fp)
        tp = np.cumsum(tp)
        rec = tp / float(npos)
        # avoid divide by zero in case the first detection matches a difficult
        # ground truth
        prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
        ap = self.voc_ap(rec, prec, use_07_metric)

        return rec, prec, ap


import math
import numpy as np



# For the KL loss the one-hot peak carries mass 1-p, so the label does not sum to 1
def pulse_label_KL(label, num_class, p):
    """One-hot pulse whose peak is 1-p (total mass below 1, for KL loss)."""
    out = np.zeros([num_class])
    out[label] = 1 - p
    return out

def pulse_label(label, num_class, p):
    """Hard one-hot vector; `p` is accepted for API symmetry but unused."""
    out = np.zeros([num_class])
    out[label] = 1
    return out

def get_all_smooth_label(num_label, p):
    """Stack the pulse label of every bin into a (num_label, num_label) array."""
    return np.array([pulse_label(i, num_label, p) for i in range(num_label)])


# def angle_smooth_label(angle_label, angle_range,flag,omega =1):
#     """
#     :param angle_label: [-90,0) or [-90, 0)
#     :param angle_range: 90 or 180
#     :return:
#     """

#     assert angle_range % omega == 0, 'wrong omega'

  
    
#     angle_range /= omega
#     angle_label /= omega
#     if flag == "high":
#         angle_label_new = np.array(np.ceil(angle_label), np.int32)
#         p = angle_label_new - angle_label
#     elif flag == "low":
#         angle_label_new = np.array(np.floor(angle_label), np.int32)
#         p = angle_label - angle_label_new
# #     print(angle_label_new)
# #     print(angle_label) 
# #     print(1-p,"\n")   
#     all_smooth_label = get_all_smooth_label(int(angle_range),p)
#     inx = angle_label_new == angle_range
#     angle_label_new[inx] = angle_range - 1
#     smooth_label = all_smooth_label[angle_label_new]
#     return np.array(smooth_label, np.float32)

def angle_smooth_label(angle_label, angle_range, flag, omega=10):
    """Discretise a scalar distance/angle into a soft one-hot label.

    The value is divided by `omega` (the bin width) and snapped either up
    ("high", ceil) or down ("low", floor); the residual distance to the
    snapped bin sets the pulse placement passed to get_all_smooth_label.
    Returns a float32 vector with angle_range/omega + 1 entries.
    """
    assert angle_range % omega == 0, 'wrong omega'

    num_bins = angle_range / omega
    scaled = angle_label / omega
    if flag == "high":
        snapped = np.array(np.ceil(scaled), np.int32)
        residual = snapped - scaled
    elif flag == "low":
        snapped = np.array(np.floor(scaled), np.int32)
        residual = scaled - snapped

    table = get_all_smooth_label(int(num_bins + 1), residual)
    # clamp the value that lands exactly on the upper range boundary
    overflow = snapped == num_bins
    snapped[overflow] = num_bins - 1
    return np.array(table[snapped], np.float32)