# A modification version from chainercv repository.
# (See https://github.com/chainer/chainercv/blob/master/chainercv/evaluations/eval_detection_voc.py)
from __future__ import division

import os
from collections import defaultdict
import numpy as np
import math
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
import torch

import numpy as np
import cv2

def process_prediction_gt(prediction, target):
    """Split a prediction BoxList by whether its label occurs in the target.

    Args:
        prediction (BoxList): detections with "labels" and "scores" fields.
        target (BoxList): ground truth with a "labels" field.

    Returns:
        (BoxList, BoxList): first, detections whose label appears among the
        ground-truth labels; second, the remaining detections.  Both carry
        the "scores" and "labels" fields restricted to the kept rows.
    """
    pred_labels = prediction.get_field("labels").numpy()
    gt_labels = target.get_field("labels").numpy()
    # Boolean mask over predictions: label is present in the ground truth.
    keep = torch.tensor(np.isin(pred_labels, gt_labels))
    drop = torch.logical_not(keep)

    def _subset(mask):
        # Build a BoxList limited to the rows selected by `mask`.
        subset = BoxList(prediction.bbox[mask, :], prediction.size)
        subset.add_field("scores", prediction.get_field("scores")[mask])
        subset.add_field("labels", prediction.get_field("labels")[mask])
        return subset

    return _subset(keep), _subset(drop)
 
def print_image_with_class(prediction, class_name):
    """Return a BoxList containing only detections with label == class_name.

    The "scores" and "labels" fields are carried over for the kept rows;
    the image size of `prediction` is preserved.
    """
    mask = prediction.get_field("labels").numpy() == class_name
    filtered = BoxList(prediction.bbox[mask, :], prediction.size)
    for field_name in ("scores", "labels"):
        filtered.add_field(field_name, prediction.get_field(field_name)[mask])
    return filtered

def do_voc_evaluate(dataset, predictions):
    """Visualize predictions against ground truth, writing two JPEGs per image.

    For each image (keyed by its index in `predictions`):
      - output_folder0 gets the ground-truth boxes (green) plus the
        predictions whose label also occurs in the ground truth (red);
      - output_folder1 gets the ground-truth boxes (green) plus the
        predictions whose label does NOT occur in the ground truth (magenta).

    NOTE(review): data_path and the output folders are machine-specific
    hard-coded paths — adjust before reuse.
    """
    for image_id, prediction in enumerate(predictions):
        image_name = dataset.get_img_name(image_id)
        data_path = "/mnt/sde1/xiaoqianruan/H2FA_R-CNN-main/DETECTRON2_DATASETS/clipart/JPEGImages/"
        gt_boxlist = dataset.get_groundtruth(image_id)
        gt_bbox = gt_boxlist.bbox.numpy()
        gt_label = gt_boxlist.get_field("labels").numpy()
        gt_difficult = gt_boxlist.get_field("difficult").numpy()
        image = cv2.imread(data_path + str(image_name) + ".jpg")
        output_folder0 = "/mnt/sde1/xiaoqianruan/OSHOT/outputs/clipart_images_new0/"
        output_folder1 = "/mnt/sde1/xiaoqianruan/OSHOT/outputs/clipart_images_new1/"
        if not os.path.exists(output_folder1):
            os.makedirs(output_folder1)
        if not os.path.exists(output_folder0):
            os.makedirs(output_folder0)
        # Draw each ground-truth box in green, with a filled label tag at the
        # top-left corner.
        for index, box in enumerate(gt_bbox):
            xmin, ymin, xmax, ymax = box
            a = (int(xmin), int(ymin))
            b = (int(xmax), int(ymax))
            cv2.rectangle(image, a, b, (0, 255, 0), 2)
            # NOTE(review): getTextSize's signature is (text, fontFace,
            # fontScale, thickness); fontFace/fontScale look swapped here,
            # but both values equal 1, so the result is unchanged.
            t_size = cv2.getTextSize(str(gt_label[index]), 1, cv2.FONT_HERSHEY_PLAIN, 1)[0]
            textlbottom = a + np.array(list(t_size))
            cv2.rectangle(image, tuple(a), tuple(textlbottom), (0, 255, 0), -1)
            a = list(a)
            a[1] = int(a[1] + (list(t_size)[1] / 2 + 4))
            label = gt_label[index]
            cv2.putText(image, str(label), tuple(a), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 0, 0), 1)
        # Save the GT-only rendering to BOTH folders; folder0's copy is
        # overwritten below after drawing the matched predictions, folder1's
        # copy is re-read as a clean base for the unmatched predictions.
        cv2.imwrite(output_folder0+str(image_id)+".jpg",image)
        cv2.imwrite(output_folder1+str(image_id)+".jpg",image)

        img_info = dataset.get_img_info(image_id)
        image_width = img_info["width"]
        image_height = img_info["height"]
        # Split detections into those whose label occurs in the GT and those
        # whose label does not.
        prediction,not_prediction = process_prediction_gt(prediction,gt_boxlist)
        # prediction = process_prediction_threshold(prediction,0.95)
        #prediction = print_image_with_class(prediction, 20)
        prediction = prediction.resize((image_width, image_height))
        pred_bbox = prediction.bbox.numpy()
        pred_label = prediction.get_field("labels").numpy()
        pred_score = prediction.get_field("scores").numpy()
        # Draw matched predictions in red: label tag at the top-left corner,
        # score tag at the bottom-right corner.
        for index, box in enumerate(pred_bbox):
            xmin, ymin, xmax, ymax = box
            a = (int(xmin), int(ymin))
            b = (int(xmax), int(ymax))
            cv2.rectangle(image, a, b, (0, 0, 255), 2)
            t_size = cv2.getTextSize(str(pred_label[index]), 1, cv2.FONT_HERSHEY_PLAIN, 1)[0]
            textlbottom = a + np.array(list(t_size))
            cv2.rectangle(image, tuple(a), tuple(textlbottom), (0, 0, 255), -1)
            a = list(a)
            a[1] = int(a[1] + (list(t_size)[1] / 2 + 4))
            cv2.putText(image, str(pred_label[index]), tuple(a), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), 1)
            score_size = cv2.getTextSize(str(pred_score[index]), 1, cv2.FONT_HERSHEY_PLAIN, 1)[0]
            textrbottom = b - np.array(list(score_size))
            cv2.rectangle(image, tuple(textrbottom), tuple(b), (0, 0, 255), -1)
            b = list(b)
            cv2.putText(image, str(pred_score[index]), (textrbottom[0], b[1]), cv2.FONT_HERSHEY_PLAIN, 1.0,
                        (255, 255, 255), 1)
        cv2.imwrite(output_folder0 + str(image_id) + ".jpg", image)
        
        # Re-read the GT-only image saved above so the unmatched predictions
        # are drawn without the red boxes from the previous pass.
        image1 = cv2.imread(output_folder1+str(image_id)+".jpg")
        prediction = not_prediction.resize((image_width, image_height))
        pred_bbox = prediction.bbox.numpy()
        pred_label = prediction.get_field("labels").numpy()
        pred_score = prediction.get_field("scores").numpy()
        # Draw unmatched predictions in magenta, same tag layout as above.
        for index, box in enumerate(pred_bbox):
            xmin, ymin, xmax, ymax = box
            a = (int(xmin), int(ymin))
            b = (int(xmax), int(ymax))
            cv2.rectangle(image1, a, b, (255, 0, 255), 2)
            t_size = cv2.getTextSize(str(pred_label[index]), 1, cv2.FONT_HERSHEY_PLAIN, 1)[0]
            textlbottom = a + np.array(list(t_size))
            cv2.rectangle(image1, tuple(a), tuple(textlbottom), (255, 0, 255), -1)
            a = list(a)
            a[1] = int(a[1] + (list(t_size)[1] / 2 + 4))
            cv2.putText(image1, str(pred_label[index]), tuple(a), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), 1)
            score_size = cv2.getTextSize(str(pred_score[index]), 1, cv2.FONT_HERSHEY_PLAIN, 1)[0]
            textrbottom = b - np.array(list(score_size))
            cv2.rectangle(image1, tuple(textrbottom), tuple(b), (255, 0, 255), -1)
            b = list(b)
            cv2.putText(image1, str(pred_score[index]), (textrbottom[0], b[1]), cv2.FONT_HERSHEY_PLAIN, 1.0,
                        (255, 255, 255), 1)
        cv2.imwrite(output_folder1 + str(image_id) + ".jpg", image1)

def plot_prediction(image_id,prediction,output_folder):
    """Overlay prediction boxes (red) on <output_folder>/<image_id>.jpg.

    Reads the existing image in `output_folder` (presumably one already
    written by plot_labels — confirm against callers), draws each predicted
    box with its label at the top-left corner and its score at the
    bottom-right corner, then writes the file back in place.
    """
    image = cv2.imread(output_folder+str(image_id)+".jpg")
    pred_bbox = prediction.bbox.cpu().numpy()
    pred_label = prediction.get_field("labels").cpu().numpy()
    pred_score = prediction.get_field("scores").cpu().numpy()
    for i,box in enumerate(pred_bbox):
        xmin, ymin, xmax, ymax = box
        a = (int(xmin), int(ymin))
        b = (int(xmax), int(ymax))
        cv2.rectangle(image, a, b, (0, 0, 255), 2)
        # Filled background rectangle behind the label text, sized to fit.
        t_size = cv2.getTextSize(str(pred_label[i]), 1, cv2.FONT_HERSHEY_PLAIN, 1)[0]
        textlbottom = a + np.array(list(t_size))
        cv2.rectangle(image, tuple(a), tuple(textlbottom), (0, 0, 255), -1)
        a = list(a)
        # Shift the text origin down so it sits inside the filled rectangle.
        a[1] = int(a[1] + (list(t_size)[1] / 2 + 4))
        cv2.putText(image, str(pred_label[i]), tuple(a), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), 1)
        # Score tag anchored to the bottom-right corner of the box.
        score_size = cv2.getTextSize(str(pred_score[i]), 1, cv2.FONT_HERSHEY_PLAIN, 1)[0]
        textrbottom = b - np.array(list(score_size))
        cv2.rectangle(image, tuple(textrbottom), tuple(b), (0, 0, 255), -1)
        b = list(b)
        cv2.putText(image, str(pred_score[i]), (textrbottom[0], b[1]), cv2.FONT_HERSHEY_PLAIN, 1.0,(255, 255, 255), 1)
    cv2.imwrite(output_folder + str(image_id) + ".jpg", image)

def test_voc(dataset,predictions):
    """Compare prediction rounds against ground truth and print a count.

    `predictions` is indexed as predictions[round][image_id].  Only round 1
    is currently analyzed via compare_confidence; `accuracy` accumulates its
    per-image (correct, wrong) dictionaries per round, and `result` the
    number of correct detections.  The commented-out code below holds
    earlier/alternate analysis and plotting paths kept for reference.
    """
    output_folder = "/mnt/sde1/xiaoqianruan/OSHOT/outputs/VOC_baseline/inference/clipart_test/"
    #print(len(predictions))
    #color = [(255,0,0),(0,0,255),(255,0,255),(255,255,0),(0,255,255),(128,128,128),(128,0,128)]
    # One accumulator list per prediction round (up to 6 rounds).
    acc_0, acc_1, acc_2, acc_3, acc_4, acc_5 = [],[],[],[],[],[]
    accuracy = [acc_0,acc_1,acc_2,acc_3,acc_4,acc_5]
    #correct_result,wrong_result1,wrong_result2 = 0,0,0
    result = 0
    for image_id in range(len(predictions[0])):
        gt_boxlist = dataset.get_groundtruth(image_id)
        img_info = dataset.get_img_info(image_id)
        image_width = img_info["width"]
        image_height = img_info["height"]
        # Resize predictions back to the original image resolution before
        # comparing against the ground truth.
        pred_boxlist0 = predictions[0][image_id].resize((image_width,image_height))
        pred_boxlist1 = predictions[1][image_id].resize((image_width,image_height))
        #correct_result,wrong_result1,wrong_result2 = analysis_iou(pred_boxlist0,pred_boxlist1,gt_boxlist,correct_result,wrong_result1,wrong_result2)
        result = compare_confidence(dataset,1,image_id,pred_boxlist1,gt_boxlist,accuracy,result)
        #for i in range(len(predictions)):
        #    pred_boxlist = predictions[i][image_id].resize((image_width,image_height))
        #    compare_confidence(dataset,i,image_id,pred_boxlist,gt_boxlist,accuracy,result)
    #print(correct_result,wrong_result1,wrong_result2)
    print(result)
    #analysis_results(accuracy)
    #for image_id,prediction in enumerate(predictions[0]):
    #    gt_boxlist = dataset.get_groundtruth(image_id)
    #    plot_labels(dataset,image_id,output_folder)
    #    img_info = dataset.get_img_info(image_id)
    #    image_width = img_info["width"]
    #    image_height = img_info["height"]
    #    pred_boxlist = prediction.resize((image_width,image_height))
    #    test_iou(dataset,image_id,pred_boxlist,gt_boxlist,output_folder,color[0])
    #for i in range(1,len(predictions)):
    #    for image_id,prediction in enumerate(predictions[i]):
    #        output_folder = "/mnt/sde1/xiaoqianruan/OSHOT/outputs/clipart_predictions/"
    #        gt_boxlist = dataset.get_groundtruth(image_id)
            #plot_labels(dataset,image_id,output_folder)
    #        img_info = dataset.get_img_info(image_id)
    #        image_width = img_info["width"]
    #        image_height = img_info["height"]
    #        pred_boxlist = prediction.resize((image_width,image_height))
    #        test_iou(dataset,image_id,pred_boxlist,gt_boxlist,output_folder,color[i])

def do_voc_confidence(dataset, predictions):
    """Resize six prediction rounds per image and run a comparison on them.

    NOTE(review): the active call below passes 9 positional arguments to
    compare_confidence, but compare_confidence in this file takes 7
    (dataset, i, image_id, pred_boxlist, gt_boxlist, accuracy, result) —
    executing this function as-is raises TypeError.  It appears to target an
    older signature (cf. the commented-out compare_iou call, which does take
    six boxlists); confirm which comparison was intended before reviving it.
    """
    for image_id, prediction0 in enumerate(predictions[0]):
        img_info = dataset.get_img_info(image_id)
        image_width = img_info["width"]
        image_height = img_info["height"]
        prediction0 = prediction0.resize((image_width, image_height))

        prediction1 = predictions[1][image_id]
        prediction1 = prediction1.resize((image_width, image_height))

        prediction2 = predictions[2][image_id]
        prediction2 = prediction2.resize((image_width, image_height))

        prediction3 = predictions[3][image_id]
        prediction3 = prediction3.resize((image_width, image_height))

        prediction4 = predictions[4][image_id]
        prediction4 = prediction4.resize((image_width, image_height))

        prediction5 = predictions[5][image_id]
        prediction5 = prediction5.resize((image_width, image_height))

        gt_boxlist = dataset.get_groundtruth(image_id)

        compare_confidence(dataset,image_id,prediction0, prediction1, prediction2, prediction3, prediction4, prediction5,gt_boxlist)
        #get_confidence(dataset,image_id,prediction0,prediction1,prediction2,prediction3,prediction4,prediction5)
        #compare_iou(dataset,image_id,prediction0, prediction1, prediction2, prediction3, prediction4,prediction5, gt_boxlist)
        #test_iou(dataset,image_id,prediction,gt_boxlist)

def plot_labels(dataset,image_id,output_folder):
    """Render ground-truth boxes (green) with class-id tags onto the raw
    dataset image and save the result as <output_folder>/<image_id>.jpg.

    NOTE(review): the source image path is machine-specific and hard-coded.
    """
    data_path = "/mnt/sde1/xiaoqianruan/H2FA_R-CNN-main/DETECTRON2_DATASETS/clipart/JPEGImages/"
    image = cv2.imread(data_path + str(dataset.get_img_name(image_id)) + ".jpg")
    gt_boxlist = dataset.get_groundtruth(image_id)
    boxes = gt_boxlist.bbox.cpu().numpy()
    labels = gt_boxlist.get_field("labels").cpu().numpy()
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    for label, (xmin, ymin, xmax, ymax) in zip(labels, boxes):
        top_left = (int(xmin), int(ymin))
        bottom_right = (int(xmax), int(ymax))
        cv2.rectangle(image, top_left, bottom_right, (0, 255, 0), 2)
        text = str(label)
        t_size = cv2.getTextSize(text, 1, cv2.FONT_HERSHEY_PLAIN, 1)[0]
        # Filled backdrop behind the class-id tag, sized to the text.
        tag_corner = tuple(np.array(top_left) + np.array(list(t_size)))
        cv2.rectangle(image, top_left, tag_corner, (0, 255, 0), -1)
        # Drop the text origin so the glyphs sit inside the backdrop.
        text_org = (top_left[0], int(top_left[1] + (t_size[1] / 2 + 4)))
        cv2.putText(image, text, text_org, cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 0, 0), 1)
    cv2.imwrite(output_folder + str(image_id) + ".jpg", image)

def plot_comparision(image_id,pred_bbox,pred_score,l,output_folder,color):
    """Overlay boxes of a single class `l` (in `color`) onto the image at
    <output_folder>/<image_id>.jpg, tagging each with the class label at the
    top-left corner and its score at the bottom-right, then save in place.

    NOTE(review): the output-folder existence check happens AFTER imread on
    a path inside that folder — if the folder is missing, imread returns
    None and the drawing calls below fail regardless; verify call order
    against plot_labels, which creates the folder first.
    """
    #image_name = dataset.get_img_name(image_id)
    #data_path = "/mnt/sde1/xiaoqianruan/H2FA_R-CNN-main/DETECTRON2_DATASETS/clipart/JPEGImages/"
    #image = cv2.imread(data_path+str(image_name)+".jpg")
    image = cv2.imread(output_folder+str(image_id)+".jpg")
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    for i,bbox in enumerate(pred_bbox):
        xmin,ymin,xmax,ymax = bbox
        a = (int(xmin),int(ymin))
        b = (int(xmax),int(ymax))
        cv2.rectangle(image,a,b,color,2)
        # Filled backdrop for the class label, then the label text itself.
        t_size = cv2.getTextSize(str(l),1,cv2.FONT_HERSHEY_PLAIN,1)[0]
        textlbottom = a + np.array(list(t_size))
        cv2.rectangle(image, tuple(a), tuple(textlbottom), color, -1)
        a = list(a)
        a[1] = int(a[1] + (list(t_size)[1] / 2 + 4))
        cv2.putText(image, str(l), tuple(a), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), 1)
        # Score tag anchored at the bottom-right corner of the box.
        score_size = cv2.getTextSize(str(pred_score[i]), 1, cv2.FONT_HERSHEY_PLAIN, 1)[0]
        textrbottom = b - np.array(list(score_size))
        cv2.rectangle(image, tuple(textrbottom), tuple(b), color, -1)
        b = list(b)
        cv2.putText(image, str(pred_score[i]), (textrbottom[0], b[1]), cv2.FONT_HERSHEY_PLAIN, 1.0,
                    (255, 255, 255), 1)
    cv2.imwrite(output_folder + str(image_id) + ".jpg", image)
    
def plot_comparision_prediction(image_id,index,pred_bbox,pred_score,l,output_folder,color):
    """Like plot_comparision, but `pred_bbox`/`pred_score` are first indexed
    by `index` (presumably a boolean/row selector — confirm at call sites)
    before the class-`l` boxes are drawn in `color` and saved in place.
    """
    image = cv2.imread(output_folder+str(image_id)+".jpg")
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    for i,bbox in enumerate(pred_bbox[index]):
        xmin,ymin,xmax,ymax = bbox
        a = (int(xmin),int(ymin))
        b = (int(xmax),int(ymax))
        cv2.rectangle(image,a,b,color,2)
        # Class-label tag with filled backdrop at the top-left corner.
        t_size = cv2.getTextSize(str(l),1,cv2.FONT_HERSHEY_PLAIN,1)[0]
        textlbottom = a + np.array(list(t_size))
        cv2.rectangle(image, tuple(a), tuple(textlbottom),color, -1)
        a = list(a)
        a[1] = int(a[1] + (list(t_size)[1] / 2 + 4))
        cv2.putText(image, str(l), tuple(a), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), 1)
        # Score tag with filled backdrop at the bottom-right corner.
        score_size = cv2.getTextSize(str(pred_score[index][i]), 1, cv2.FONT_HERSHEY_PLAIN, 1)[0]
        textrbottom = b - np.array(list(score_size))
        cv2.rectangle(image, tuple(textrbottom), tuple(b), color, -1)
        b = list(b)
        cv2.putText(image, str(pred_score[index][i]), (textrbottom[0], b[1]), cv2.FONT_HERSHEY_PLAIN, 1.0,
                    (255, 255, 255), 1)
    cv2.imwrite(output_folder + str(image_id) + ".jpg", image)

def do_voc_evaluation(dataset, predictions, output_folder, logger):
    """Evaluate `predictions` on `dataset` with the VOC 07 metric.

    Resizes every prediction BoxList to the original image resolution,
    gathers the matching ground-truth BoxLists, computes per-class AP and
    mAP via eval_detection_voc, logs a formatted summary (class index 0,
    the background, is skipped), and optionally writes it to
    <output_folder>/result.txt.  Returns the result dict.
    """
    # TODO need to make the use_07_metric format available
    # for the user to choose
    pred_boxlists = []
    gt_boxlists = []
    for image_id, prediction in enumerate(predictions):
        info = dataset.get_img_info(image_id)
        original_size = (info["width"], info["height"])
        pred_boxlists.append(prediction.resize(original_size))
        gt_boxlists.append(dataset.get_groundtruth(image_id))
    result = eval_detection_voc(
        pred_boxlists=pred_boxlists,
        gt_boxlists=gt_boxlists,
        iou_thresh=0.5,
        use_07_metric=True,
    )
    lines = ["mAP: {:.4f}\n".format(result["map"])]
    for class_id, ap in enumerate(result["ap"]):
        if class_id == 0:  # skip background
            continue
        if not math.isnan(ap):
            class_name = dataset.map_class_id_to_class_name(class_id)
            lines.append("{:<16}: {:.4f}\n".format(class_name, ap))
    result_str = "".join(lines)
    logger.info(result_str)
    if output_folder:
        with open(os.path.join(output_folder, "result.txt"), "w") as fid:
            fid.write(result_str)
    return result

def eval_detection_voc(pred_boxlists, gt_boxlists, iou_thresh=0.5, use_07_metric=False):
    """Evaluate on voc dataset.
    Args:
        pred_boxlists(list[BoxList]): pred boxlist, has labels and scores fields.
        gt_boxlists(list[BoxList]): ground truth boxlist, has labels field.
        iou_thresh: iou thresh
        use_07_metric: boolean
    Returns:
        dict represents the results
    """
    assert len(gt_boxlists) == len(
        pred_boxlists
    ), "Length of gt and pred lists need to be same."
    prec, rec = calc_detection_voc_prec_rec(
        gt_boxlists=gt_boxlists,
        pred_boxlists=pred_boxlists,
        iou_thresh=iou_thresh,
    )
    # Return value intentionally unused here; called for its own output/side
    # effects (defined elsewhere in the project).
    calc_voc_prec(gt_boxlists, pred_boxlists, iou_thresh=0.5)
    ap = calc_detection_voc_ap(prec, rec, use_07_metric=use_07_metric)
    # nanmean: classes with no AP (NaN) are excluded from the mean.
    return {"ap": ap, "map": np.nanmean(ap)}

def get_confidence(dataset,image_id,pred_boxlist_0,pred_boxlist_1,pred_boxlist_2,pred_boxlist_3,pred_boxlist_4,pred_boxlist_5):
    """Dump ground truth plus each of six prediction rounds to its own folder.

    For round k, writes <...>/clipart_prediction_k/<image_id>.jpg containing
    the ground-truth boxes (via plot_labels) overlaid with that round's
    predictions (via plot_prediction).

    The six near-identical copy-pasted stanzas of the original are collapsed
    into one loop; the generated folder paths are byte-identical.
    """
    base = "/mnt/sde1/xiaoqianruan/OSHOT/outputs/clipart_prediction_"
    boxlists = (pred_boxlist_0, pred_boxlist_1, pred_boxlist_2,
                pred_boxlist_3, pred_boxlist_4, pred_boxlist_5)
    for round_idx, boxlist in enumerate(boxlists):
        output_folder = base + str(round_idx) + "/"
        plot_labels(dataset, image_id, output_folder)
        plot_prediction(image_id, boxlist, output_folder)
    
def compare_confidence(dataset,i,image_id,pred_boxlist,gt_boxlist,accuracy,result):
    """Match predictions to ground truth per class at IoU 0.5.

    For each class label present in the predictions or the ground truth,
    predictions are sorted by descending score and greedily associated with
    same-class GT boxes by maximum IoU.  Matched, non-difficult detections
    (boxes + scores) go into dic_acc; unmatched ones into dic_no; classes
    predicted but absent from the ground truth go into dic_no wholesale.
    The pair (dic_acc, dic_no) is appended to accuracy[i].

    Returns `result` incremented by the number of correct detections.
    """
    iou_thresh = 0.5
    dic_acc = {}
    dic_no = {}
    pred_bbox = pred_boxlist.bbox.cpu().numpy()
    pred_label = pred_boxlist.get_field("labels").cpu().numpy()
    pred_score = pred_boxlist.get_field("scores").cpu().numpy()

    gt_bbox = gt_boxlist.bbox.cpu().numpy()
    gt_label = gt_boxlist.get_field("labels").cpu().numpy()
    gt_difficult = gt_boxlist.get_field("difficult").cpu().numpy()

    for l in np.unique(np.concatenate((pred_label, gt_label)).astype(int)):
        pred_mask_l = pred_label == l
        pred_bbox_l = pred_bbox[pred_mask_l]
        pred_score_l = pred_score[pred_mask_l]
        # Process this class's predictions in descending score order.
        order_l = pred_score_l.argsort()[::-1]
        pred_bbox_l = pred_bbox_l[order_l]
        pred_score_l = pred_score_l[order_l]

        gt_mask_l = gt_label == l
        gt_bbox_l = gt_bbox[gt_mask_l]
        gt_difficult_l = gt_difficult[gt_mask_l]

        if len(pred_bbox_l) == 0 or len(gt_bbox_l) == 0:
            continue
        # VOC-style inclusive boxes: widen by one pixel before the IoU.
        pred_bbox_l = pred_bbox_l.copy()
        pred_bbox_l[:, 2:] += 1
        gt_bbox_l = gt_bbox_l.copy()
        gt_bbox_l[:, 2:] += 1

        iou = boxlist_iou(
            BoxList(pred_bbox_l, gt_boxlist.size),
            BoxList(gt_bbox_l, gt_boxlist.size),
        ).numpy()
        gt_index = iou.argmax(axis=1)
        gt_index[iou.max(axis=1) < iou_thresh] = -1
        del iou
        index = np.logical_and(gt_index >= 0, gt_difficult_l[gt_index] == 0)
        dic_acc[l] = (pred_bbox_l[index], pred_score_l[index])
        # BUGFIX: the original also required gt_difficult_l[gt_index] == 0
        # here; for unmatched predictions gt_index is -1, so that fancy
        # index silently read the LAST ground-truth box's difficult flag.
        # An unmatched prediction has no associated GT box, so no difficult
        # test applies.
        no_index = gt_index < 0
        dic_no[l] = (pred_bbox_l[no_index], pred_score_l[no_index])
        result += int(np.sum(index))

    # Classes that were predicted but never occur in the ground truth are
    # recorded as wrong detections.  (The original reached this via a
    # `for ... else:` with no `break` in the loop, so the else-body always
    # ran — this straight-line form is equivalent.)
    for l in np.unique(pred_label):
        if l not in gt_label:
            pred_mask_l = pred_label == l
            pred_bbox_l = pred_bbox[pred_mask_l]
            pred_score_l = pred_score[pred_mask_l]
            order_l = pred_score_l.argsort()[::-1]
            dic_no[l] = (pred_bbox_l[order_l], pred_score_l[order_l])
    accuracy[i].append((dic_acc, dic_no))
    return result

@torch.no_grad()
def bbox_with_iou(proposal_box_used,proposal_box_now,thres1,thres2):
    """Filter `proposal_box_now` against a reference set `proposal_box_used`.

    A box from `proposal_box_now` is kept when EITHER
      - its label does not occur in `proposal_box_used` AND its score
        exceeds `thres1`, OR
      - some same-label box in `proposal_box_used` overlaps it with
        IoU > `thres2`.
    Returns a new BoxList (with "scores" and "labels") of the kept boxes;
    if either input is empty, `proposal_box_now` is returned unchanged.
    """
    if len(proposal_box_used)==0 or len(proposal_box_now)==0:
        return proposal_box_now
    scores_used = proposal_box_used.get_field("scores").cpu().numpy()
    scores_now = proposal_box_now.get_field("scores").cpu().numpy()
    bbox_used = proposal_box_used.bbox.cpu().numpy()
    bbox_now = proposal_box_now.bbox.cpu().numpy()
    label_now = proposal_box_now.get_field("labels").cpu().numpy()
    label_used = proposal_box_used.get_field("labels").cpu().numpy()
    # Start with: label unseen in the used set AND score above thres1.
    valid_map = np.logical_not(np.isin(label_now,label_used))
    valid_map = np.logical_and(valid_map,scores_now>thres1)
    #valid_map = np.logical_or(valid_map,scores_now>0.45)
    for l in np.unique(np.concatenate((label_used, label_now)).astype(int)):
        pred_mask_l = label_used == l
        pred_bbox_l = bbox_used[pred_mask_l]
        pred_score_l = scores_used[pred_mask_l]
        
        pred_mask_n = label_now == l
        pred_bbox_n = bbox_now[pred_mask_n]
        pred_score_n = scores_now[pred_mask_n]
        
        if len(pred_bbox_l)==0:
            continue
        if len(pred_bbox_n)==0:
            continue
        # VOC-style inclusive boxes: widen by one pixel before the IoU.
        pred_bbox_l = pred_bbox_l.copy()
        pred_bbox_l[:,2:]+=1
        pred_bbox_n = pred_bbox_n.copy()
        pred_bbox_n[:,2:]+=1
        
        iou = boxlist_iou(
            BoxList(pred_bbox_n,proposal_box_now.size),
            BoxList(pred_bbox_l,proposal_box_now.size),
        ).numpy()
        # Additionally keep any current box that overlaps a same-label used
        # box with IoU above thres2.
        valid_map[pred_mask_n] = np.logical_or(valid_map[pred_mask_n],iou.max(axis=1)>thres2)
    image_shape = proposal_box_now.size
    new_box_loc = proposal_box_now.bbox[valid_map,:]
    new_boxes = BoxList(new_box_loc,image_shape)
    new_boxes.add_field("scores",proposal_box_now.get_field("scores")[valid_map])
    new_boxes.add_field("labels",proposal_box_now.get_field("labels")[valid_map])
    return new_boxes

def analysis_iou(pred_boxlist0,pred_boxlist1,gt_boxlist,correct_labels,wrong_labels1,wrong_labels2):
    """Count correct/wrong detections of pred_boxlist1 filtered by pred_boxlist0.

    pred_boxlist1 is first filtered through bbox_with_iou against
    pred_boxlist0 (thres1=1.0 means no box passes on score alone, so only
    boxes overlapping a same-label box of pred_boxlist0 with IoU > 0.75
    survive).  The survivors are then matched per class to the ground truth
    at IoU 0.5:
      - correct_labels accumulates matched, non-difficult detections;
      - wrong_labels1 accumulates unmatched/difficult-matched detections;
      - wrong_labels2 accumulates detections of classes absent from the GT.
    Returns the three updated counters.
    """
    iou_thresh = 0.5
    
    pred_boxlist = bbox_with_iou(pred_boxlist0,pred_boxlist1,1.0,0.75)
    
    pred_bbox = pred_boxlist.bbox.cpu().numpy()
    pred_label = pred_boxlist.get_field("labels").cpu().numpy()
    pred_score = pred_boxlist.get_field("scores").cpu().numpy()
    
    gt_bbox = gt_boxlist.bbox.cpu().numpy()
    gt_label = gt_boxlist.get_field("labels").cpu().numpy()
    gt_difficult = gt_boxlist.get_field("difficult").cpu().numpy()
    
    for l in np.unique(np.concatenate((pred_label,gt_label)).astype(int)):
        pred_mask_l = pred_label == l
        pred_bbox_l = pred_bbox[pred_mask_l]
        pred_score_l = pred_score[pred_mask_l]
        # Descending-score order within the class.
        order_l = pred_score_l.argsort()[::-1]
        pred_bbox_l = pred_bbox_l[order_l]
        pred_score_l = pred_score_l[order_l]
        
        gt_mask_l = gt_label == l
        gt_bbox_l = gt_bbox[gt_mask_l]
        gt_difficult_l = gt_difficult[gt_mask_l]
        
        if len(pred_bbox_l)==0:
            continue
        if len(gt_bbox_l)==0:
            continue
        # VOC-style inclusive boxes: widen by one pixel before the IoU.
        pred_bbox_l = pred_bbox_l.copy()
        pred_bbox_l[:,2:]+=1
        gt_bbox_l = gt_bbox_l.copy()
        gt_bbox_l[:,2:]+=1
        
        iou = boxlist_iou(
            BoxList(pred_bbox_l,gt_boxlist.size),
            BoxList(gt_bbox_l,gt_boxlist.size),
        ).numpy()
        gt_index = iou.argmax(axis=1)
        # -1 marks predictions whose best IoU is below the threshold.
        gt_index[iou.max(axis=1) < iou_thresh] = -1
        del iou
        index = np.logical_and(gt_index>=0, gt_difficult_l[gt_index] == 0)
        correct_labels += np.sum(index!=0)
        # NOTE(review): for gt_index == -1 this fancy index reads the LAST
        # gt box's difficult flag; also `no_index` is never used below.
        no_index = np.logical_and(gt_index<0,gt_difficult_l[gt_index]==0)
        wrong_labels1 += np.sum(index==0)
    # for/else: the loop has no `break`, so this else-body always executes
    # after the loop completes.
    else:
        label = np.unique(pred_label)
        for l in label:
            if l not in gt_label:
                pred_mask_l = pred_label == l
                pred_bbox_l = pred_bbox[pred_mask_l]
                pred_score_l = pred_score[pred_mask_l]
                order_l = pred_score_l.argsort()[::-1]
                pred_bbox_l = pred_bbox_l[order_l]
                pred_score_l = pred_score_l[order_l]
                wrong_labels2+=len(pred_score_l)
    return correct_labels,wrong_labels1,wrong_labels2

def analysis_results(accuracy):
    """Print counts of correct and wrong detection confidences for round 0.

    `accuracy` is a list (indexed by prediction round) of per-image
    (dic_acc, dic_no) pairs as produced by compare_confidence, where each
    dict maps a class label to a (boxes, scores) tuple.  Only accuracy[0]
    is inspected.  Prints, for the correct and the wrong detections
    respectively, the total number of scores and the number of scores
    above 0.15.

    Fixes vs. the original: the counters are now always defined (the
    original raised NameError at the prints when the dicts were empty),
    and the sorting/filtering is done once after the loop instead of being
    redone on every extension.  Additionally returns
    (correct_number, correct_threshold, wrong_number, wrong_threshold);
    the original returned None, so this is backward-compatible.
    """
    correct_list = []
    wrong_list = []
    for dic_acc, dic_no in accuracy[0]:
        for _, scores in dic_acc.values():
            correct_list.extend(scores)
        for _, scores in dic_no.values():
            wrong_list.extend(scores)
    correct_list.sort()
    wrong_list.sort()
    correct_number = len(correct_list)
    correct_threshold = len([x for x in correct_list if x > 0.15])
    wrong_number = len(wrong_list)
    wrong_threshold = len([x for x in wrong_list if x > 0.15])
    print(correct_number, correct_threshold)
    print(wrong_number, wrong_threshold)
    return correct_number, correct_threshold, wrong_number, wrong_threshold
"""  
def analysis_results(accuracy):
    print(len(accuracy))
    print(len(accuracy[0]))
    for m, acc0 in enumerate(accuracy[0]):
        with open("result"+str(m)+".txt","a") as f:
            for key in acc0[0].keys():
                f.writelines(str(key)+"\n")
                f.writelines(str(acc0[0][key])+"\n")
            f.writelines("\n")
            for key in accuracy[1][m][0].keys():
                f.writelines(str(key)+str("\n"))
                f.writelines(str(accuracy[1][m][0][key])+str("\n"))
            f.writelines("\n")
            for key in accuracy[2][m][0].keys():
                f.writelines(str(key)+"\n")
                f.writelines(str(accuracy[2][m][0][key])+str("\n"))
            f.writelines("\n")
            for key in accuracy[3][m][0].keys():
                f.writelines(str(key)+"\n")
                f.writelines(str(accuracy[3][m][0][key])+str("\n"))
            f.writelines("\n")
            for key in accuracy[4][m][0].keys():
                f.writelines(str(key)+"\n")
                f.writelines(str(accuracy[4][m][0][key])+str("\n"))
            f.writelines("\n") 
            for key in accuracy[5][m][0].keys():
                f.writelines(str(key)+"\n")
                f.writelines(str(accuracy[5][m][0][key])+str("\n"))
            f.writelines("\n")
            f.writelines("\n")
            for key in acc0[1].keys():
                f.writelines(str(key)+"\n")
                f.writelines(str(acc0[1][key])+"\n")
            f.writelines("\n")
            for key in accuracy[1][m][1].keys():
                f.writelines(str(key)+str("\n"))
                f.writelines(str(accuracy[1][m][1][key])+str("\n"))
            f.writelines("\n")
            for key in accuracy[2][m][1].keys():
                f.writelines(str(key)+"\n")
                f.writelines(str(accuracy[2][m][1][key])+str("\n"))
            f.writelines("\n")
            for key in accuracy[3][m][1].keys():
                f.writelines(str(key)+"\n")
                f.writelines(str(accuracy[3][m][1][key])+str("\n"))
            f.writelines("\n")
            for key in accuracy[4][m][1].keys():
                f.writelines(str(key)+"\n")
                f.writelines(str(accuracy[4][m][1][key])+str("\n"))
            f.writelines("\n") 
            for key in accuracy[5][m][1].keys():
                f.writelines(str(key)+"\n")
                f.writelines(str(accuracy[5][m][1][key])+str("\n"))
        f.close()
""" 
                        
def test_iou(dataset,image_id,pred_boxlist,gt_boxlist,output_folder,color):
    """Per-class IoU matching of predictions vs. ground truth (analysis only).

    NOTE(review): in its current state this function computes `index` /
    `no_index` but never uses them — all plotting calls and the alternate
    loop at the bottom are commented out, so calling it has no observable
    effect.  Kept as-is pending a decision on which analysis to revive.
    """
    iou_thresh = 0.5
    pred_bbox = pred_boxlist.bbox.cpu().numpy()
    pred_label = pred_boxlist.get_field("labels").cpu().numpy()
    pred_score = pred_boxlist.get_field("scores").cpu().numpy()
    
    gt_bbox = gt_boxlist.bbox.cpu().numpy()
    gt_label = gt_boxlist.get_field("labels").cpu().numpy()
    gt_difficult = gt_boxlist.get_field("difficult").cpu().numpy()
    
    for l in np.unique(pred_label):
        if l not in gt_label:
            # Class predicted but absent from the GT: sort by descending
            # score (results currently unused; plotting commented out).
            pred_mask_l = pred_label == l
            pred_bbox_l = pred_bbox[pred_mask_l]
            pred_score_l = pred_score[pred_mask_l]
            order = pred_score_l.argsort()[::-1]
            pred_bbox_l = pred_bbox_l[order]
            pred_score_l = pred_score_l[order]
            #plot_comparision(image_id,pred_bbox,pred_score,l,output_folder,color)
        else:
            pred_mask_l = pred_label == l
            pred_bbox_l = pred_bbox[pred_mask_l]
            pred_score_l = pred_score[pred_mask_l]
            order = pred_score_l.argsort()[::-1]
            pred_bbox_l = pred_bbox_l[order]
            pred_score_l = pred_score_l[order]
            
            gt_mask_l = gt_label == l
            gt_bbox_l = gt_bbox[gt_mask_l]
            gt_difficult_l = gt_difficult[gt_mask_l]
            
            if len(pred_bbox_l) == 0:
                continue
            if len(gt_bbox_l) == 0:
                continue
            # VOC-style inclusive boxes: widen by one pixel before the IoU.
            pred_bbox_l = pred_bbox_l.copy()
            pred_bbox_l[:, 2:] += 1
            gt_bbox_l = gt_bbox_l.copy()
            gt_bbox_l[:, 2:] += 1
            iou = boxlist_iou(
                BoxList(pred_bbox_l, gt_boxlist.size),
                BoxList(gt_bbox_l, gt_boxlist.size),
            ).numpy()
            gt_index = iou.argmax(axis=1)
            gt_index[iou.max(axis=1) < iou_thresh] = -1
            del iou
      
            # NOTE(review): for gt_index == -1 the fancy index in no_index
            # reads the LAST gt box's difficult flag; both results unused.
            index = np.logical_and(gt_index>=0, gt_difficult_l[gt_index] == 0)
            no_index = np.logical_and(gt_index<0,gt_difficult_l[gt_index]==0)
            
    #for l in np.unique(np.concatenate((pred_label, gt_label)).astype(int)):
    #    pred_mask_l = pred_label == l
    #    pred_bbox_l = pred_bbox[pred_mask_l]
    #    pred_score_l = pred_score[pred_mask_l]
    #    order = pred_score_l.argsort()[::-1]
    #    pred_bbox_l = pred_bbox_l[order]
    #    pred_score_l = pred_score_l[order]

    #    gt_mask_l = gt_label == l
    #    gt_bbox_l = gt_bbox[gt_mask_l]
    #    gt_difficult_l = gt_difficult[gt_mask_l]
                
    #    if len(pred_bbox_l) == 0:
    #        continue
    #    if len(gt_bbox_l) == 0:
    #        continue
    #    pred_bbox_l = pred_bbox_l.copy()
    #    pred_bbox_l[:, 2:] += 1
    #    gt_bbox_l = gt_bbox_l.copy()
    #    gt_bbox_l[:, 2:] += 1
    #    iou = boxlist_iou(
    #        BoxList(pred_bbox_l, gt_boxlist.size),
    #        BoxList(gt_bbox_l, gt_boxlist.size),
    #    ).numpy()
    #    gt_index = iou.argmax(axis=1)
    #    gt_index[iou.max(axis=1) < iou_thresh] = -1
    #    del iou
      
    #    index = np.logical_and(gt_index>=0, gt_difficult_l[gt_index] == 0)
    #    plot_comparision_prediction(image_id,index,pred_bbox_l,pred_score_l,l,output_folder,color)

def compare_iou(dataset, image_id, pred_boxlist_0, pred_boxlist_1, pred_boxlist_2, pred_boxlist_3, pred_boxlist_4, pred_boxlist_5, gt_boxlist):
    """Visualise, for six prediction stages of the same image, which predicted
    boxes match a same-class ground-truth box.

    For every stage ``i`` the per-class matched predictions (IoU >= 0.5
    against a non-difficult ground-truth box) are plotted into
    ``.../clipart_images{i}/`` via ``plot_labels`` and ``plot_comparision``.

    Fixes over the previous version:
      * stage loops 2-4 reused a stale ``pred_bbox_n`` left over from an
        earlier loop instead of the current stage's boxes;
      * loop 0 assigned scores from ``pred_bbox_1`` instead of
        ``pred_score_1`` and passed a numpy array's ``.size`` (element
        count) where a ``(width, height)`` image size was expected;
      * every ``co_iou`` matrix was computed and immediately discarded, so
        that dead computation is removed entirely.

    Args:
        dataset: dataset used by ``plot_labels`` to fetch the image.
        image_id (int): index of the image within ``dataset``.
        pred_boxlist_0..pred_boxlist_5 (BoxList): per-stage predictions,
            each with "labels" and "scores" fields.
        gt_boxlist (BoxList): ground truth with "labels" and "difficult"
            fields.
    """
    iou_thresh = 0.5

    gt_bbox = gt_boxlist.bbox.cpu().numpy()
    gt_label = gt_boxlist.get_field("labels").cpu().numpy()
    gt_difficult = gt_boxlist.get_field("difficult").cpu().numpy()

    pred_boxlists = (pred_boxlist_0, pred_boxlist_1, pred_boxlist_2,
                     pred_boxlist_3, pred_boxlist_4, pred_boxlist_5)
    for stage, pred_boxlist in enumerate(pred_boxlists):
        output_folder = (
            "/mnt/sde1/xiaoqianruan/OSHOT/outputs/clipart_images%d/" % stage
        )
        plot_labels(dataset, image_id, output_folder)

        pred_bbox = pred_boxlist.bbox.cpu().numpy()
        pred_label = pred_boxlist.get_field("labels").cpu().numpy()
        pred_score = pred_boxlist.get_field("scores").cpu().numpy()

        for l in np.unique(np.concatenate((pred_label, gt_label)).astype(int)):
            # Class-l predictions, sorted by descending confidence.
            pred_mask_l = pred_label == l
            pred_bbox_l = pred_bbox[pred_mask_l]
            pred_score_l = pred_score[pred_mask_l]
            order = pred_score_l.argsort()[::-1]
            pred_bbox_l = pred_bbox_l[order]
            pred_score_l = pred_score_l[order]

            gt_mask_l = gt_label == l
            gt_bbox_l = gt_bbox[gt_mask_l]
            gt_difficult_l = gt_difficult[gt_mask_l]

            if len(pred_bbox_l) == 0 or len(gt_bbox_l) == 0:
                continue

            # VOC evaluation follows integer typed bounding boxes.
            pred_bbox_l = pred_bbox_l.copy()
            pred_bbox_l[:, 2:] += 1
            gt_bbox_l = gt_bbox_l.copy()
            gt_bbox_l[:, 2:] += 1

            iou = boxlist_iou(
                BoxList(pred_bbox_l, gt_boxlist.size),
                BoxList(gt_bbox_l, gt_boxlist.size),
            ).numpy()
            gt_index = iou.argmax(axis=1)
            # -1 marks predictions without a sufficiently overlapping GT box.
            gt_index[iou.max(axis=1) < iou_thresh] = -1
            del iou

            # Keep only predictions matched to a non-difficult GT box.
            index = np.logical_and(gt_index >= 0, gt_difficult_l[gt_index] == 0)
            if not index.any():
                continue

            plot_comparision(image_id, index, pred_bbox_l, pred_score_l, l,
                             output_folder)

def calc_voc_prec(gt_boxlists, pred_boxlists, iou_thresh=0.5):
    """Append, per class, the confidence scores of correctly matched
    predictions to ``confidence_predictions_5.txt``.

    A prediction counts as matched when its IoU with some same-class,
    non-difficult ground-truth box is at least ``iou_thresh``.

    Args:
        gt_boxlists (list of BoxList): per-image ground truth; each must
            carry "labels" and "difficult" fields.
        pred_boxlists (list of BoxList): per-image predictions; each must
            carry "labels" and "scores" fields.
        iou_thresh (float): minimum IoU for a true-positive match.
    """
    acc_confidence = defaultdict(list)
    for gt_boxlist, pred_boxlist in zip(gt_boxlists, pred_boxlists):
        pred_bbox = pred_boxlist.bbox.cpu().numpy()
        pred_label = pred_boxlist.get_field("labels").cpu().numpy()
        pred_score = pred_boxlist.get_field("scores").cpu().numpy()

        gt_bbox = gt_boxlist.bbox.cpu().numpy()
        gt_label = gt_boxlist.get_field("labels").cpu().numpy()
        gt_difficult = gt_boxlist.get_field("difficult").cpu().numpy()

        for l in np.unique(np.concatenate((pred_label, gt_label)).astype(int)):
            # Class-l predictions, sorted by descending confidence.
            pred_mask_l = pred_label == l
            pred_bbox_l = pred_bbox[pred_mask_l]
            pred_score_l = pred_score[pred_mask_l]
            order = pred_score_l.argsort()[::-1]
            pred_bbox_l = pred_bbox_l[order]
            pred_score_l = pred_score_l[order]

            gt_mask_l = gt_label == l
            gt_bbox_l = gt_bbox[gt_mask_l]
            gt_difficult_l = gt_difficult[gt_mask_l]

            if len(pred_bbox_l) == 0:
                continue
            if len(gt_bbox_l) == 0:
                continue

            # VOC evaluation follows integer typed bounding boxes.
            pred_bbox_l = pred_bbox_l.copy()
            pred_bbox_l[:, 2:] += 1
            gt_bbox_l = gt_bbox_l.copy()
            gt_bbox_l[:, 2:] += 1
            iou = boxlist_iou(
                BoxList(pred_bbox_l, gt_boxlist.size),
                BoxList(gt_bbox_l, gt_boxlist.size),
            ).numpy()
            gt_index = iou.argmax(axis=1)
            # -1 marks predictions without a sufficiently overlapping GT box.
            gt_index[iou.max(axis=1) < iou_thresh] = -1
            del iou

            # Scores of predictions matched to a non-difficult GT box.
            index = np.logical_and(gt_index >= 0, gt_difficult_l[gt_index] == 0)
            acc_confidence[l].append(pred_score_l[index])

    with open("confidence_predictions_5.txt", "a") as f:
        for label in acc_confidence.keys():
            f.write(str(label) + "\n")
            for confidence in acc_confidence[label]:
                # BUG FIX: the old check `confidence == []` compared a numpy
                # array against a list and never reliably detected emptiness.
                if confidence.size == 0:
                    continue
                f.write(str(confidence) + "\n")

def calc_detection_voc_prec_rec(gt_boxlists, pred_boxlists, iou_thresh=0.5):
    """Calculate precision and recall based on evaluation code of PASCAL VOC.

    This function calculates precision and recall of predicted bounding
    boxes obtained from a dataset which has :math:`N` images.  The code is
    based on the evaluation code used in the PASCAL VOC Challenge.

    Args:
        gt_boxlists (list of BoxList): per-image ground truth; each BoxList
            must carry "labels" and "difficult" fields.
        pred_boxlists (list of BoxList): per-image predictions; each BoxList
            must carry "labels" and "scores" fields.
        iou_thresh (float): a prediction matches a ground-truth box when
            their IoU is at least this value.

    Returns:
        tuple of two lists indexed by class label: ``prec[l]`` is the
        cumulative precision curve for class ``l`` (``None`` for class
        indices never seen), and ``rec[l]`` is the matching recall curve
        (``None`` when the class has no non-difficult ground truth).
    """
    n_pos = defaultdict(int)    # per class: number of non-difficult GT boxes
    score = defaultdict(list)   # per class: confidence of every prediction
    match = defaultdict(list)   # per class: 1 = TP, 0 = FP, -1 = ignored (difficult)
    for gt_boxlist, pred_boxlist in zip(gt_boxlists, pred_boxlists):
        pred_bbox = pred_boxlist.bbox.cpu().numpy()
        pred_label = pred_boxlist.get_field("labels").cpu().numpy()
        pred_score = pred_boxlist.get_field("scores").cpu().numpy()
        gt_bbox = gt_boxlist.bbox.cpu().numpy()
        gt_label = gt_boxlist.get_field("labels").cpu().numpy()
        gt_difficult = gt_boxlist.get_field("difficult").cpu().numpy()
        # Evaluate every class present in either the predictions or the GT.
        for l in np.unique(np.concatenate((pred_label, gt_label)).astype(int)):
            pred_mask_l = pred_label == l
            pred_bbox_l = pred_bbox[pred_mask_l]
            pred_score_l = pred_score[pred_mask_l]
            # sort by score
            order = pred_score_l.argsort()[::-1]
            pred_bbox_l = pred_bbox_l[order]
            pred_score_l = pred_score_l[order]

            gt_mask_l = gt_label == l
            gt_bbox_l = gt_bbox[gt_mask_l]
            gt_difficult_l = gt_difficult[gt_mask_l]

            # Recall denominator: only non-difficult GT boxes count.
            n_pos[l] += np.logical_not(gt_difficult_l).sum()
            score[l].extend(pred_score_l)

            if len(pred_bbox_l) == 0:
                continue
            if len(gt_bbox_l) == 0:
                # No GT of this class in the image: every prediction is a FP.
                match[l].extend((0,) * pred_bbox_l.shape[0])
                continue

            # VOC evaluation follows integer typed bounding boxes.
            pred_bbox_l = pred_bbox_l.copy()
            pred_bbox_l[:, 2:] += 1
            gt_bbox_l = gt_bbox_l.copy()
            gt_bbox_l[:, 2:] += 1
            iou = boxlist_iou(
                BoxList(pred_bbox_l, gt_boxlist.size),
                BoxList(gt_bbox_l, gt_boxlist.size),
            ).numpy()
            gt_index = iou.argmax(axis=1)
            # set -1 if there is no matching ground truth
            gt_index[iou.max(axis=1) < iou_thresh] = -1
            del iou

            # Greedy assignment in descending-score order: each GT box may
            # validate at most one prediction; duplicate matches become FPs.
            selec = np.zeros(gt_bbox_l.shape[0], dtype=bool)
            for gt_idx in gt_index:
                if gt_idx >= 0:
                    if gt_difficult_l[gt_idx]:
                        # Matches to difficult GT are ignored by the metric.
                        match[l].append(-1)
                    else:
                        if not selec[gt_idx]:
                            match[l].append(1)
                        else:
                            match[l].append(0)
                    selec[gt_idx] = True
                else:
                    match[l].append(0)

    # NOTE(review): assumes at least one class was observed; `max()` raises
    # ValueError when both input lists are empty — confirm with callers.
    n_fg_class = max(n_pos.keys()) + 1
    prec = [None] * n_fg_class
    rec = [None] * n_fg_class

    for l in n_pos.keys():
        score_l = np.array(score[l])
        match_l = np.array(match[l], dtype=np.int8)

        # Accumulate TP/FP over predictions sorted by descending confidence.
        order = score_l.argsort()[::-1]
        match_l = match_l[order]

        tp = np.cumsum(match_l == 1)
        fp = np.cumsum(match_l == 0)

        # If an element of fp + tp is 0,
        # the corresponding element of prec[l] is nan.
        prec[l] = tp / (fp + tp)
        # If n_pos[l] is 0, rec[l] is None.
        if n_pos[l] > 0:
            rec[l] = tp / n_pos[l]

    return prec, rec


def calc_detection_voc_ap(prec, rec, use_07_metric=False):
    """Compute per-class average precision for PASCAL VOC evaluation.

    Args:
        prec (list of numpy.array): ``prec[l]`` is the precision curve for
            class ``l``; ``None`` yields ``numpy.nan`` for that class.
        rec (list of numpy.array): ``rec[l]`` is the recall curve for class
            ``l``; ``None`` yields ``numpy.nan`` for that class.
        use_07_metric (bool): if True, use the VOC2007 11-point
            interpolated metric instead of the exact area under the curve.

    Returns:
        ~numpy.ndarray: array of average precisions, one entry per class;
        entries for classes with a ``None`` curve are ``numpy.nan``.
    """
    ap = np.empty(len(prec))
    for cls, (p, r) in enumerate(zip(prec, rec)):
        if p is None or r is None:
            # No curve for this class: AP is undefined.
            ap[cls] = np.nan
        elif use_07_metric:
            # VOC2007 11-point interpolation: average, over the recall
            # thresholds t = 0.0, 0.1, ..., 1.0, the best precision
            # attainable at recall >= t (0 when no point reaches t).
            ap[cls] = 0
            for t in np.arange(0.0, 1.1, 0.1):
                reachable = r >= t
                if reachable.any():
                    ap[cls] += np.max(np.nan_to_num(p)[reachable]) / 11
        else:
            # Exact area under the monotonised precision-recall curve.
            # Sentinels bracket the curve at recall 0 and 1.
            mpre = np.concatenate(([0], np.nan_to_num(p), [0]))
            mrec = np.concatenate(([0], r, [1]))

            # Make precision non-increasing from right to left.
            mpre = np.maximum.accumulate(mpre[::-1])[::-1]

            # Integrate: sum precision * recall-step wherever recall moves.
            steps = np.where(mrec[1:] != mrec[:-1])[0]
            ap[cls] = np.sum((mrec[steps + 1] - mrec[steps]) * mpre[steps + 1])

    return ap