from argparse import ArgumentParser

import cv2
import numpy as np

from mmdet.apis import inference_detector, init_detector, show_result_pyplot
import pdb
# python image_demo.py demo.jpg ../configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py ../weights/faster_rcnn_r50_fpn_1x_coco-person_20201216_175929-d022e227.pth --device cuda:0 --score-thr 0.4
def main_img():
    """Run the detector on a single image and display the result.

    CLI: img config checkpoint [--device] [--score-thr]
    """
    parser = ArgumentParser()
    parser.add_argument('img', help='Image file')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument('--device', default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--score-thr', type=float, default=0.5,
                        help='bbox score threshold')
    args = parser.parse_args()

    # Build the detector from config + weights, run inference on the one
    # image, then render detections above the score threshold.
    model = init_detector(args.config, args.checkpoint, device=args.device)
    detections = inference_detector(model, args.img)
    show_result_pyplot(model, args.img, detections, score_thr=args.score_thr)
import glob
import os.path as osp
import mmcv
from tqdm import tqdm
from collections import OrderedDict

import pickle as pk

def pickle(data, file_path):
    """Serialize *data* to *file_path* as a binary pickle (highest protocol).

    NOTE(review): this function shadows the stdlib ``pickle`` module name
    (imported here as ``pk``) — callers in this file rely on the name, so
    it is kept as-is.
    """
    with open(file_path, "wb") as sink:
        pk.dump(data, sink, protocol=pk.HIGHEST_PROTOCOL)


def unpickle(file_path):
    """Load and return the object pickled at *file_path*."""
    with open(file_path, "rb") as src:
        return pk.load(src)

def main_img_dir():
    """Run Mask R-CNN over a directory of ReID crops and pickle annotations.

    For every ``*.jpg`` in ``--img_dir``, keeps the person detection
    (COCO class 0) with the largest box area and stores
    ``{pid, img_path, box, seg}`` per image, then pickles the whole
    OrderedDict next to the image directory as ``duke_mask.pkl``.
    """
    parser = ArgumentParser()
    parser.add_argument('--img_dir',
                        default='/home2/0dataset/DukeMTMC-reID/bounding_box_train',
                        help='Image file')
    parser.add_argument('--config',
                        default='../configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py',
                        help='Config file')
    parser.add_argument('--checkpoint',
                        default='../weights/mask_rcnn_x101_64x4d_fpn_1x_coco_20200201-9352eb0d.pth',
                        help='Checkpoint file')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--score-thr', type=float, default=0.5, help='bbox score threshold')
    args = parser.parse_args()

    model = init_detector(args.config, args.checkpoint, device=args.device)

    # Fix: '*jpg' also matched names merely ending in "jpg"; require the dot.
    img_list = glob.glob(osp.join(args.img_dir, '*.jpg'))
    outputs_dict = OrderedDict()
    for frame in tqdm(img_list):
        result = inference_detector(model, frame)
        # result = ([boxes for 80 COCO classes], [segms for 80 classes]);
        # index 0 is 'person'. Each box row is (x1, y1, x2, y2, score).
        boxes = result[0][0]
        if len(boxes) == 0:
            continue
        seges = result[1][0]
        # Pick the person with the largest box area. (The original comment
        # claimed "largest area, highest confidence", but only area is used.)
        areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
        ind = int(np.argmax(areas))
        # Coordinates become ints; index 4 (the score) stays a float.
        box = [int(v) if k != 4 else v for k, v in enumerate(boxes[ind])]
        seg = seges[ind]  # boolean mask, True on person pixels
        # DukeMTMC filenames start with the person id, e.g. '0001_c1_f0046.jpg'.
        # Fix: osp.basename instead of split('/') for platform safety.
        pid = int(osp.basename(frame).split('_')[0])
        outputs_dict[frame] = {
            'pid': pid,
            'img_path': frame,
            'box': box,
            'seg': seg,
        }
        # Debug visualization: draw the chosen box and black out mask pixels.
        showmask = False
        if showmask:
            img = cv2.imread(frame)
            cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]),
                          (0, 0, 255), 2)
            cv2.imshow('bg', img.copy())
            ys, xs = np.where(seg)
            img[ys, xs, :] = 0
            cv2.imshow('i', img)
            cv2.waitKey()
    print('imgnum:{}, masknum:{}'.format(len(img_list), len(outputs_dict)))
    # Fix: derive the save directory from args.img_dir, not from the loop
    # variable 'frame' (which raised NameError when img_list was empty).
    save_dir = args.img_dir.split('bounding_box_train')[0]
    pickle(outputs_dict, save_dir + 'duke_mask.pkl')



if __name__ == '__main__':
    # Batch mode over a directory; swap the calls to run the
    # single-image demo (main_img) instead.
    main_img_dir()
    # main_img()
