import os
import sys
import cv2
import argparse
import numpy as np

import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import seaborn as sns
import time
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'lib'))

# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = '0'

import torch
import torch.utils.data

# from datasets.ssdd_coco import COCO_MEAN, COCO_STD, COCO_NAMES
# from datasets.hrsid_coco import COCO_MEAN, COCO_STD, COCO_NAMES
from datasets.nwpu import COCO_MEAN, COCO_STD, COCO_NAMES
# from datasets.pascal import VOC_MEAN, VOC_STD, VOC_NAMES

from utils.utils import load_model
from utils.image import transform_preds, get_affine_transform
from utils.post_process import ctdet_decode,ctdet_decode_csl_one

from nms.nms import soft_nms
import glob
# from nms import soft_nms

# One distinct color per class for visualization (NWPU has 10 classes;
# 20 leaves headroom for other datasets).
COCO_COLORS = sns.color_palette('Paired', 20)
# VOC_COLORS = sns.color_palette('hls', len(VOC_NAMES))

# Inference / visualization settings
parser = argparse.ArgumentParser(description='centernet')

parser.add_argument('--root_dir', type=str, default='./')
parser.add_argument('--img_dir', type=str, default='../_DATASET/NWPU_0.6/images/train2017/')

parser.add_argument('--weight_dir', type=str, default='../_Weights/hcenternet/nwpu_0.6/dlanet34_cls_sml0.2_std0.02_L1.0_16(EGK_0.7_only_train_800_omega5)_v2+cls_reg/best_ap50.pth')
parser.add_argument('--method', type=str, default='cls_reg', choices=['wh', 'tblr', 'dense', 'tblr_cor', 'dfl', 'dfl_csl', 'dfl_csl_reg'])
parser.add_argument('--arch', type=str, default='dlanet', choices=['large_hourglass', 'resdcn', 'resnet_50', 'dlanet'])

parser.add_argument('--dataset', type=str, default='nwpu_coco_0.6', choices=['coco', 'pascal', 'nwpu_coco_0.2', 'nwpu_coco_0.6', 'dior_coco', 'ssdd_coco'])
parser.add_argument('--img_size', type=int, default=800)

parser.add_argument('--test_flip', action='store_true')
parser.add_argument('--test_scales', type=str, default='1')  # e.g. 0.5,0.75,1,1.25,1.5

parser.add_argument('--test_topk', type=int, default=100)  # 100 for ssdd, 300 for hrsid
parser.add_argument('--num_classes', type=int, default=10)

# --------------------- comparison-experiment parameters ---------------------
# BUG FIX: --alpha had type=int, so any CLI value like `--alpha 0.7` would
# raise (int('0.7') fails); the float default only worked because argparse
# does not convert defaults.
parser.add_argument('--alpha', type=float, default=0.7)  # 0.1/0.3/0.5/0.7/0.9: EGK encoding ratio
parser.add_argument('--hm_kernel', type=str, default="EGK")  # "EGK" or "CGK": heatmap encoding scheme
parser.add_argument('--points', type=int, default=16)  # 4/8/16/24: number of points in SSB
parser.add_argument('--reg_max', type=int, default=100)  # distance range used by BRB
parser.add_argument('--omega', type=int, default=5)  # width of each discretized distance bin
# ----------------------------------------------------------------------------
cfg = parser.parse_args()

os.chdir(cfg.root_dir)

# "0.5,0.75,1" -> [0.5, 0.75, 1.0]
cfg.test_scales = [float(s) for s in cfg.test_scales.split(',')]


def _preprocess_scale(image, scale, padding):
    """Prepare one test-scale input for the network.

    Resizes `image` by `scale`, warps it onto the network input canvas,
    normalizes with the dataset mean/std, and returns the tensor together
    with the affine metadata needed to map detections back to the original
    image coordinates.

    Args:
        image: BGR uint8 image as read by cv2.imread, shape (H, W, 3).
        scale: float test scale.
        padding: stride-alignment mask used when cfg.img_size <= 0
                 (127 for hourglass, 31 otherwise).

    Returns:
        dict with keys 'image' (1CHW or 2CHW float tensor), 'center',
        'scale', 'fmap_h', 'fmap_w'.
    """
    height, width = image.shape[0:2]
    new_height = int(height * scale)
    new_width = int(width * scale)

    if cfg.img_size > 0:
        # Fixed square input: warp the rescaled image into an
        # img_size x img_size canvas.
        img_height, img_width = cfg.img_size, cfg.img_size
        center = np.array([new_width / 2., new_height / 2.], dtype=np.float32)
        scaled_size = max(height, width) * 1.0
        scaled_size = np.array([scaled_size, scaled_size], dtype=np.float32)
    else:
        # Round each side up to the next multiple of (padding + 1) so the
        # backbone's downsampling divides evenly.
        img_height = (new_height | padding) + 1
        img_width = (new_width | padding) + 1
        center = np.array([new_width // 2, new_height // 2], dtype=np.float32)
        scaled_size = np.array([img_width, img_height], dtype=np.float32)

    img = cv2.resize(image, (new_width, new_height))
    trans_img = get_affine_transform(center, scaled_size, 0, [img_width, img_height])
    img = cv2.warpAffine(img, trans_img, (img_width, img_height))

    # Normalize with dataset statistics, then HWC -> 1CHW.
    img = img.astype(np.float32) / 255.
    img -= np.array(COCO_MEAN, dtype=np.float32)[None, None, :]
    img /= np.array(COCO_STD, dtype=np.float32)[None, None, :]
    img = img.transpose(2, 0, 1)[None, :, :, :]

    if cfg.test_flip:
        # Append a horizontally flipped copy for flip-augmented testing.
        img = np.concatenate((img, img[:, :, :, ::-1].copy()), axis=0)

    # fmap_* are the output resolution (network downsamples by 4).
    return {'image': torch.from_numpy(img).float(),
            'center': np.array(center),
            'scale': np.array(scaled_size),
            'fmap_h': np.array(img_height // 4),
            'fmap_w': np.array(img_width // 4)}


def main():
    """Run detector inference over every *.jpg in cfg.img_dir.

    Builds the model from cfg, decodes detections per test scale, merges
    them with soft-NMS, draws boxes with score > 0.3, writes each
    visualization to results/<dataset>/<arch>/<image name>, and prints the
    average per-image latency and FPS (first image excluded as warm-up).
    """
    save_path = os.path.join('results', cfg.dataset, cfg.arch)
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    cfg.device = torch.device('cuda')
    torch.backends.cudnn.benchmark = False

    print('Creating model...')

    # Distance regression is discretized into cls_num bins of width omega;
    # `a` is the bin-index vector used to decode expected distances.
    cls_num = int(cfg.reg_max / cfg.omega) + 1
    a = torch.arange(0, cls_num, dtype=torch.float32).cuda()
    heads = {'hm': cfg.num_classes, 'tblr': 4 * cls_num, 'ssb': cfg.points}
    if 'dlanet' in cfg.arch:
        if 'dcn' in cfg.arch:
            from nets.dlanet_dcn import DlaNet
            model = DlaNet(cfg, num_classes=cfg.num_classes, heads=heads, head_conv=256)
        else:
            from nets.dlanet import DlaNet
            model = DlaNet(cfg, num_classes=cfg.num_classes, head_conv=256)
    else:
        raise NotImplementedError

    # strict=False tolerates missing/unexpected keys in the checkpoint.
    model.load_state_dict(torch.load(cfg.weight_dir), strict=False)
    model = model.to(cfg.device)
    model.eval()

    max_per_image = 100
    img_paths = glob.glob(cfg.img_dir + '*.jpg')

    total_time = []
    padding = 127 if 'hourglass' in cfg.arch else 31
    for index, img_path in enumerate(img_paths):
        name = os.path.basename(img_path)
        # --------------------- timing starts ---------------------
        start_time = time.perf_counter()

        image = cv2.imread(img_path)
        orig_image = image
        imgs = {scale: _preprocess_scale(image, scale, padding)
                for scale in cfg.test_scales}

        with torch.no_grad():
            detections = []
            for scale in imgs:
                imgs[scale]['image'] = imgs[scale]['image'].to(cfg.device)
                tic = time.time()
                output = model(imgs[scale]['image'])[-1]
                print("network_time", time.time() - tic)

                tic = time.time()
                if cfg.method == "cls_reg":
                    output = output[:-1]  # drop the auxiliary head output
                    dets = ctdet_decode_csl_one(*output, cls_num=cls_num,
                                                K=cfg.test_topk, omega=cfg.omega, a=a)
                else:
                    # BUG FIX: `dets` was silently undefined (NameError) for
                    # any method other than 'cls_reg'.
                    raise NotImplementedError(
                        "decoding for method '{}' is not implemented".format(cfg.method))
                print("decode_time", time.time() - tic)

                # --------------------- timing ends ---------------------
                torch.cuda.synchronize()
                elapsed = time.perf_counter() - start_time
                total_time.append(elapsed)
                print("process {}".format(index), elapsed)

                dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])[0]

                # Map feature-map coordinates back to original-image space.
                fmap_size = (imgs[scale]['fmap_w'], imgs[scale]['fmap_h'])
                dets[:, :2] = transform_preds(dets[:, 0:2],
                                              imgs[scale]['center'],
                                              imgs[scale]['scale'],
                                              fmap_size)
                dets[:, 2:4] = transform_preds(dets[:, 2:4],
                                               imgs[scale]['center'],
                                               imgs[scale]['scale'],
                                               fmap_size)
                cls = dets[:, -1]
                top_preds = {}
                for j in range(cfg.num_classes):  # 2 for ssdd_coco
                    inds = (cls == j)
                    top_preds[j + 1] = dets[inds, :5].astype(np.float32)
                    top_preds[j + 1][:, :4] /= scale
                detections.append(top_preds)

            # Merge per-scale detections and suppress duplicates.
            bbox_and_scores = {}
            for j in range(1, cfg.num_classes + 1):
                bbox_and_scores[j] = np.concatenate([d[j] for d in detections], axis=0)
                soft_nms(bbox_and_scores[j], Nt=0.5, method=2)
                if len(cfg.test_scales) > 1:
                    # Second pass removes cross-scale duplicates.
                    soft_nms(bbox_and_scores[j], Nt=0.5, method=2)
            scores = np.hstack([bbox_and_scores[j][:, 4]
                                for j in range(1, cfg.num_classes + 1)])

            if len(scores) > max_per_image:
                # Keep only the max_per_image highest-scoring detections.
                kth = len(scores) - max_per_image
                thresh = np.partition(scores, kth)[kth]
                for j in range(1, cfg.num_classes + 1):
                    keep_inds = (bbox_and_scores[j][:, 4] >= thresh)
                    bbox_and_scores[j] = bbox_and_scores[j][keep_inds]

            # Draw boxes above the visualization threshold on the original
            # (BGR) image.
            img = orig_image
            for lab in bbox_and_scores:
                for x1, y1, x2, y2, score in bbox_and_scores[lab]:
                    if score > 0.3:
                        color = [0, 0, 255]  # red in BGR
                        # BUG FIX: cv2.rectangle requires integer pixel
                        # coordinates; these were float32.
                        cv2.rectangle(img, (int(x1), int(y1)),
                                      (int(x2), int(y2)), color, 2)
            # BUG FIX: the visualization was never saved even though
            # `name` was extracted and `save_path` was created for it.
            cv2.imwrite(os.path.join(save_path, name), img)

    # Drop the first (warm-up) measurement before averaging.
    total_time = total_time[1:]
    if total_time:
        print('avg time is {}'.format(np.mean(total_time)))
        print('FPS is {}'.format(1. / np.mean(total_time)))






# Run inference only when executed as a script (not on import).
if __name__ == '__main__':
    main()
