from __future__ import print_function

import os
import cv2
import torch
import codecs
import zipfile
import shutil
import argparse
import sys
sys.path.append('datasets/DOTA_devkit')

from tqdm import tqdm
from datasets import *
from models.model import RetinaNet
from utils.detect import im_detect
from utils.bbox import rbox_2_aabb, rbox_2_quad
from utils.utils import sort_corners, is_image, hyp_parse
from utils.map import eval_mAP



# Maps a dataset name to the Dataset class used by data_evaluate().
# NOTE: 'UCAS_AOD' was originally named 'NWPU_VHR'; both keys are kept so
# every name accepted by evaluate() ('NWPU_VHR', 'SSDD', 'HRSID') resolves
# here instead of raising KeyError.
DATASETS = {'NWPU_VHR': NWPUDataset,
            'UCAS_AOD': NWPUDataset,
            'SSDD': SSDDDataset,
            'HRSID': SSDDDataset,
            }


def data_evaluate(args, model,
                  target_size,
                  test_path,
                  conf=0.01,
                  dataset=None):
    """Run detection over a test split and report mAP@0.5.

    Args:
        args: parsed CLI namespace; reads ``args.save_path`` (output root)
            and ``args.data_path`` (dataset root holding ``JPEGImages``).
        model: detection network, already in eval mode, passed to im_detect.
        target_size: inference image size(s) forwarded to im_detect.
        test_path: text file listing image ids, one per line.
        conf: confidence threshold for keeping detections.
        dataset: key into the module-level DATASETS registry.

    Returns:
        Tuple ``(P, R, mAP, Hmean)``; P/R/Hmean are placeholders (0) —
        only mAP is actually computed here.
    """
    root_dir = args.save_path
    out_dir = os.path.join(root_dir, 'detection-results')
    # Start from a clean results directory so stale files from a previous
    # run cannot pollute the evaluation.
    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)
    os.makedirs(out_dir)

    ds = DATASETS[dataset](args)

    with open(test_path, 'r') as f:
        if dataset == 'VOC':
            # VOC layout: image dir is a sibling of ImageSets/Main.
            im_dir = test_path.replace('/ImageSets/Main/test.txt', '/JPEGImages')
            ims_list = [os.path.join(im_dir, x.strip('\n') + '.jpg') for x in f.readlines()]
        else:
            ims_list = [os.path.join(args.data_path, "JPEGImages", x.strip() + ".jpg") for x in f.readlines()]
    s = ('%20s' + '%10s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@0.5', 'Hmean')
    nt = 0  # total number of detections across the split
    for im_path in tqdm(ims_list, desc=s):
        im_name = os.path.split(im_path)[1]
        im = cv2.cvtColor(cv2.imread(im_path, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
        dets = im_detect(model, im, target_sizes=target_size, conf=conf)
        nt += len(dets)
        # One result file per image, each line:
        #   "<class> <score> x1 y1 x2 y2 x3 y3 x4 y4"
        out_file = os.path.join(out_dir, im_name[:im_name.rindex('.')] + '.txt')
        with codecs.open(out_file, 'w', 'utf-8') as f:
            if dets.shape[0] == 0:
                # Leave an empty file so the evaluator still sees this image.
                continue
            # Convert rotated boxes to quadrilaterals with corners in a
            # canonical order before writing.
            res = sort_corners(rbox_2_quad(dets[:, 2:]))
            for k in range(dets.shape[0]):
                f.write('{} {:.8f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f}\n'.format(
                    ds.return_class(dets[k, 0]), dets[k, 1],
                    res[k, 0], res[k, 1], res[k, 2], res[k, 3],
                    res[k, 4], res[k, 5], res[k, 6], res[k, 7])
                )
    mAP = eval_mAP(args, root_dir, use_07_metric=False, thres=0.5)
    # Display the result row aligned with the header shown by tqdm.
    pf = '%20s' + '%10.3g' * 6  # print format
    print(pf % ('all', len(ims_list), nt, 0, 0, mAP, 0))
    return 0, 0, mAP, 0






def evaluate(args, target_size,
             test_path,
             dataset,
             backbone=None,
             weight=None,
             model=None,
             hyps=None,
             conf=0.3):
    """Evaluate a detector on a dataset, building the model if needed.

    Args:
        args: CLI namespace forwarded to data_evaluate.
        target_size: inference image size(s).
        test_path: text file listing the test image ids.
        dataset: dataset name; must be one of 'NWPU_VHR', 'SSDD', 'HRSID'.
        backbone: backbone name used only when ``model`` is None.
        weight: path to a ``.pth`` checkpoint, used only when ``model`` is None.
        model: optional pre-built model; skips construction/loading.
        hyps: hyper-parameter dict for RetinaNet construction.
        conf: confidence threshold.

    Returns:
        Whatever data_evaluate returns: ``(P, R, mAP, Hmean)``.

    Raises:
        ValueError: if neither ``model`` nor ``weight`` is supplied.
        RuntimeError: if ``dataset`` is not supported.
    """
    if model is None:
        # Without a weight file we would silently evaluate a randomly
        # initialized network — fail loudly instead.
        if weight is None:
            raise ValueError("either 'model' or 'weight' must be provided")
        model = RetinaNet(backbone=backbone, hyps=hyps)
        if weight.endswith('.pth'):
            # map_location keeps CPU-only machines able to load
            # checkpoints that were saved from a GPU.
            chkpt = torch.load(
                weight,
                map_location=None if torch.cuda.is_available() else 'cpu')
            # Accept both {'model': state_dict} checkpoints and bare state dicts.
            if 'model' in chkpt.keys():
                model.load_state_dict(chkpt['model'])
            else:
                model.load_state_dict(chkpt)

    model.eval()
    if torch.cuda.is_available():
        model.cuda()

    if dataset in ['NWPU_VHR', 'SSDD', 'HRSID']:
        results = data_evaluate(args, model, target_size, test_path, conf, dataset)
    else:
        raise RuntimeError('Unsupported dataset!')
    return results


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Hyperparams')
    parser.add_argument('--backbone', dest='backbone', default='res50', type=str)
    parser.add_argument('--weight', type=str, default='../_Weights/dal/HRSID/_retina_true/best.pth')
    # nargs='+' so "--target_size 800" (or "800 1024") parses to a list,
    # matching the [800] default that the detection code expects.
    parser.add_argument('--target_size', dest='target_size', default=[800], type=int, nargs='+')
    parser.add_argument('--hyp', type=str, default='hyp.py', help='hyper-parameter path')

    parser.add_argument('--dataset', nargs='?', type=str, default='HRSID')
    parser.add_argument('--train_path', type=str, default='../_DATASET/SSDD/ImageSets/train.txt')
    parser.add_argument('--test_path', type=str, default='../_DATASET/SSDD/ImageSets/test.txt')
    parser.add_argument('--data_path', type=str, default='../_DATASET/SSDD')
    parser.add_argument('--gt_path', type=str, default='../_DATASET/SSDD/Ground-Truth')

    args = parser.parse_args()
    hyps = hyp_parse(args.hyp)
    # Dataset-specific path overrides (defaults above point at plain SSDD).
    if args.dataset == "SSDD":
        args.train_path = '../_DATASET/SSDD_dal/ImageSets/train.txt'
        args.test_path = '../_DATASET/SSDD_dal/ImageSets/test.txt'
        args.data_path = '../_DATASET/SSDD_dal'
        args.gt_path = '../_DATASET/SSDD_dal/Ground-Truth'
    elif args.dataset == "HRSID":
        args.train_path = '../_DATASET/HRSID/ImageSets/train.txt'
        args.test_path = '../_DATASET/HRSID/ImageSets/test.txt'
        args.data_path = '../_DATASET/HRSID'
        args.gt_path = '../_DATASET/HRSID/Ground-Truth'
    args.method = "_dal_true"
    args.save_path = "../" + "_Weights/" + 'dal/' + args.dataset + "/" + args.method
    model = RetinaNet(backbone=args.backbone, hyps=hyps)
    # map_location keeps CPU-only machines able to load GPU-saved checkpoints.
    chkpt = torch.load(args.weight,
                       map_location=None if torch.cuda.is_available() else 'cpu')
    # Accept both {'model': state_dict} checkpoints and bare state dicts,
    # consistent with the loading logic inside evaluate().
    model.load_state_dict(chkpt['model'] if 'model' in chkpt else chkpt)
    results = evaluate(args=args, target_size=args.target_size,
                       test_path=args.test_path,
                       dataset=args.dataset,
                       model=model,
                       hyps=hyps,
                       conf=0.05)