from __future__ import print_function

import os
import cv2
import torch
import codecs
import zipfile
import shutil
import argparse
import numpy as np
import sys
from datasets_loader.ssdd import SSDD
from datasets_loader.hrsid import HRSID
sys.path.append('datasets/DOTA_devkit')
from tqdm import tqdm
from utils.detect_new import im_detect
from utils.bbox import rbox_2_aabb, rbox_2_quad
from utils.utils import sort_corners, is_image, hyp_parse
from utils.map import eval_mAP
sys.path.append(r'./backbone')



# Registry mapping the CLI --dataset name to its loader class
# (used by data_evaluate to build the validation split).
DATASETS = {
            'SSDD':SSDD,
            'HRSID':HRSID
            }

def data_evaluate(args,
                  model,
                  down_ratio):
    """Run detection over the test image list and score it with mAP.

    Writes one ``<image>.txt`` file per image into
    ``<args.save_path>/detection-results`` with lines of the form
    ``class score x1 y1 x2 y2 x3 y3 x4 y4`` (quad corners), then calls
    :func:`eval_mAP` on the result directory.

    Args:
        args: parsed CLI namespace; uses save_path, dataset, data_path,
            test_path, part and use_07_metric.
        model: detection network, already in eval mode.
        down_ratio: output stride passed through to ``im_detect``.

    Returns:
        Tuple ``(p, r, mAP, hmean)`` — only mAP is actually computed
        here; the other slots are placeholder zeros.
    """
    root_dir = args.save_path
    out_dir = os.path.join(root_dir, 'detection-results')
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    ds = DATASETS[args.dataset](args, split='val')
    img_dir = os.path.join(args.data_path, 'JPEGImages')
    with open(args.test_path, 'r') as f:
        ims_list = [os.path.join(img_dir, x.strip() + '.jpg') for x in f.readlines()]

    # The ground-truth check is loop-invariant: verify once, not per image.
    if args.part == "all":
        gt_dir = os.path.join(args.data_path, 'Ground-Truth')
        assert len(os.listdir(gt_dir)) != 0, 'No labels found in test/Ground-Truth!! '

    s = ('%20s' + '%10s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@0.5', 'Hmean')
    nt = 0  # running total of detections over all images

    for im_path in tqdm(ims_list, desc=s):
        im_name = os.path.split(im_path)[1]
        # im_detect loads the image from im_path itself, so there is no
        # need to read/convert it here as well.
        dets = im_detect(args, im_path, model, down_ratio)  # predicted results
        nt += len(dets)
        out_file = os.path.join(out_dir, im_name[:im_name.rindex('.')] + '.txt')
        with codecs.open(out_file, 'w', 'utf-8') as f:
            if dets.shape[0] == 0:
                # Leave an empty result file for images with no detections;
                # the with-block closes the file automatically.
                continue
            # dets columns: [class_id, score, x1..y4]. The corners are
            # already sorted upstream, so sort_corners(rbox_2_quad(...))
            # is intentionally not applied here.
            res = dets[:, 2:]
            for k in range(dets.shape[0]):
                f.write('{} {:.10f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f}\n'.format(
                    ds.return_class(dets[k, 0]), dets[k, 1],
                    res[k, 0], res[k, 1], res[k, 2], res[k, 3],
                    res[k, 4], res[k, 5], res[k, 6], res[k, 7])
                )

    mAP = eval_mAP(args, root_dir, use_07_metric=args.use_07_metric, thres=0.5)
    # display result
    pf = '%20s' + '%10.4g' * 6  # print format
    print(pf % ('all', len(ims_list), nt, 0, 0, mAP, 0))
    return 0, 0, mAP, 0


def evaluate(args,
             model,
             down_ratio,
             hyps=None):
    """Dispatch evaluation to the routine matching ``args.dataset``.

    Puts the model in eval mode (moving it to GPU when available) and
    delegates to the dataset-specific evaluator.

    Args:
        args: parsed CLI namespace; ``args.dataset`` selects the branch.
        model: the detection network.
        down_ratio: output stride forwarded to the evaluator.
        hyps: unused; kept for interface compatibility.

    Returns:
        The ``(p, r, mAP, hmean)`` tuple from the evaluator.

    Raises:
        NotImplementedError: for the ICDAR/DOTA branches, whose helpers
            (``icdar_evaluate``/``dota_evaluate``) are not defined in
            this module — previously these paths raised a bare NameError.
        RuntimeError: for any other unsupported dataset name.
    """
    model.eval()
    if torch.cuda.is_available():
        model.cuda()

    if 'IC' in args.dataset:
        raise NotImplementedError(
            'ICDAR evaluation is not available in this script')
    elif args.dataset in ['SSDD', 'HRSID']:
        results = data_evaluate(args, model, down_ratio)
    elif args.dataset == 'DOTA':
        raise NotImplementedError(
            'DOTA evaluation is not available in this script')
    else:
        raise RuntimeError('Unsupported dataset!')
    return results



def str2bool(v):
    """Convert a CLI string to bool: 'true'/'1'/'yes' (any case) -> True."""
    if isinstance(v, bool):
        return v
    return v.strip().lower() in ('true', '1', 'yes', 'y', 't')


def parse_args():
    """Build and parse the command-line arguments for evaluation.

    Returns:
        argparse.Namespace with dataset/model/path settings.
    """
    parser = argparse.ArgumentParser(description='Hyperparams')

    parser.add_argument('--dataset', default='HRSID',
                        choices=['SSDD', 'HRSID'],
                        help='dataset name')
    parser.add_argument('--max_obj_per_img', default=100, type=int)
    parser.add_argument('--num_classes', default=1, type=int)
    parser.add_argument('--weight_path', default='../_Weights/rcenternet/HRSID/resnet/best.pth')
    # Substrings of --method select the backbone in __main__
    # ("dla", "dcn", "swin", "resnet").
    parser.add_argument('--method', default='resnet')

    # NOTE: type=bool is an argparse trap — bool('False') is True, so any
    # value passed on the CLI would enable the flag. Use an explicit
    # string-to-bool converter instead (default unchanged).
    parser.add_argument('--use_07_metric', default=False, type=str2bool)

    parser.add_argument('--input_h', type=int, default=800, help='Resized image height')
    parser.add_argument('--input_w', type=int, default=800, help='Resized image width')
    parser.add_argument('--keep_res', type=int, default=False)

    parser.add_argument('--num_workers', default=0, type=int)

    parser.add_argument('--phase', default='test', choices=['train', 'test'])  # train or test
    parser.add_argument('--test_path', default='../_DATASET/HRSID/ImageSets/test.txt')
    parser.add_argument('--data_path', default='../_DATASET/HRSID/', help='dataset path')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    # All artifacts live under ../_Weights/rcenternet/<dataset>/<method>/
    args.save_path = ".." + os.sep + "_Weights" + os.sep + 'rcenternet' + os.sep + args.dataset + os.sep + args.method
    # Output heads: class heatmap, width/height, angle, center-offset regression.
    head = {'hm': args.num_classes, 'wh': 2, 'ang': 1, 'reg': 2}

    print(args.method)
    # Backbone selection is keyed on substrings of --method.
    if "dla" in args.method:
        if "dcn" in args.method:
            from dlanet_dcn import DlaNet
            model = DlaNet(34, heads=head)
        else:
            from dlanet import DlaNet
            model = DlaNet(34, heads=head)
    elif "swin" in args.method:
        # nn was previously not imported anywhere, so this branch raised
        # NameError at norm_layer=nn.LayerNorm; import it locally.
        from torch import nn
        from swinT import SwinTransformer
        model = SwinTransformer(heads=head,
                                pretrain_img_size=224,
                                patch_size=4,
                                in_chans=3,
                                embed_dim=96,
                                depths=[2, 2, 18, 2],  # [2,2,18,2] for small, [2,2,6,2] for tiny
                                num_heads=[3, 6, 12, 24],
                                window_size=7,
                                mlp_ratio=4.,
                                qkv_bias=True,
                                qk_scale=None,
                                drop_rate=0.,
                                attn_drop_rate=0.,
                                drop_path_rate=0.2,
                                norm_layer=nn.LayerNorm,
                                ape=False,
                                patch_norm=True,
                                out_indices=(0, 1, 2, 3),
                                frozen_stages=-1,
                                use_checkpoint=False)
        pretrained_net = torch.load('./swin_small_patch4_window7_224.pth')
        # strict=False: the detection heads are not in the ImageNet checkpoint.
        model.load_state_dict(pretrained_net['model'], strict=False)
    elif "resnet" in args.method:
        from ctrbox_net import CTRBOX
        model = CTRBOX(heads=head,
                       pretrained=True,
                       down_ratio=4,
                       final_kernel=1,
                       head_conv=256)
    else:
        # Previously fell through with `model` unbound -> confusing NameError.
        raise ValueError('Unknown method: %s' % args.method)

    print(model)
    model.load_state_dict(torch.load(args.weight_path))
    args.part = 'all'  # data_evaluate uses this to require Ground-Truth labels
    results = evaluate(args,
                       model,
                       down_ratio=4,
                       hyps=None)  # (p, r, mAP, hmean)
    print('best_all', results)