"""
进行模型评估
"""
import os
import argparse
import xml.etree.ElementTree as ET

from PIL import Image
from tqdm import tqdm

from settings import set_dataset, set_computing_env

from utils.map import get_coco_map, get_map

from models.detectors.DETECTOR import DETECTOR

def set_eval_configuration(argv=None):
    """Parse command-line arguments for evaluation.

    Args:
        argv: Optional list of argument strings; defaults to ``sys.argv[1:]``
            (backward-compatible — existing callers pass nothing).

    Returns:
        argparse.Namespace holding the evaluation configuration.
    """
    def str2bool(v):
        # argparse's `type=bool` treats ANY non-empty string (including
        # "False") as True; parse common truthy/falsy spellings instead.
        if isinstance(v, bool):
            return v
        if v.lower() in ('yes', 'true', 't', '1'):
            return True
        if v.lower() in ('no', 'false', 'f', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected, got %r' % v)

    parser = argparse.ArgumentParser()
    # dataset configuration
    parser.add_argument('--dataset', default='NWPUv2', type=str, help='Dataset:NWPUv1, NWPUv2, DIOR')
    parser.add_argument('--dataset_path', type=str, default='dataset/NWPUv2')

    # detector configuration
    parser.add_argument('--detector', type=str, default='FasterRCNN', help='Detector:FasterRCNN, MetaRCNN')
    parser.add_argument('--model_path', type=str, default='exp_test/frcn_bs16_100e_gpu0/best_epoch_weights.pth', help='Detector weight path')
    parser.add_argument('--mode', type=str, default='inference', help='mode: train, inference')
    # Support evaluating from a training checkpoint.
    parser.add_argument('--resume', type=str2bool, default=False)
    parser.add_argument('--ckpt_path', type=str, default=None)

    parser.add_argument('--backbone', type=str, default='resnet50', help='Backbone: resnet50, resnet101')
    # TODO: support loading different pretrained backbones; adjust the model URL.
    parser.add_argument('--pretrained', type=str2bool, default=False, help='use pretrained backbone')
    parser.add_argument('--neck', type=str, default='FPN', help='Neck: FPN')
    parser.add_argument('--rpn_head', type=str, default='RPN', help='Densehead: RPN')
    # nargs='+' (was type=list): type=list would split a CLI string into
    # individual characters instead of yielding a list of numbers.
    parser.add_argument('--anchor_scales', type=int, nargs='+', default=[4, 8, 16], help='anchor scales')
    parser.add_argument('--anchor_ratios', type=float, nargs='+', default=[0.5, 1, 2], help='anchor aspect ratios')
    parser.add_argument('--feat_stride', type=int, default=16)
    parser.add_argument('--roi_head', type=str, default='Resnet50RoIHead')

    # Decoder settings
    # Boxes scoring above this threshold are kept as valid candidates for NMS.
    # type=float (was int): "0.05" cannot be parsed as an int from the CLI.
    parser.add_argument('--confidence', type=float, default=0.05)
    # During NMS, same-class boxes whose overlap with the highest-confidence
    # box exceeds this IoU are all discarded.
    parser.add_argument('--nms_iou', type=float, default=0.3)

    # Evaluation settings
    # Predictions whose class confidence exceeds this threshold count as True.
    parser.add_argument('--score_threhold', type=float, default=0.5)
    # Detections whose IoU with a GT box exceeds this threshold count as Positive.
    parser.add_argument('--min_overlap', type=float, default=0.5, help='mAP 0.x')

    # Computing env
    parser.add_argument('--seed', type=int, default=2023)
    parser.add_argument('--gpu_id', type=int, nargs='+', default=[0], help='None: cpu')

    # Evaluation configuration
    parser.add_argument('--eval_mode', type=int, default=0)

    parser.add_argument('--map_out_path', type=str, default='exps/map_out/')
    parser.add_argument('--map_vis', type=str2bool, default=True)

    parser.add_argument('--heatmap_save_path', type=str, default='exps/visualization/heatmap')

    parser.add_argument('--fps_interval', type=int, default=100)

    return parser.parse_args(argv)

if __name__ == "__main__":
    print("\n\033[1;36;40mStart Evaluation....................\033[0m")

    args = set_eval_configuration()

    args.device, args.cuda = set_computing_env(args)

    args.input_shape, args.class_names, args.num_classes,  num_train, num_val, num_test, train_lines, val_lines, test_lines = set_dataset(args)

    # 在测试集上进行评估
    image_ids = open(os.path.join(args.dataset_path, "ImageSets/Main/test.txt")).read().strip().split()

    if not os.path.exists(args.map_out_path):
        os.makedirs(args.map_out_path)
    if not os.path.exists(os.path.join(args.map_out_path, 'ground-truth')):
        os.makedirs(os.path.join(args.map_out_path, 'ground-truth'))
    if not os.path.exists(os.path.join(args.map_out_path, 'detection-results')):
        os.makedirs(os.path.join(args.map_out_path, 'detection-results'))
    if not os.path.exists(os.path.join(args.map_out_path, 'images-test')):
        os.makedirs(os.path.join(args.map_out_path, 'images-test'))

    # print("Load detector.")
    detector = DETECTOR(args)
    # print("Load detector done.")
    
    if args.eval_mode == 0 or args.eval_mode == 1:
        print("Get predict result.")
        for image_id in tqdm(image_ids):
            image_path  = os.path.join(args.dataset_path, "JPEGImages/"+image_id+".jpg")
            image       = Image.open(image_path)
            if args.map_vis:
                image.save(os.path.join(args.map_out_path, "images-test/" + image_id + ".jpg"))
            detector.get_map_txt(image_id, image, args.class_names, args.map_out_path)
        print("Get predict result done.")
        
    if args.eval_mode == 0 or args.eval_mode == 2:
        print("Get ground truth result.")
        for image_id in tqdm(image_ids):
            with open(os.path.join(args.map_out_path, "ground-truth/"+image_id+".txt"), "w") as new_f:
                root = ET.parse(os.path.join(args.dataset_path, "Annotations/"+image_id+".xml")).getroot()
                for obj in root.findall('object'):
                    difficult_flag = False
                    if obj.find('difficult')!=None:
                        difficult = obj.find('difficult').text
                        if int(difficult)==1:
                            difficult_flag = True
                    obj_name = obj.find('name').text
                    if obj_name not in args.class_names:
                        continue
                    bndbox  = obj.find('bndbox')
                    left    = bndbox.find('xmin').text
                    top     = bndbox.find('ymin').text
                    right   = bndbox.find('xmax').text
                    bottom  = bndbox.find('ymax').text

                    if difficult_flag:
                        new_f.write("%s %s %s %s %s difficult\n" % (obj_name, left, top, right, bottom))
                    else:
                        new_f.write("%s %s %s %s %s\n" % (obj_name, left, top, right, bottom))
        print("Get ground truth result done.")

    if args.eval_mode == 0 or args.eval_mode == 3:
        print("Get map.")
        get_map(args.min_overlap, True, score_threhold = args.score_threhold, path = args.map_out_path)
        print("Get map done.")

    if args.eval_mode == 0 or args.eval_mode == 4:
        print("Get map.")
        get_coco_map(class_names = args.class_names, path = args.map_out_path)
        print("Get map done.")

    # if args.eval_mode == 0 or args.eval_mode == 5:
    #     #TODO 计算量统计结果不对
    #     detector.get_model_complexity(args.input_shape)

    if args.eval_mode == 0 or args.eval_mode == 6:
        fps_image_path =args.dataset_path + '/JPEGImages/' + image_ids[0] + '.jpg'
        img = Image.open(fps_image_path)
        tact_time = detector.get_FPS(img, args.fps_interval)
        print(str(tact_time) + ' seconds, ' + str(1/tact_time) + 'FPS, @batch_size 1')
    
    #TODO heatmap颜色不正确
    if args.eval_mode == 0 or args.eval_mode == 7:
        if not os.path.exists(args.heatmap_save_path):
            os.makedirs(args.heatmap_save_path)
        while True:
            img = input('Input image filename:')
            try:
                image = Image.open(img)
            except:
                print('Open Error! Try again!')
                continue
            else:
                detector.detect_heatmap(image, args.heatmap_save_path)