import torch
import numpy as np
import argparse
import os
from tqdm import tqdm
import pickle
import copy
import gc

from data.dataset import build_dataset
from utils.boxes import postprocess
from utils.evaluate import get_area, get_intersection_area_matrix, evaluate_matrix, get_precision, get_recall

def parse_args():
    """Parse the command-line arguments for the evaluation script.

    Returns:
        argparse.Namespace with model/dataset names, checkpoint selector,
        and the NMS / cover thresholds used during evaluation.
    """
    parser = argparse.ArgumentParser(description='evaluate net')
    # Required string options: which model and which dataset splits to use.
    for flag, help_text in [
        ("--model", 'model name'),
        ("--datasets", 'train datasets name'),
        ("--val-datasets", 'val datasets name'),
        ("--test-datasets", 'datasets for sampling'),
    ]:
        parser.add_argument(flag, type=str, help=help_text, required=True)
    parser.add_argument("--checkpoint", type=str, help="checkpoint iter, can be 'last', 'best', iternumber", default='best')
    # Optional float thresholds with their defaults.
    for flag, help_text, default in [
        ("--nms-threshold", 'nms threshold', 0.5),
        ("--iou-cover-threshold", 'iou cover threshold', 0.5),
        ("--proposals-cover-threshold", 'proposals cover threshold', 0.7),
        ("--gt-boxes-cover-threshold", 'gt boxes cover threshold', 0.7),
    ]:
        parser.add_argument(flag, type=float, help=help_text, default=default)
    return parser.parse_args()

class PredictDictLoader:
    """Loads pickled prediction dicts stored as one file per "part" in a directory.

    Supports two access modes:
      * lazy iteration (``for part in loader``) — yields one part dict at a time
        so only a single pickle is resident in memory;
      * eager indexing — call :meth:`load` once, then ``loader[key]``.

    Files are visited in sorted filename order so part numbering and merge
    order are deterministic across runs (``os.listdir`` order is arbitrary).
    """

    def __init__(self, predict_dict_dir):
        # Directory containing the pickled prediction parts.
        self.predict_dict_dir = predict_dict_dir

    def _part_paths(self):
        """Yield the full path of every part file, in sorted filename order."""
        for filename in sorted(os.listdir(self.predict_dict_dir)):
            yield os.path.join(self.predict_dict_dir, filename)

    def __iter__(self):
        # Lazy mode: unpickle and yield one part dict per file.
        # NOTE: pickle.load is only safe because these files are produced by
        # our own predict step — never point this at untrusted data.
        for path in self._part_paths():
            with open(path, 'rb') as f:
                yield pickle.load(f)

    def load(self):
        """Eagerly merge every part into ``self.predict_dict`` (enables indexing)."""
        self.predict_dict = {}
        for path in self._part_paths():
            with open(path, 'rb') as f:
                self.predict_dict.update(pickle.load(f))

    def __getitem__(self, idx):
        # idx is a key of the merged dict (an image file name, per callers).
        # load() must have been called first.
        return self.predict_dict[idx]

class PredictionDataset:
    """Pairs ground-truth annotations with model predictions per image.

    Only keeps ground-truth records whose ``file_name`` has an entry in the
    supplied prediction dict, so iteration never hits a missing prediction.
    """

    def __init__(self, dataset_names, predict_dict):
        self.predict_dict = predict_dict
        predicted_files = set(predict_dict.keys())
        # Restrict the ground-truth records to images we have predictions for.
        self.dataset_dicts = [
            record for record in build_dataset(*dataset_names)
            if record['file_name'] in predicted_files
        ]

    def __len__(self):
        return len(self.dataset_dicts)

    def __getitem__(self, index):
        record = self.dataset_dicts[index]
        filename = record['file_name']
        annotations = record['annotations']
        # (N, 4) box array; reshape handles the zero-annotation case too.
        gt_boxes = np.array([obj['bbox'] for obj in annotations]).reshape(-1, 4)
        # NOTE(review): uint8 assumes category ids fit in 0..255 — confirm upstream.
        gt_classification = np.array([obj['category_id'] for obj in annotations], dtype=np.uint8)
        prediction = self.predict_dict[filename]
        return filename, gt_boxes, gt_classification, prediction['bboxes'], prediction['classification']

    def __iter__(self):
        for index in range(len(self)):
            yield self[index]


def _empty_strategy_stats(threshold):
    """Return a fresh TP/FP/FN + precision/recall accumulator for one strategy."""
    return {
        'threshold': threshold,
        'TP': 0,
        'FP': 0,
        'FN': 0,
        'macro_precision': 0,
        'macro_recall': 0,
        'micro_precision': 0,
        'micro_recall': 0,
    }


def _empty_class_stats(kwargs):
    """Return a fresh per-class accumulator covering all three matching strategies."""
    return {
        'average_proposal_count': 0,
        'iou_cover': _empty_strategy_stats(kwargs['iou_cover_threshold']),
        'proposals_cover': _empty_strategy_stats(kwargs['proposals_cover_threshold']),
        'gt_boxes_cover': _empty_strategy_stats(kwargs['gt_boxes_cover_threshold']),
    }


def evaluate(dataset, **kwargs):
    """Accumulate detection statistics over *dataset* for one score threshold.

    Args:
        dataset: iterable of (filename, gt_boxes, gt_classification, bboxes,
            classification) tuples (see PredictionDataset).
        **kwargs: must contain 'part', 'threshold', 'nms_threshold',
            'iou_cover_threshold', 'proposals_cover_threshold',
            'gt_boxes_cover_threshold'.

    Returns:
        dict of raw counts: overall and per-class TP/FP/FN plus summed
        macro precision/recall and proposal counts for three matching
        strategies (IoU, intersection/proposal-area, intersection/gt-area).
        Normalization (macro averages, micro metrics, average proposal count)
        is performed later by compose_evaluate_dicts.
    """
    evaluate_dict = {
        'image_count': len(dataset),
        'threshold': kwargs['threshold'],
        'nms_threshold': kwargs['nms_threshold'],
        'average_proposal_count': 0,
        'iou_cover': _empty_strategy_stats(kwargs['iou_cover_threshold']),
        'proposals_cover': _empty_strategy_stats(kwargs['proposals_cover_threshold']),
        'gt_boxes_cover': _empty_strategy_stats(kwargs['gt_boxes_cover_threshold']),
        'classes': {},
    }

    for filename, gt_boxes_all_classes, gt_classification, bboxes, classification in tqdm(
            dataset,
            desc="part={} threshold={:.4f}".format(kwargs['part'], kwargs['threshold']),
            leave=False):
        # Score-filter + NMS the raw predictions for this image.
        data = postprocess(bboxes, classification, kwargs['threshold'], kwargs['nms_threshold'])
        class_ids_set = set(data['class_ids'])
        # Per-image accumulator across all classes (only the TP/FP/FN fields are used).
        all_classes_evaluate_dict = _empty_class_stats(kwargs)

        for class_id in class_ids_set:
            if class_id not in evaluate_dict["classes"]:
                evaluate_dict["classes"][class_id] = _empty_class_stats(kwargs)

            # assumes data['rois'] / data['class_ids'] are numpy arrays (boolean masking)
            proposals = data['rois'][data['class_ids'] == class_id]
            gt_boxes = gt_boxes_all_classes[gt_classification == class_id]

            proposals_area = get_area(proposals)
            gt_boxes_area = get_area(gt_boxes)
            intersection_area_matrix = get_intersection_area_matrix(proposals, gt_boxes)
            union_area_matrix = proposals_area.reshape(-1, 1) + gt_boxes_area.reshape(1, -1) - intersection_area_matrix

            # Denominators for the three overlap criteria: IoU,
            # intersection / proposal area, intersection / gt-box area.
            denominators = {
                'iou_cover': union_area_matrix,
                'proposals_cover': proposals_area.reshape(-1, 1),
                'gt_boxes_cover': gt_boxes_area.reshape(1, -1),
            }

            for strategy, denominator in denominators.items():
                overlap_matrix = intersection_area_matrix / denominator
                class_stats = evaluate_dict["classes"][class_id][strategy]
                TP, FP, FN = evaluate_matrix(overlap_matrix, class_stats['threshold'])
                class_stats['TP'] += TP
                class_stats['FP'] += FP
                class_stats['FN'] += FN
                # Macro sums: one precision/recall term per image; divided by the
                # image count in compose_evaluate_dicts.
                class_stats['macro_precision'] += get_precision(TP, FP, FN)
                class_stats['macro_recall'] += get_recall(TP, FP, FN)

                all_classes_evaluate_dict[strategy]['TP'] += TP
                all_classes_evaluate_dict[strategy]['FP'] += FP
                all_classes_evaluate_dict[strategy]['FN'] += FN

            evaluate_dict['average_proposal_count'] += len(proposals)
            evaluate_dict["classes"][class_id]['average_proposal_count'] += len(proposals)

        # Fold this image's pooled counts into the overall per-strategy totals.
        for strategy in ['iou_cover', 'proposals_cover', 'gt_boxes_cover']:
            image_stats = all_classes_evaluate_dict[strategy]
            TP, FP, FN = image_stats['TP'], image_stats['FP'], image_stats['FN']
            overall = evaluate_dict[strategy]
            overall['TP'] += TP
            overall['FP'] += FP
            overall['FN'] += FN
            overall['macro_precision'] += get_precision(TP, FP, FN)
            overall['macro_recall'] += get_recall(TP, FP, FN)

    return evaluate_dict

def compose_evaluate_dicts(evaluate_dicts):
    """Merge the per-part dicts produced by evaluate() and finalize the metrics.

    Raw counts (TP/FP/FN, proposal counts, macro sums) are pooled across parts,
    then:
      * average_proposal_count is normalized by the total image count,
      * macro_precision / macro_recall sums become per-image averages,
      * micro_precision / micro_recall are recomputed from the pooled TP/FP/FN.

    Assumes ``evaluate_dicts`` is non-empty (thresholds are read from the first
    part) and covers at least one image (the image count is a divisor).
    The input dicts are not modified.
    """
    evaluate_dict = {
        'image_count': 0,
        'threshold': evaluate_dicts[0]['threshold'],
        'nms_threshold': evaluate_dicts[0]['nms_threshold'],
        'average_proposal_count': 0,
        'iou_cover': {
            'threshold': evaluate_dicts[0]['iou_cover']['threshold'],
            'TP': 0,
            'FP': 0,
            'FN': 0,
            'macro_precision': 0,
            'macro_recall': 0,
            'micro_precision': 0,
            'micro_recall': 0,
        },
        'proposals_cover': {
            'threshold': evaluate_dicts[0]['proposals_cover']['threshold'],
            'TP': 0,
            'FP': 0,
            'FN': 0,
            'macro_precision': 0,
            'macro_recall': 0,
            'micro_precision': 0,
            'micro_recall': 0,
        },
        'gt_boxes_cover': {
            'threshold': evaluate_dicts[0]['gt_boxes_cover']['threshold'],
            'TP': 0,
            'FP': 0,
            'FN': 0,
            'macro_precision': 0,
            'macro_recall': 0,
            'micro_precision': 0,
            'micro_recall': 0,
        },
        'classes': {},
    }
    for part_evaluate_dict in evaluate_dicts:
        evaluate_dict['image_count'] += part_evaluate_dict['image_count']
        evaluate_dict['average_proposal_count'] += part_evaluate_dict['average_proposal_count']
        for strategy in ['iou_cover', 'proposals_cover', 'gt_boxes_cover']:
            for name in ['TP', 'FP', 'FN', 'macro_precision', 'macro_recall']:
                evaluate_dict[strategy][name] += part_evaluate_dict[strategy][name]

        for class_id, class_dict in part_evaluate_dict['classes'].items():
            if class_id not in evaluate_dict['classes']:
                # BUGFIX: take a deep copy instead of aliasing the part's dict —
                # the in-place normalization below would otherwise mutate the
                # caller's input dicts.
                evaluate_dict['classes'][class_id] = copy.deepcopy(class_dict)
            else:
                evaluate_dict['classes'][class_id]['average_proposal_count'] += class_dict['average_proposal_count']
                for strategy in ['iou_cover', 'proposals_cover', 'gt_boxes_cover']:
                    for name in ['TP', 'FP', 'FN', 'macro_precision', 'macro_recall']:
                        evaluate_dict['classes'][class_id][strategy][name] += class_dict[strategy][name]

    image_count = evaluate_dict['image_count']
    evaluate_dict['average_proposal_count'] /= image_count
    for class_id in evaluate_dict['classes'].keys():
        evaluate_dict['classes'][class_id]['average_proposal_count'] /= image_count

    for strategy in ['iou_cover', 'proposals_cover', 'gt_boxes_cover']:
        stats = evaluate_dict[strategy]
        stats['macro_precision'] /= image_count
        stats['macro_recall'] /= image_count
        stats['micro_precision'] = get_precision(stats['TP'], stats['FP'], stats['FN'])
        stats['micro_recall'] = get_recall(stats['TP'], stats['FP'], stats['FN'])

        for class_id in evaluate_dict['classes'].keys():
            # NOTE(review): per-class macro sums and proposal counts are divided
            # by the TOTAL image count, including images without this class —
            # confirm this is the intended macro definition.
            class_stats = evaluate_dict['classes'][class_id][strategy]
            class_stats['macro_precision'] /= image_count
            class_stats['macro_recall'] /= image_count
            class_stats['micro_precision'] = get_precision(class_stats['TP'], class_stats['FP'], class_stats['FN'])
            class_stats['micro_recall'] = get_recall(class_stats['TP'], class_stats['FP'], class_stats['FN'])

    return evaluate_dict

if __name__ == '__main__':
    args = parse_args()

    # Expand comma-separated dataset names into the split-suffixed names
    # consumed by build_dataset (train split for training sets, test otherwise).
    train_datasets = [f"{name}-train" for name in args.datasets.split(',')]
    val_datasets = [f"{name}-test" for name in args.val_datasets.split(',')]
    test_datasets = [f"{name}-test" for name in args.test_datasets.split(',')]

    # Directory holding the pickled prediction parts produced by the predict step.
    predict_dict_dir = os.path.join(
        'output', 'predict',
        '{}/[train]-{}/[val]-{}/[test]-{}-[checkpoint]-{}'.format(
            args.model,
            '&'.join(train_datasets),
            '&'.join(val_datasets),
            '&'.join(test_datasets),
            args.checkpoint,
        ),
    )

    eval_kwargs = {
        'threshold': 0,
        'nms_threshold': args.nms_threshold,
        'iou_cover_threshold': args.iou_cover_threshold,
        'proposals_cover_threshold': args.proposals_cover_threshold,
        'gt_boxes_cover_threshold': args.gt_boxes_cover_threshold,
    }

    # threshold -> list of per-part evaluate dicts. Parts are streamed one at a
    # time so only a single prediction pickle is resident in memory.
    evaluate_dict_map = {}
    for part_index, predict_dict in enumerate(PredictDictLoader(predict_dict_dir)):
        eval_kwargs['part'] = part_index
        dataset = PredictionDataset(test_datasets, predict_dict)
        for threshold in np.arange(0.99, 0.05, -0.01):
            eval_kwargs['threshold'] = threshold
            part_result = evaluate(dataset, **eval_kwargs)
            evaluate_dict_map.setdefault(threshold, []).append(part_result)
        del predict_dict
        del dataset
        gc.collect()  # release this part's predictions before loading the next

    # One finalized dict per threshold (insertion order: descending threshold).
    evaluate_dict_list = [compose_evaluate_dicts(parts) for parts in evaluate_dict_map.values()]

    output_path = os.path.join(
        'output', 'evaluate',
        '{}/[train]-{}/[val]-{}/[test]-{}-[checkpoint]-{}/[nms]-{}-[evaluate]-{}-{}-{}.pkl'.format(
            args.model,
            '&'.join(train_datasets),
            '&'.join(val_datasets),
            '&'.join(test_datasets),
            args.checkpoint,
            args.nms_threshold,
            args.iou_cover_threshold,
            args.proposals_cover_threshold,
            args.gt_boxes_cover_threshold,
        ),
    )
    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    with open(output_path, 'wb') as f:
        pickle.dump(evaluate_dict_list, f)

    print(f"evaluate dict list save at {output_path}")
    print("done")




