import torch
import numpy as np
import argparse
import os
from tqdm import tqdm
import pickle
import matplotlib.pyplot as plt
import math
import copy
from pprint import pprint

from data.dataset import build_dataset
from utils.boxes import postprocess
from utils.evaluate import get_area, get_intersection_area_matrix, evaluate_matrix, get_precision, get_recall

def parse_args():
    """Parse command-line options for the box-evaluation script.

    Returns an argparse.Namespace with model/dataset selectors, the
    checkpoint tag, and the detection/cover thresholds.
    """
    parser = argparse.ArgumentParser(description='evaluate net')

    # Required dataset/model selectors (all plain strings).
    for flag, help_text in [
        ("--model", 'model name'),
        ("--datasets", 'train datasets name'),
        ("--val-datasets", 'val datasets name'),
        ("--test-datasets", 'datasets for sampling'),
    ]:
        parser.add_argument(flag, type=str, help=help_text, required=True)

    parser.add_argument("--checkpoint", type=str, help="checkpoint iter, can be 'last', 'best', iternumber", default='best')

    # Optional float thresholds, all defaulting to sensible values.
    for flag, help_text, default in [
        ("--threshold", 'threshold', 0.5),
        ("--nms-threshold", 'nms threshold', 0.5),
        ("--iou-cover-threshold", 'iou cover threshold', 0.5),
        ("--proposals-cover-threshold", 'proposals cover threshold', 0.7),
        ("--gt-boxes-cover-threshold", 'gt boxes cover threshold', 0.7),
    ]:
        parser.add_argument(flag, type=float, help=help_text, default=default)

    return parser.parse_args()

class PredictDictLoader:
    """Merge every pickled prediction dict found in a directory into one mapping.

    Each file in `predict_dict_dir` is expected to unpickle to a dict; later
    files overwrite colliding keys (directory listing order).
    """

    def __init__(self, predict_dict_dir):
        self.predict_dict_dir = predict_dict_dir
        self.load()

    def load(self):
        """(Re)load all pickle shards from the directory into `self.predict_dict`."""
        # NOTE(review): pickle.load assumes these files are trusted local artifacts.
        self.predict_dict = {}
        for entry in os.listdir(self.predict_dict_dir):
            shard_path = os.path.join(self.predict_dict_dir, entry)
            with open(shard_path, 'rb') as handle:
                self.predict_dict.update(pickle.load(handle))

    def __getitem__(self, idx):
        # idx is a key of the merged dict (a prediction filename), not an int index.
        return self.predict_dict[idx]

class PredictionDataset:
    """Pair ground-truth annotations (via build_dataset) with stored predictions.

    Iterating yields, per image: filename, ground-truth boxes (N, 4),
    ground-truth class ids (N,), predicted bboxes and classification scores
    looked up from `predict_dict` by filename.
    """

    def __init__(self, dataset_names, predict_dict):
        self.dataset_dicts = build_dataset(*dataset_names)
        self.predict_dict = predict_dict

    def __len__(self):
        return len(self.dataset_dicts)

    def __getitem__(self, index):
        record = self.dataset_dicts[index]
        filename = record['file_name']
        annotations = record['annotations']
        # reshape(-1, 4) keeps the shape well-formed even with zero annotations
        gt_boxes = np.array([ann['bbox'] for ann in annotations]).reshape(-1, 4)
        gt_classification = np.array([ann['category_id'] for ann in annotations], dtype=np.uint8)
        prediction = self.predict_dict[filename]
        return filename, gt_boxes, gt_classification, prediction['bboxes'], prediction['classification']

    def __iter__(self):
        return (self[i] for i in range(len(self)))


def _make_strategy_dict(threshold):
    """One cover-strategy bucket: a threshold plus hit/miss area & size accumulators."""
    return {
        'threshold': threshold,
        'miss_boxes_area': [],
        'miss_boxes_width': [],
        'miss_boxes_height': [],
        'hit_boxes_area': [],
        'hit_boxes_width': [],
        'hit_boxes_height': [],
    }


def _make_result_dict(**kwargs):
    """Fresh accumulator dict for one evaluation scope (global or per-class).

    Replaces the two byte-identical ~35-line dict literals the original
    duplicated for `evaluate_dict` and `class_evaluate_dict_template`.
    """
    return {
        'threshold': kwargs['threshold'],
        'nms_threshold': kwargs['nms_threshold'],
        'average_proposal_count': 0,
        'predict_boxes_area': [],
        'groundtruth_boxes_area': [],
        'iou_cover': _make_strategy_dict(kwargs['iou_cover_threshold']),
        'proposals_cover': _make_strategy_dict(kwargs['proposals_cover_threshold']),
        'gt_boxes_cover': _make_strategy_dict(kwargs['gt_boxes_cover_threshold']),
    }


def evaluate(dataset, **kwargs):
    """Evaluate predictions over `dataset` under three cover strategies.

    kwargs must contain: threshold, nms_threshold, iou_cover_threshold,
    proposals_cover_threshold, gt_boxes_cover_threshold.

    Returns a nested dict with global accumulators plus per-class dicts under
    'classes'. A ground-truth box counts as "hit" for a strategy when any
    proposal's (intersection / strategy-denominator) ratio exceeds that
    strategy's threshold.
    """
    evaluate_dict = _make_result_dict(**kwargs)
    evaluate_dict['classes'] = {}

    total_class_ids_set = set()

    for filename, gt_boxes_all_classes, gt_classification, bboxes, classification in tqdm(dataset, desc="threshold={:.4f}".format(kwargs['threshold']), leave=False):
        data = postprocess(bboxes, classification, kwargs['threshold'], kwargs['nms_threshold'])
        class_ids_set = set(data['class_ids'])
        total_class_ids_set |= class_ids_set

        for class_id in class_ids_set:
            if class_id not in evaluate_dict['classes']:
                evaluate_dict['classes'][class_id] = _make_result_dict(**kwargs)
            class_dict = evaluate_dict['classes'][class_id]

            proposals = data['rois'][data['class_ids'] == class_id]
            gt_boxes = gt_boxes_all_classes[gt_classification == class_id]

            proposals_area = get_area(proposals)
            gt_boxes_area = get_area(gt_boxes)
            intersection_area_matrix = get_intersection_area_matrix(proposals, gt_boxes)
            union_area_matrix = proposals_area.reshape(-1, 1) + gt_boxes_area.reshape(1, -1) - intersection_area_matrix

            class_dict['groundtruth_boxes_area'] += list(gt_boxes_area)
            class_dict['predict_boxes_area'] += list(proposals_area)

            # Each strategy divides the intersection by a different denominator:
            # IoU uses the union, the other two normalize by one side's area.
            denominators = {
                'iou_cover': union_area_matrix,
                'proposals_cover': proposals_area.reshape(-1, 1),
                'gt_boxes_cover': gt_boxes_area.reshape(1, -1),
            }
            gt_widths = gt_boxes[:, 2] - gt_boxes[:, 0]
            gt_heights = gt_boxes[:, 3] - gt_boxes[:, 1]

            for strategy, denominator in denominators.items():
                ratio = intersection_area_matrix / denominator
                # any(axis=0): a gt box is hit if any proposal covers it well enough
                box_hit = (ratio > evaluate_dict[strategy]['threshold']).any(axis=0)
                bucket = class_dict[strategy]
                bucket['hit_boxes_area'] += list(gt_boxes_area[box_hit])
                bucket['miss_boxes_area'] += list(gt_boxes_area[~box_hit])
                bucket['hit_boxes_width'] += list(gt_widths[box_hit])
                bucket['hit_boxes_height'] += list(gt_heights[box_hit])
                bucket['miss_boxes_width'] += list(gt_widths[~box_hit])
                bucket['miss_boxes_height'] += list(gt_heights[~box_hit])

            class_dict['average_proposal_count'] += len(proposals)

    # Fold every per-class accumulator into the global one.
    for class_id in total_class_ids_set:
        class_dict = evaluate_dict['classes'][class_id]
        class_dict['average_proposal_count'] /= len(dataset)
        evaluate_dict['average_proposal_count'] += class_dict['average_proposal_count']
        evaluate_dict['groundtruth_boxes_area'] += class_dict['groundtruth_boxes_area']
        evaluate_dict['predict_boxes_area'] += class_dict['predict_boxes_area']

        for strategy in ['iou_cover', 'proposals_cover', 'gt_boxes_cover']:
            for key in ['hit_boxes_area', 'miss_boxes_area', 'hit_boxes_width',
                        'hit_boxes_height', 'miss_boxes_width', 'miss_boxes_height']:
                evaluate_dict[strategy][key] += class_dict[strategy][key]

    return evaluate_dict

def analysis_result(evaluate_dict, output_dir):
    """Write summary statistics of an evaluation run to <output_dir>/analysis.txt.

    Progress messages go to stdout; the statistics themselves go to the file.
    Empty accumulators (e.g. a strategy with no misses) now report 'nan'
    instead of raising ZeroDivisionError as the original `sum/len` lambda did.
    """
    path = os.path.join(output_dir, 'analysis.txt')

    def mean(data):
        # Guard against empty lists, which are reachable when every box hits
        # (no misses) or no predictions survive thresholding.
        return sum(data) / len(data) if data else float('nan')

    with open(path, 'w', encoding='utf-8') as f:
        print("threshold:", evaluate_dict['threshold'], file=f)
        print("nms_threshold:", evaluate_dict['nms_threshold'], file=f)
        print("average_proposal_count:", evaluate_dict['average_proposal_count'], file=f)

        print("calculate groundtruth mean ... ", end='')
        print("groundtruth_mean:", mean(evaluate_dict['groundtruth_boxes_area']), file=f)
        print("done")

        print("calculate predict mean ... ", end='')
        print("predict_mean:", mean(evaluate_dict['predict_boxes_area']), file=f)
        print("done")

        for strategy in ['iou_cover', 'proposals_cover', 'gt_boxes_cover']:
            print(f"[{strategy}] threshold:", evaluate_dict[strategy]['threshold'], file=f)

            print(f"[{strategy}] calculate hit mean ... ", end='')
            print(f"[{strategy}] hit_mean:", mean(evaluate_dict[strategy]['hit_boxes_area']), file=f)
            print("done")

            print(f"[{strategy}] calculate miss mean ... ", end='')
            print(f"[{strategy}] miss_mean:", mean(evaluate_dict[strategy]['miss_boxes_area']), file=f)
            print("done")


def draw_hit_miss_width_height_heatmap(hit_widths, hit_heights, miss_widths, miss_heights, output_path):
    """Save a 2x2 figure of hit/miss (width, height) heatmaps to `output_path`.

    Top row: full-range hit and miss heatmaps; bottom row: zoomed views of the
    small-box corner (lowest `small_rate` fraction of each range). Requires at
    least two distinct widths and heights across hit+miss boxes.
    """
    # Build the combined lists explicitly: the original min(*a, *b) crashed
    # with a TypeError when the combined arguments were a single element.
    all_widths = [*hit_widths, *miss_widths]
    all_heights = [*hit_heights, *miss_heights]
    width_min, width_max = min(all_widths), max(all_widths)
    height_min, height_max = min(all_heights), max(all_heights)
    assert width_min < width_max and height_min < height_max

    bins = (100, 100)
    xedges = np.linspace(width_min, width_max, num=bins[0] + 1)
    yedges = np.linspace(height_min, height_max, num=bins[1] + 1)

    # `normed` was removed from np.histogram2d in NumPy 1.24 (deprecated 1.21);
    # normed=False was the default, so dropping it preserves behavior.
    # .T puts heights on rows (y) and widths on columns (x) for imshow.
    hit_heatmap = np.histogram2d(hit_widths, hit_heights, bins=[xedges, yedges])[0].T
    miss_heatmap = np.histogram2d(miss_widths, miss_heights, bins=[xedges, yedges])[0].T

    fig = plt.figure(figsize=(16, 16))

    def _add_panel(position, heatmap, title, x_bins, y_bins, x_max_label, y_max_label):
        # One subplot: tick positions in bin units, tick labels in box units.
        ax = fig.add_subplot(position)
        ax.set_xticks(np.linspace(0, x_bins, num=6))
        ax.set_xticklabels(np.linspace(width_min, x_max_label, num=6))
        ax.set_yticks(np.linspace(0, y_bins, num=6))
        ax.set_yticklabels(np.linspace(height_min, y_max_label, num=6))
        im = ax.imshow(heatmap, cmap=plt.cm.hot_r)
        plt.colorbar(im)
        ax.set_title(title)

    _add_panel(221, hit_heatmap, "hit", bins[0], bins[1], width_max, height_max)
    _add_panel(222, miss_heatmap, "miss", bins[0], bins[1], width_max, height_max)

    small_rate = 0.2
    small_x = int(bins[0] * small_rate)
    small_y = int(bins[1] * small_rate)
    small_width_max = width_min + (width_max - width_min) * small_rate
    small_height_max = height_min + (height_max - height_min) * small_rate

    # Transposed heatmap rows are y (heights), columns are x (widths), so the
    # zoom slice is [rows=:small_y, cols=:small_x]; identical to the original
    # for the current square binning, but correct if bins ever differ.
    _add_panel(223, hit_heatmap[:small_y, :small_x], "hit-small",
               small_x, small_y, small_width_max, small_height_max)
    _add_panel(224, miss_heatmap[:small_y, :small_x], "miss-small",
               small_x, small_y, small_width_max, small_height_max)

    fig.savefig(output_path, dpi=240)
    plt.close(fig)  # release the figure; matplotlib warns after ~20 open figures

def _save_histogram(output_path, series, xlabel):
    """Plot one or more 500-bin area histograms and save the figure.

    series: list of (data, color, label) tuples drawn in order onto one axes.
    """
    plt.figure(figsize=(10, 4))
    for data, color, label in series:
        plt.hist(data, bins=500, color=color, label=label)
    plt.xlabel(xlabel)
    plt.ylabel('freq')
    plt.legend()
    plt.savefig(output_path, dpi=240)
    plt.close()  # the original leaked every figure; matplotlib warns after ~20


def draw_result(evaluate_dict, output_dir):
    """Render all histogram/heatmap plots for one evaluation dict into `output_dir`.

    Emits area and sqrt-area histograms for ground-truth vs predictions, then
    per-strategy hit/miss histograms (combined, miss-only, hit-only) and a
    width/height hit-miss heatmap. Filenames match the original script exactly.
    """
    def sqrt_list(data):
        return [math.sqrt(i) for i in data]

    _save_histogram(
        os.path.join(output_dir, 'total.png'),
        [(evaluate_dict['groundtruth_boxes_area'], 'g', 'groundtruth'),
         (evaluate_dict['predict_boxes_area'], 'b', 'predict')],
        'box area')

    _save_histogram(
        os.path.join(output_dir, 'total-sqrt.png'),
        [(sqrt_list(evaluate_dict['groundtruth_boxes_area']), 'g', 'groundtruth'),
         (sqrt_list(evaluate_dict['predict_boxes_area']), 'b', 'predict')],
        'box sqrt area')

    for strategy in ['iou_cover', 'proposals_cover', 'gt_boxes_cover']:
        hit = evaluate_dict[strategy]['hit_boxes_area']
        miss = evaluate_dict[strategy]['miss_boxes_area']

        _save_histogram(os.path.join(output_dir, f'{strategy}.png'),
                        [(hit, 'g', 'hit'), (miss, 'r', 'miss')], 'box area')
        _save_histogram(os.path.join(output_dir, f'{strategy}-sqrt.png'),
                        [(sqrt_list(hit), 'g', 'hit'), (sqrt_list(miss), 'r', 'miss')],
                        'box sqrt area')
        _save_histogram(os.path.join(output_dir, f'[{strategy}]-miss.png'),
                        [(miss, 'r', 'miss')], 'box area')
        _save_histogram(os.path.join(output_dir, f'[{strategy}]-sqrt-miss.png'),
                        [(sqrt_list(miss), 'r', 'miss')], 'box sqrt area')
        _save_histogram(os.path.join(output_dir, f'[{strategy}]-hit.png'),
                        [(hit, 'g', 'hit')], 'box area')
        _save_histogram(os.path.join(output_dir, f'[{strategy}]-sqrt-hit.png'),
                        [(sqrt_list(hit), 'g', 'hit')], 'box sqrt area')

        draw_hit_miss_width_height_heatmap(
            evaluate_dict[strategy]['hit_boxes_width'],
            evaluate_dict[strategy]['hit_boxes_height'],
            evaluate_dict[strategy]['miss_boxes_width'],
            evaluate_dict[strategy]['miss_boxes_height'],
            os.path.join(output_dir, f'[{strategy}]-heatmap.png')
        )


if __name__ == '__main__':
    args = parse_args()

    # Expand comma-separated dataset names into split-qualified names.
    train_datasets = ["{}-train".format(name) for name in args.datasets.split(',')]
    val_datasets = ["{}-test".format(name) for name in args.val_datasets.split(',')]
    test_datasets = ["{}-test".format(name) for name in args.test_datasets.split(',')]

    joined_train = '&'.join(train_datasets)
    joined_val = '&'.join(val_datasets)
    joined_test = '&'.join(test_datasets)

    print("loading predict dict, it may take a few minutes ... ", end='', flush=True)
    predict_dict_dir = os.path.join(
        'output', 'predict',
        '{}/[train]-{}/[val]-{}/[test]-{}-[checkpoint]-{}'.format(
            args.model, joined_train, joined_val, joined_test, args.checkpoint))
    predict_dict = PredictDictLoader(predict_dict_dir)
    print("load done", flush=True)

    dataset = PredictionDataset(test_datasets, predict_dict)

    # Threshold settings forwarded to evaluate() as keyword arguments.
    eval_settings = {
        'threshold': args.threshold,
        'nms_threshold': args.nms_threshold,
        'iou_cover_threshold': args.iou_cover_threshold,
        'proposals_cover_threshold': args.proposals_cover_threshold,
        'gt_boxes_cover_threshold': args.gt_boxes_cover_threshold,
    }

    output_dir = os.path.join(
        'output', 'analysis_boxes',
        '{}/[train]-{}/[val]-{}/[test]-{}-[checkpoint]-{}/[threshold]-{}-[nms]-{}-[evaluate]-{}-{}-{}'.format(
            args.model, joined_train, joined_val, joined_test, args.checkpoint,
            args.threshold, args.nms_threshold, args.iou_cover_threshold,
            args.proposals_cover_threshold, args.gt_boxes_cover_threshold))
    os.makedirs(output_dir, exist_ok=True)

    evaluate_dict = evaluate(dataset, **eval_settings)
    analysis_result(evaluate_dict, output_dir)
    draw_result(evaluate_dict, output_dir)

    # Per-class breakdowns, skipping classes with no ground-truth boxes.
    for class_id, class_dict in evaluate_dict['classes'].items():
        if not class_dict['groundtruth_boxes_area']:
            continue
        class_output_dir = os.path.join(output_dir, str(class_id))
        os.makedirs(class_output_dir, exist_ok=True)
        analysis_result(class_dict, class_output_dir)
        draw_result(class_dict, class_output_dir)

    print("result save at {}".format(output_dir))
    print("done")
