import torch
import numpy as np
import os
from tqdm import tqdm
import argparse
import pickle
import shutil
import copy
from pprint import pprint
import matplotlib.pyplot as plt
from pathlib import Path

from data.utils import read_image
from data.dataset import build_dataset
from engine import Predictor
from utils.config import get_test_cfg
from utils.boxes import postprocess
from utils.evaluate import get_area, get_intersection_area_matrix, evaluate_matrix2, get_precision, get_recall


def parse_args():
    """Build the CLI parser for the prediction/evaluation script and parse argv.

    Returns:
        argparse.Namespace with model/dataset selection, checkpoint choice,
        GPU ids, batch size, and the four evaluation thresholds.
    """
    # (flag, options) table keeps the argument inventory easy to scan.
    flag_specs = [
        ("--model", dict(type=str, help='model name', required=True)),
        ("--datasets", dict(type=str, help='train datasets name', required=True)),
        ("--val-datasets", dict(type=str, help='val datasets name', required=True)),
        ("--test-datasets", dict(type=str, help='datasets for sampling', required=True)),
        ("--checkpoint", dict(type=str, help="checkpoint iter, can be 'last', 'best', iternumber", default='best')),
        ("--gpus", dict(type=str, help="use gpus id", default='0')),
        ("--batch-size", dict(type=int, help='batch size', default=16)),
        ("--threshold", dict(type=float, help='threshold', default=0.2)),
        ("--nms-threshold", dict(type=float, help='nms threshold', default=0.5)),
        ("--iou-cover-threshold", dict(type=float, help='iou cover threshold', default=0.5)),
        ("--proposals-cover-threshold", dict(type=float, help='proposals cover threshold', default=0.7)),
        ("--gt-boxes-cover-threshold", dict(type=float, help='gt boxes cover threshold', default=0.7)),
    ]
    parser = argparse.ArgumentParser(description="predict net")
    for flag, options in flag_specs:
        parser.add_argument(flag, **options)
    return parser.parse_args()

class FullDataset:
    """Indexable dataset yielding (filename, image, gt_boxes, gt_labels) per record.

    Wraps the record dicts produced by ``build_dataset`` and materializes each
    sample's image plus its ground-truth boxes/labels as numpy arrays.
    """

    def __init__(self, dataset_names):
        # build_dataset merges all named splits into one list of record dicts.
        self.dataset_dicts = build_dataset(*dataset_names)

    def __len__(self):
        return len(self.dataset_dicts)

    def __getitem__(self, index):
        record = self.dataset_dicts[index]
        annotations = record['annotations']
        # (-1, 4) reshape keeps a valid 2-D box array even with zero annotations.
        gt_boxes = np.array([obj['bbox'] for obj in annotations]).reshape(-1, 4)
        gt_classification = np.array(
            [obj['category_id'] for obj in annotations], dtype=np.uint8)
        image = read_image(
            record['file_name'],
            preprocess_method=record['preprocess'],
            params=record['params'],
        )
        return record['file_name'], image, gt_boxes, gt_classification

class BatchDataset:
    """Groups an indexable dataset of sample tuples into fixed-size batches.

    Each batch is returned column-wise: ``zip(*samples)`` transposes the list
    of per-sample tuples into one iterator of per-field tuples.
    """

    def __init__(self, dataset, batch_size, drop_last=False):
        self.dataset = dataset
        self.drop_last = drop_last
        self.batch_size = batch_size
        # Refuse configurations that would yield zero batches.
        assert len(self) > 0

    def __len__(self):
        full_batches, remainder = divmod(len(self.dataset), self.batch_size)
        # A trailing partial batch counts unless drop_last is set.
        if remainder and not self.drop_last:
            full_batches += 1
        return full_batches

    def __getitem__(self, index):
        lo = index * self.batch_size
        hi = min(lo + self.batch_size, len(self.dataset))
        samples = [self.dataset[i] for i in range(lo, hi)]
        return zip(*samples)

    def __iter__(self):
        return (self[i] for i in range(len(self)))

def predict_and_evaluate(predictor, dataset, **kwargs):
    """Run the predictor over all batches and accumulate per-class detection metrics.

    For each detected class, three overlap "strategies" are scored per image:
    IoU cover, proposals cover (intersection / proposal area) and gt-boxes
    cover (intersection / gt area). Per-image precision/recall are summed into
    macro_* fields (normalized per class at the end), while TP/FP/FN counts
    feed the micro_* fields. Class-0 IoU matches are also collected to compute
    the log-average miss rate (LAMR) and the MR/FPPI curve.

    Args:
        predictor: callable mapping a list of images to a dict with
            'bboxes' and 'classification' entries (one per image).
        dataset: iterable of batches yielding
            (filenames, images, gt_boxes, gt_classifications) columns.
        **kwargs: thresholds ('threshold', 'nms_threshold',
            '*_cover_threshold'), plus 'save_dir' and 'class_names' for the
            LAMR plot.

    Returns:
        Nested dict of accumulated metrics keyed by class id.
    """
    evaluate_dict = {
        'threshold': kwargs['threshold'],
        'nms_threshold': kwargs['nms_threshold'],
        'image_count': 0,
        'classes': {},
    }
    # Deep-copied into evaluate_dict['classes'][class_id] the first time a
    # class id is seen; macro_*/micro_* start at 0 and are filled in below.
    class_evaluate_dict_template = {
        'image_count': 0,
        'average_proposal_count': 0,
        'iou_cover': {
            'threshold': kwargs['iou_cover_threshold'],
            'TP': 0,
            'TP_gt': 0,
            'FP': 0,
            'FN': 0,
            'macro_precision': 0,
            'macro_recall': 0,
            'micro_precision': 0,
            'micro_recall': 0,
        },
        'proposals_cover': {
            'threshold': kwargs['proposals_cover_threshold'],
            'TP': 0,
            'TP_gt': 0,
            'FP': 0,
            'FN': 0,
            'macro_precision': 0,
            'macro_recall': 0,
            'micro_precision': 0,
            'micro_recall': 0,
        },
        'gt_boxes_cover': {
            'threshold': kwargs['gt_boxes_cover_threshold'],
            'TP': 0,
            'TP_gt': 0,
            'FP': 0,
            'FN': 0,
            'macro_precision': 0,
            'macro_recall': 0,
            'micro_precision': 0,
            'micro_recall': 0,
        },
    }
    # Per-detection match flags / scores / labels for the LAMR computation.
    tp, conf, pred_cls, target_cls = [], [], [], []
    for filename_list, image_list, gt_boxes_list, gt_classification_list in tqdm(dataset):
        data_dict = predictor(image_list)
        evaluate_dict['image_count'] += len(filename_list)
        
        for filename, gt_boxes_all_classes, gt_classification, bboxes, classification in zip(filename_list, gt_boxes_list, gt_classification_list, data_dict['bboxes'], data_dict['classification']):
            #bboxes = bboxes.cpu()
            #classification = classification.cpu()
            gt_class_ids_set = set(gt_classification)

            # Score-threshold + NMS; data holds 'rois', 'scores', 'class_ids'.
            data = postprocess(bboxes, classification, kwargs['threshold'], kwargs['nms_threshold'])
            # class_ids_set = set(data['class_ids']).intersection(gt_class_ids_set)
            # NOTE: iterates over *predicted* classes, so classes with gt boxes
            # but no predictions in this image contribute nothing here.
            class_ids_set = set(data['class_ids'])

            for class_id in class_ids_set:
                if class_id not in evaluate_dict["classes"]:
                    evaluate_dict["classes"][class_id] = copy.deepcopy(class_evaluate_dict_template)

                evaluate_dict['classes'][class_id]['image_count'] += 1

                # Restrict proposals and ground truth to the current class.
                proposals = data['rois'][data['class_ids'] == class_id]
                gt_boxes = gt_boxes_all_classes[gt_classification == class_id]

                proposals_area = get_area(proposals)
                gt_boxes_area = get_area(gt_boxes)
                intersection_area_matrix = get_intersection_area_matrix(proposals, gt_boxes)
                union_area_matrix = proposals_area.reshape(-1,1) + gt_boxes_area.reshape(1,-1) - intersection_area_matrix

                # Each strategy's denominator; dividing the intersection matrix
                # by it yields IoU, proposal-cover and gt-cover ratios in turn.
                temp = {
                    'iou_cover': union_area_matrix,
                    'proposals_cover': proposals_area.reshape(-1,1),
                    'gt_boxes_cover': gt_boxes_area.reshape(1,-1)
                }

                for strategy, matrix in temp.items():
                    matrix = intersection_area_matrix / matrix
                    TP, TP_gt, FP, FN = evaluate_matrix2(matrix, evaluate_dict["classes"][class_id][strategy]['threshold'])
                    evaluate_dict["classes"][class_id][strategy]['TP'] += TP
                    evaluate_dict["classes"][class_id][strategy]['TP_gt'] += TP_gt
                    evaluate_dict["classes"][class_id][strategy]['FP'] += FP
                    evaluate_dict["classes"][class_id][strategy]['FN'] += FN
                    # Macro: sum per-image P/R now, divide by image_count later.
                    evaluate_dict["classes"][class_id][strategy]['macro_precision'] += get_precision(TP, FP, FN)
                    # NOTE(review): get_recall is fed (TP_gt, FP, FN) — confirm its
                    # signature actually expects FP as the second argument.
                    evaluate_dict["classes"][class_id][strategy]['macro_recall'] += get_recall(TP_gt, FP, FN)

                    # Collect LAMR inputs from the IoU strategy for class 0 only
                    # (cf. class_names=['person'] set by the caller).
                    if strategy == 'iou_cover' and class_id == 0:
                        matrix = matrix > evaluate_dict["classes"][class_id][strategy]['threshold']
                        # A proposal is a TP if it covers any gt box above threshold.
                        tp.append(matrix.any(axis=1))
                        conf.append(data['scores'][data['class_ids'] == class_id])
                        pred_cls.append(data['class_ids'][data['class_ids'] == class_id])
                        target_cls.append(gt_classification[gt_classification == class_id])

                evaluate_dict["classes"][class_id]['average_proposal_count'] += len(proposals)
    
    tp, conf, pred_cls, target_cls = np.concatenate(tp, 0).reshape(-1, 1), np.concatenate(conf, 0), np.concatenate(pred_cls, 0), np.concatenate(target_cls)
    
    print("tp.shape:", tp.shape)
    print("conf.shape:", conf.shape)
    print("pred_cls.shape:", pred_cls.shape)
    print("target_cls.shape:", target_cls.shape)
    mr, fppi, lamr, lamr_class = log_average_miss_rate(tp, conf, pred_cls, target_cls, evaluate_dict['image_count'], plot=True, save_dir=kwargs['save_dir'], names=kwargs['class_names'])
    # Keep only the first IoU column for reporting.
    lamr, mr, fppi = lamr[:, 0], mr[:, 0], fppi[:, 0]
    
    print("LAMR >> ")
    print("     >> CLASS: {},\tLAMR: {},\tMR: {},\tFPPI: {}".format('all', lamr, mr, fppi))
    if len(lamr_class) > 0:
        for i, c in enumerate(lamr_class):
            print("     >> CLASS: {},\tLAMR: {},\tMR: {},\tFPPI: {}".format(kwargs['class_names'][c], lamr[i], mr[i], fppi[i]))
    

    # Normalize running totals into per-image averages.
    for class_id in evaluate_dict['classes'].keys():
        evaluate_dict["classes"][class_id]['average_proposal_count'] /= evaluate_dict["classes"][class_id]['image_count']
        
    for strategy in ['iou_cover', 'proposals_cover', 'gt_boxes_cover']:
        for class_id in evaluate_dict['classes'].keys():
            # Macro: mean of per-image precision/recall.
            evaluate_dict["classes"][class_id][strategy]['macro_precision'] /= evaluate_dict["classes"][class_id]['image_count']
            evaluate_dict["classes"][class_id][strategy]['macro_recall'] /= evaluate_dict["classes"][class_id]['image_count']
            # Micro: precision/recall from globally summed counts.
            evaluate_dict["classes"][class_id][strategy]['micro_precision'] = get_precision(evaluate_dict["classes"][class_id][strategy]['TP'], evaluate_dict["classes"][class_id][strategy]['FP'], evaluate_dict["classes"][class_id][strategy]['FN'])
            evaluate_dict["classes"][class_id][strategy]['micro_recall'] = get_recall(evaluate_dict["classes"][class_id][strategy]['TP_gt'], evaluate_dict["classes"][class_id][strategy]['FP'], evaluate_dict["classes"][class_id][strategy]['FN'])

    return evaluate_dict


def log_average_miss_rate(tp, conf, pred_cls, target_cls, num_image, plot=False, save_dir='missrate_falsepostivesperimage_curve.png', names=[]):
    """Compute per-class miss rate, FPPI and log-average miss rate (LAMR).

    Args:
        tp: (num_det, num_iou) array of per-detection true-positive flags.
        conf: (num_det,) detection confidences.
        pred_cls: (num_det,) predicted class id per detection.
        target_cls: per-gt-box class ids (defines which classes are scored).
        num_image: total image count, used to normalize false positives.
        plot: if True, also render the MR/FPPI curve via plot_mr_fppi_curve.
        save_dir: directory passed to the plotting helper.
        names: class display names for the plot legend.

    Returns:
        (mr, fppi, lamr, unique_classes) — mr/fppi are evaluated at
        confidence 0.1; lamr is the log-average of miss rate over 9 FPPI
        reference points in [1e-2, 1]; all shaped (num_classes, num_iou).
    """
    # Sort by objectness
    i = np.argsort(-conf)
    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]

    # Find unique class
    unique_classes = np.unique(target_cls)

    # px: FPPI sample grid for plotting; py collects one curve per class.
    px, py = np.linspace(1e-3, 1e2, 1000), []
    mr_fppi_score = 0.1  # score to evaluate mr and fppi
    s = [unique_classes.shape[0], tp.shape[1]]
    lamr, mr, fppi = np.zeros(s), np.zeros(s), np.zeros(s)
    for ci, c in enumerate(unique_classes):
        i = pred_cls == c
        n_l = (target_cls == c).sum()  # number of labels
        n_p = i.sum()  # numebr of predictions

        if n_p == 0 or n_l == 0:
            continue
        else:
            # Accumulate TPs and FPs
            fpc = (1 - tp[i]).cumsum(0)
            tpc = tp[i].cumsum(0)
            print("fpc.shape:", fpc.shape)
            print("tpc.shape:", tpc.shape)

            # miss rate
            miss_rate = 1 - tpc / (n_l + 1e-16)
            # NOTE(review): np.interp expects increasing xp, but conf[i] is
            # sorted in *descending* order here — verify these two interps
            # return the intended value at confidence 0.1.
            mr[ci] = np.interp(mr_fppi_score, conf[i], miss_rate[:, 0])
            print("miss_rate.shape:", miss_rate.shape)
            print("mr.shape:", mr.shape)
            print("miss_rate < 0:", (miss_rate < 0).sum())

            # FPPI
            false_positives_per_image = fpc / num_image
            fppi[ci] = np.interp(mr_fppi_score, conf[i], false_positives_per_image[:, 0])
            print("false_positives_per_image.shape:", false_positives_per_image.shape)
            print("fppi.shape:", fppi.shape)

            # LAMR for FPPI-MR curve
            for j in range(tp.shape[1]):
                # 9 log-spaced FPPI reference points in [0.01, 1]; FPPI is a
                # cumulative sum, hence increasing, so interp is well-defined.
                ref = np.logspace(-2.0, 0.0, num=9)
                ref = np.interp(ref, false_positives_per_image[:, j], miss_rate[:, j], left=1, right=0)
                # Geometric mean of miss rates, clamped to avoid log(0).
                lamr[ci, j] = np.exp(np.mean(np.log(np.maximum(1e-10, ref))))

                if plot and (j == 0):
                    py.append(np.interp(px, false_positives_per_image[:, j], miss_rate[:, j], left=1, right=0))
            
    if plot:
        plot_mr_fppi_curve(px, py, lamr, save_dir, names)
    
    return mr, fppi, lamr, unique_classes.astype('int32')


def plot_mr_fppi_curve(px, py, lamr, save_dir='.', names=()):
    """Plot miss rate vs. false positives per image (log-x) and save a PNG.

    Args:
        px: shared 1-D FPPI sample grid for all curves.
        py: list of per-class miss-rate arrays (one entry per plotted class).
        lamr: per-class log-average miss rate, shape (num_classes, num_iou);
            column 0 is used for the legend labels.
        save_dir: directory the figure 'mr_fppi_curve_person.png' is saved to.
        names: class display names; per-class legend entries are shown only
            when 0 < len(names) < 21, otherwise curves are drawn anonymously.
    """
    fig, ax = plt.subplots(1, 1, figsize=(9, 6))
    py = np.stack(py, axis=1)  # -> (len(px), num_classes)

    if 0 < len(names) < 21:  # label per-class curves while the legend stays readable
        for i, y in enumerate(py.T):
            ax.plot(px, y, linewidth=1, label=f'{names[i]} %.3f' % lamr[i, 0])
    else:
        ax.plot(px, py, linewidth=1, color='grey')

    ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f LAMR@0.5' % lamr[:, 0].mean())
    ax.set_xlabel('False Positives Per Image (FPPI)')
    ax.set_ylabel('Miss Rate')
    ax.set_xscale('log')
    # Pass visibility positionally: the keyword was renamed from `b` to
    # `visible` in Matplotlib 3.5 and `b` was removed in 3.6, so `b=True`
    # raises TypeError on current Matplotlib; the positional form works on
    # every version.
    plt.grid(True, which='major', linestyle='-', linewidth=1)
    plt.grid(True, which='minor', linestyle='dotted', linewidth=0.7)
    plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    fig.tight_layout()
    fig.savefig(Path(save_dir) / 'mr_fppi_curve_person.png', dpi=250)




if __name__ == '__main__':
    args = parse_args()

    # Load the test-time configuration for the requested model.
    cfg = get_test_cfg(f'configs/model/{args.model}.yaml')

    # Dataset name lists: train split for training sets, test split otherwise.
    train_datasets = [f'{name}-train' for name in args.datasets.split(',')]
    val_datasets = [f'{name}-test' for name in args.val_datasets.split(',')]
    test_datasets = [f'{name}-test' for name in args.test_datasets.split(',')]

    # Checkpoint directory is keyed by model plus the train/val combination.
    run_tag = '{}/[train]-{}/[val]-{}'.format(
        args.model, '&'.join(train_datasets), '&'.join(val_datasets))
    cfg.OUTPUT_DIR = os.path.join(cfg.OUTPUT_DIR, "checkpoint", run_tag)

    # Resolve which weights file to load: 'best', 'last' (None -> let the
    # predictor decide), or a zero-padded iteration number.
    if args.checkpoint == 'best':
        cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, 'model_best.pth')
    elif args.checkpoint == 'last':
        cfg.MODEL.WEIGHTS = None
    else:
        cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, f'model_{args.checkpoint:0>8}.pth')

    cfg.MODEL.DEVICES = [f'cuda:{gpu}' for gpu in args.gpus.split(',')]

    predictor = Predictor(cfg)

    dataset = BatchDataset(FullDataset(test_datasets), batch_size=args.batch_size, drop_last=False)

    # Output directory encodes every knob that affects the evaluation result.
    eval_tag = '{}/[train]-{}/[val]-{}/[test]-{}-[checkpoint]-{}/[nms]-{}-[evaluate]-{}-{}-{}'.format(
        args.model,
        '&'.join(train_datasets),
        '&'.join(val_datasets),
        '&'.join(test_datasets),
        args.checkpoint,
        args.nms_threshold,
        args.iou_cover_threshold,
        args.proposals_cover_threshold,
        args.gt_boxes_cover_threshold,
    )
    output_dir = os.path.join('output', 'evaluate3_pipeline_lamr', eval_tag)
    os.makedirs(output_dir, exist_ok=True)

    kwargs = {
        'threshold': args.threshold,
        'nms_threshold': args.nms_threshold,
        'iou_cover_threshold': args.iou_cover_threshold,
        'proposals_cover_threshold': args.proposals_cover_threshold,
        'gt_boxes_cover_threshold': args.gt_boxes_cover_threshold,
        'class_names': ['person'],
        'save_dir': output_dir,
    }

    evaluate_dict = predict_and_evaluate(predictor, dataset, **kwargs)

    # Persist the metric tree as pretty-printed text, and echo it to stdout.
    output_path = os.path.join(output_dir, 'evaluate_dict.txt')
    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    with open(output_path, 'w', encoding='utf-8') as f:
        pprint(evaluate_dict, stream=f)

    pprint(evaluate_dict)

    print(f"save evaluate dict at {output_path}")
    print("done")
