import torch
import numpy as np
import os
from tqdm import tqdm
import argparse
import pickle
import shutil
import copy
from pprint import pprint

from data.utils import read_image
from data.dataset import build_dataset
from engine import Predictor
from utils.config import get_test_cfg
from utils.boxes import modified_postprocess as postprocess
from utils.evaluate import get_area, get_intersection_area_matrix, evaluate_matrix2, get_precision, get_recall


def parse_args():
    """Build and parse the command-line interface for the evaluation run.

    Returns:
        argparse.Namespace with model/dataset selectors, checkpoint choice,
        GPU ids, batch size and the detection/evaluation thresholds.
    """
    parser = argparse.ArgumentParser(description="predict net")

    # Required string selectors.
    for flag, help_text in (
        ("--model", 'model name'),
        ("--datasets", 'train datasets name'),
        ("--val-datasets", 'val datasets name'),
        ("--test-datasets", 'datasets for sampling'),
    ):
        parser.add_argument(flag, type=str, help=help_text, required=True)

    parser.add_argument("--checkpoint", type=str, help="checkpoint iter, can be 'last', 'best', iternumber", default='best')
    parser.add_argument("--gpus", type=str, help="use gpus id", default='0')
    parser.add_argument("--batch-size", type=int, help='batch size', default=16)

    # Optional float thresholds with their defaults.
    for flag, help_text, default in (
        ("--threshold", 'threshold', 0.2),
        ("--nms-threshold", 'nms threshold', 0.5),
        ("--iou-cover-threshold", 'iou cover threshold', 0.5),
        ("--proposals-cover-threshold", 'proposals cover threshold', 0.7),
        ("--gt-boxes-cover-threshold", 'gt boxes cover threshold', 0.7),
    ):
        parser.add_argument(flag, type=float, help=help_text, default=default)

    return parser.parse_args()

class FullDataset:
    """Index-based dataset yielding (filename, image, gt boxes, gt labels).

    Wraps the record dicts produced by `build_dataset` and materializes
    the image plus ground-truth arrays on access.
    """

    def __init__(self, dataset_names):
        # Merge all named datasets into one flat list of record dicts.
        self.dataset_dicts = build_dataset(*dataset_names)

    def __len__(self):
        return len(self.dataset_dicts)

    def __getitem__(self, index):
        record = self.dataset_dicts[index]
        annotations = record['annotations']

        # (N, 4) box array; reshape keeps the shape valid even with no boxes.
        boxes = np.array([ann['bbox'] for ann in annotations]).reshape(-1, 4)
        labels = np.array([ann['category_id'] for ann in annotations], dtype=np.uint8)

        image = read_image(
            record['file_name'],
            preprocess_method=record['preprocess'],
            params=record['params'],
        )
        return record['file_name'], image, boxes, labels

class BatchDataset:
    """Group an indexable dataset of per-sample tuples into mini-batches.

    Each batch is returned column-wise: a batch of (a, b, c) samples becomes
    an iterator of three sequences (all a's, all b's, all c's), i.e. the
    transpose of the sample tuples.

    Raises:
        ValueError: if ``batch_size`` is not positive, or if the dataset is
            too small to yield at least one batch.
    """

    def __init__(self, dataset, batch_size, drop_last=False):
        # Validate eagerly with real exceptions instead of `assert`, which
        # is silently stripped under `python -O`.
        if batch_size <= 0:
            raise ValueError("batch_size must be positive, got {}".format(batch_size))

        self.dataset = dataset
        self.drop_last = drop_last
        self.batch_size = batch_size

        if len(self) == 0:
            raise ValueError(
                "dataset of size {} yields no batches with batch_size={} "
                "and drop_last={}".format(len(dataset), batch_size, drop_last))

    def __len__(self):
        length = len(self.dataset) // self.batch_size
        # A trailing partial batch counts unless drop_last is requested.
        if not self.drop_last and len(self.dataset) % self.batch_size != 0:
            length += 1
        return length

    def __getitem__(self, index):
        # slice.indices clamps the range to the dataset bounds, so the last
        # (possibly partial) batch is handled without special-casing.
        start, stop, step = slice(index * self.batch_size, (index + 1) * self.batch_size).indices(len(self.dataset))
        # zip(*) transposes per-sample tuples into per-field sequences.
        return zip(*[self.dataset[i] for i in range(start, stop, step)])

    def __iter__(self):
        for i in range(len(self)):
            yield self[i]

def predict_and_evaluate(predictor, dataset, **kwargs):
    """Run `predictor` over `dataset` and accumulate per-class detection metrics.

    Three overlap strategies are scored independently for each class:
      - 'iou_cover':       intersection / union (standard IoU),
      - 'proposals_cover': intersection / proposal area (fraction of each
                           proposal covered by a ground-truth box),
      - 'gt_boxes_cover':  intersection / ground-truth area (fraction of each
                           ground-truth box covered by a proposal).

    Args:
        predictor: callable taking a list of images and returning a dict with
            per-image 'bboxes' and 'classification' entries.
        dataset: iterable yielding batches of
            (filenames, images, gt_boxes, gt_classifications).
        **kwargs: must contain 'threshold', 'nms_threshold',
            'iou_cover_threshold', 'proposals_cover_threshold' and
            'gt_boxes_cover_threshold'.

    Returns:
        Nested dict: top-level thresholds plus a 'classes' mapping from
        class id to per-strategy TP/TP_gt/FP/FN counts, macro (per-image
        averaged) and micro (globally pooled) precision/recall, image count
        and average proposal count.
    """
    evaluate_dict = {
        'threshold': kwargs['threshold'],
        'nms_threshold': kwargs['nms_threshold'],
        'classes': {},
    }
    # Deep-copied once per newly seen class; accumulators start at zero.
    class_evaluate_dict_template = {
        'image_count': 0,
        'average_proposal_count': 0,
        'iou_cover': {
            'threshold': kwargs['iou_cover_threshold'],
            'TP': 0,
            'TP_gt': 0,
            'FP': 0,
            'FN': 0,
            'macro_precision': 0,
            'macro_recall': 0,
            'micro_precision': 0,
            'micro_recall': 0,
        },
        'proposals_cover': {
            'threshold': kwargs['proposals_cover_threshold'],
            'TP': 0,
            'TP_gt': 0,
            'FP': 0,
            'FN': 0,
            'macro_precision': 0,
            'macro_recall': 0,
            'micro_precision': 0,
            'micro_recall': 0,
        },
        'gt_boxes_cover': {
            'threshold': kwargs['gt_boxes_cover_threshold'],
            'TP': 0,
            'TP_gt': 0,
            'FP': 0,
            'FN': 0,
            'macro_precision': 0,
            'macro_recall': 0,
            'micro_precision': 0,
            'micro_recall': 0,
        },
    }

    for filename_list, image_list, gt_boxes_list, gt_classification_list in tqdm(dataset):
        data_dict = predictor(image_list)

        for filename, gt_boxes_all_classes, gt_classification, bboxes, classification in zip(filename_list, gt_boxes_list, gt_classification_list, data_dict['bboxes'], data_dict['classification']):
            gt_class_ids_set = set(gt_classification)

            # Score-threshold + NMS the raw predictions for this image.
            data = postprocess(bboxes, classification, kwargs['threshold'], kwargs['nms_threshold'])
            # NOTE(review): only classes predicted AND present in the ground
            # truth are scored — classes missed entirely by the predictor
            # contribute no FN here. Confirm this is intended.
            class_ids_set = set(data['class_ids']).intersection(gt_class_ids_set)

            for class_id in class_ids_set:
                if class_id not in evaluate_dict["classes"]:
                    evaluate_dict["classes"][class_id] = copy.deepcopy(class_evaluate_dict_template)

                evaluate_dict['classes'][class_id]['image_count'] += 1

                # Restrict both sides to the current class.
                proposals = data['rois'][data['class_ids'] == class_id]
                gt_boxes = gt_boxes_all_classes[gt_classification == class_id]

                proposals_area = get_area(proposals)
                gt_boxes_area = get_area(gt_boxes)
                # (num_proposals, num_gt) pairwise intersection areas.
                intersection_area_matrix = get_intersection_area_matrix(proposals, gt_boxes)
                union_area_matrix = proposals_area.reshape(-1,1) + gt_boxes_area.reshape(1,-1) - intersection_area_matrix

                # Denominator for each overlap strategy (broadcast against
                # the intersection matrix below).
                temp = {
                    'iou_cover': union_area_matrix,
                    'proposals_cover': proposals_area.reshape(-1,1),
                    'gt_boxes_cover': gt_boxes_area.reshape(1,-1)
                }

                for strategy, matrix in temp.items():
                    # NOTE(review): a zero-area box yields division by zero
                    # here (NaN/inf under numpy) — verify upstream filtering.
                    matrix = intersection_area_matrix / matrix
                    TP, TP_gt, FP, FN = evaluate_matrix2(matrix, evaluate_dict["classes"][class_id][strategy]['threshold'])
                    evaluate_dict["classes"][class_id][strategy]['TP'] += TP
                    evaluate_dict["classes"][class_id][strategy]['TP_gt'] += TP_gt
                    evaluate_dict["classes"][class_id][strategy]['FP'] += FP
                    evaluate_dict["classes"][class_id][strategy]['FN'] += FN
                    # Macro metrics are summed per image here and divided by
                    # image_count at the end. Recall is computed from TP_gt
                    # (ground-truth-side matches), precision from TP
                    # (proposal-side matches).
                    evaluate_dict["classes"][class_id][strategy]['macro_precision'] += get_precision(TP, FP, FN)
                    evaluate_dict["classes"][class_id][strategy]['macro_recall'] += get_recall(TP_gt, FP, FN)

                evaluate_dict["classes"][class_id]['average_proposal_count'] += len(proposals)


    # Convert running sums into per-image averages.
    for class_id in evaluate_dict['classes'].keys():
        evaluate_dict["classes"][class_id]['average_proposal_count'] /= evaluate_dict["classes"][class_id]['image_count']

    for strategy in ['iou_cover', 'proposals_cover', 'gt_boxes_cover']:
        for class_id in evaluate_dict['classes'].keys():
            evaluate_dict["classes"][class_id][strategy]['macro_precision'] /= evaluate_dict["classes"][class_id]['image_count']
            evaluate_dict["classes"][class_id][strategy]['macro_recall'] /= evaluate_dict["classes"][class_id]['image_count']
            # Micro metrics pool the raw counts across all images first.
            evaluate_dict["classes"][class_id][strategy]['micro_precision'] = get_precision(evaluate_dict["classes"][class_id][strategy]['TP'], evaluate_dict["classes"][class_id][strategy]['FP'], evaluate_dict["classes"][class_id][strategy]['FN'])
            evaluate_dict["classes"][class_id][strategy]['micro_recall'] = get_recall(evaluate_dict["classes"][class_id][strategy]['TP_gt'], evaluate_dict["classes"][class_id][strategy]['FP'], evaluate_dict["classes"][class_id][strategy]['FN'])

    return evaluate_dict



if __name__ == '__main__':
    args = parse_args()

    # Load the test-time config for the requested model.
    cfg = get_test_cfg('configs/model/{}.yaml'.format(args.model))

    # Expand comma-separated dataset names into split-qualified names.
    train_datasets = ['{}-train'.format(name) for name in args.datasets.split(',')]
    val_datasets = ['{}-test'.format(name) for name in args.val_datasets.split(',')]
    test_datasets = ['{}-test'.format(name) for name in args.test_datasets.split(',')]

    cfg.OUTPUT_DIR = os.path.join(
        cfg.OUTPUT_DIR, "checkpoint",
        "{}/[train]-{}/[val]-{}".format(args.model, '&'.join(train_datasets), '&'.join(val_datasets)))

    # Resolve which weights file to load; 'last' leaves WEIGHTS unset so the
    # engine falls back to its own default.
    if args.checkpoint == 'last':
        cfg.MODEL.WEIGHTS = None
    elif args.checkpoint == 'best':
        cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, 'model_best.pth')
    else:
        cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, 'model_{:0>8}.pth'.format(args.checkpoint))

    cfg.MODEL.DEVICES = ["cuda:{}".format(gpu_id) for gpu_id in args.gpus.split(',')]

    predictor = Predictor(cfg)
    batched_dataset = BatchDataset(FullDataset(test_datasets), batch_size=args.batch_size, drop_last=False)

    eval_params = dict(
        threshold=args.threshold,
        nms_threshold=args.nms_threshold,
        iou_cover_threshold=args.iou_cover_threshold,
        proposals_cover_threshold=args.proposals_cover_threshold,
        gt_boxes_cover_threshold=args.gt_boxes_cover_threshold,
    )
    evaluate_dict = predict_and_evaluate(predictor, batched_dataset, **eval_params)

    # Persist the metrics under a path that encodes every run parameter.
    output_path = os.path.join(
        'output', 'evaluate3_pipeline',
        '{}/[train]-{}/[val]-{}/[test]-{}-[checkpoint]-{}/[nms]-{}-[evaluate]-{}-{}-{}.txt'.format(
            args.model, '&'.join(train_datasets), '&'.join(val_datasets),
            '&'.join(test_datasets), args.checkpoint, args.nms_threshold,
            args.iou_cover_threshold, args.proposals_cover_threshold,
            args.gt_boxes_cover_threshold))
    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    with open(output_path, 'w', encoding='utf-8') as f:
        pprint(evaluate_dict, stream=f)

    pprint(evaluate_dict)

    print("save evaluate dict at {}".format(output_path))
    print("done")
