from easycore.common.registry import Registry

METRIC_REGISTRY = Registry("metric")


class HighestRecallWithHighestPrecision:
    """Select, per cover strategy, the evaluate dicts with the highest recall,
    breaking ties by the highest precision.

    Each entry of ``evaluate_dict_list`` is expected to map each strategy name
    to a dict containing ``'<metric_type>_precision'`` and
    ``'<metric_type>_recall'`` keys (schema inferred from usage — confirm
    against the producer of these dicts).
    """

    def __init__(self, metric_type='macro'):
        assert metric_type in ['macro', 'micro']
        self.metric_type = metric_type
        self.name = 'highest {} recall with highest {} precision'.format(metric_type, metric_type)

    def __call__(self, evaluate_dict_list):
        """Return ``{strategy: [winning evaluate dicts]}`` for the three strategies.

        All dicts tied on both recall and precision are returned together.
        """
        result_dict = {}
        for strategy in ['iou_cover', 'proposals_cover', 'gt_boxes_cover']:
            result_list = []
            max_recall = 0
            max_precision = 0
            for evaluate_dict in evaluate_dict_list:
                precision = evaluate_dict[strategy]['{}_precision'.format(self.metric_type)]
                recall = evaluate_dict[strategy]['{}_recall'.format(self.metric_type)]
                if recall > max_recall:
                    # Strictly better recall: restart the winner list.
                    max_recall = recall
                    max_precision = precision
                    result_list = [evaluate_dict]
                elif recall == max_recall:
                    if precision > max_precision:
                        max_precision = precision
                        # BUG FIX: previously stored the whole input list
                        # ([evaluate_dict_list]) instead of the single dict.
                        result_list = [evaluate_dict]
                    elif precision == max_precision:
                        # Exact tie on both metrics: keep all of them.
                        result_list.append(evaluate_dict)
            result_dict[strategy] = result_list
        return result_dict

# Register the recall/precision tie-break metric for both averaging modes.
for metric_type in ('macro', 'micro'):
    metric = HighestRecallWithHighestPrecision(metric_type)
    METRIC_REGISTRY.register(metric.name, metric)




class HighestF1Score:
    """Select, per cover strategy, the evaluate dicts with the maximal F1 score.

    F1 is the harmonic mean ``2 * p * r / (p + r)``; entries with zero (or
    negative) precision or recall are skipped entirely.
    """

    def __init__(self, metric_type):
        assert metric_type in ('macro', 'micro')
        self.metric_type = metric_type
        self.name = f'highest {metric_type} f1 score'

    def __call__(self, evaluate_dict_list):
        """Return ``{strategy: [evaluate dicts sharing the best F1]}``."""
        best_per_strategy = {}
        precision_key = f'{self.metric_type}_precision'
        recall_key = f'{self.metric_type}_recall'
        for strategy in ('iou_cover', 'proposals_cover', 'gt_boxes_cover'):
            best = []
            best_f1 = 0
            for entry in evaluate_dict_list:
                p = entry[strategy][precision_key]
                r = entry[strategy][recall_key]
                if p <= 0 or r <= 0:
                    continue  # F1 is undefined/zero here; original skips too
                f1 = 2 * p * r / (p + r)
                if f1 > best_f1:
                    best_f1 = f1
                    best = [entry]
                elif f1 == best_f1:
                    best.append(entry)
            best_per_strategy[strategy] = best
        return best_per_strategy
        
# Register the F1-score metric for both averaging modes.
for metric_type in ('macro', 'micro'):
    metric = HighestF1Score(metric_type)
    METRIC_REGISTRY.register(metric.name, metric)




class HighestBalanceScore:
    """Select the evaluate dicts with the highest "balance score", which trades
    off precision, recall, and keeping the average proposal count inside a
    target window.

    1 / (balance_score) = 1 / precision + 1 / recall + (a * number_of_proposals - b / number_of_proposals)^2
    a = 1 / (proposal_count_max - proposal_count_min)
    b = (proposal_count_max * proposal_count_min) / (proposal_count_max - proposal_count_min)
    """

    def __init__(self, metric_type, proposal_count_min, proposal_count_max):
        assert metric_type in ('macro', 'micro')
        self.metric_type = metric_type
        self.name = 'highest {} balance score between {} to {}'.format(metric_type, proposal_count_min, proposal_count_max)
        self.proposal_count_min = proposal_count_min
        self.proposal_count_max = proposal_count_max

    def __call__(self, evaluate_dict_list):
        """Return ``{strategy: [evaluate dicts sharing the best score]}``."""
        precision_key = '{}_precision'.format(self.metric_type)
        recall_key = '{}_recall'.format(self.metric_type)
        # a and b depend only on the configured window, so compute them once.
        span = self.proposal_count_max - self.proposal_count_min
        a = 1 / span
        b = self.proposal_count_min * self.proposal_count_max / span
        result_dict = {}
        for strategy in ('iou_cover', 'proposals_cover', 'gt_boxes_cover'):
            best = []
            best_score = 0
            for entry in evaluate_dict_list:
                precision = entry[strategy][precision_key]
                recall = entry[strategy][recall_key]
                count = entry['average_proposal_count']
                if precision <= 0 or recall <= 0 or count <= 0.0:
                    continue  # score undefined for non-positive inputs
                score = 1 / (1 / precision + 1 / recall + (a * count - b / count) ** 2)
                if score > best_score:
                    best_score = score
                    best = [entry]
                elif score == best_score:
                    best.append(entry)
            result_dict[strategy] = best
        return result_dict

# Register balance-score metrics: both averaging modes crossed with each
# target proposal-count window (macro variants first, as before).
for metric_type in ('macro', 'micro'):
    for count_min, count_max in ((0.1, 5.0), (0.5, 10.0), (9.0, 11.0), (19.0, 21.0)):
        metric = HighestBalanceScore(metric_type, count_min, count_max)
        METRIC_REGISTRY.register(metric.name, metric)


class HighestBalanceScoreWithClasses:
    """Per-class variant of the balance-score metric: scores are computed from
    the ``evaluate_dict['classes'][class_id]`` sub-dict; entries missing the
    class are ignored.

    1 / (balance_score) = 1 / precision + 1 / recall + (a * number_of_proposals - b / number_of_proposals)^2
    a = 1 / (proposal_count_max - proposal_count_min)
    b = (proposal_count_max * proposal_count_min) / (proposal_count_max - proposal_count_min)
    """

    def __init__(self, metric_type, proposal_count_min, proposal_count_max, class_id):
        assert metric_type in ('macro', 'micro')
        self.metric_type = metric_type
        self.name = 'highest {} balance score between {} to {} in class {}'.format(metric_type, proposal_count_min, proposal_count_max, class_id)
        self.proposal_count_min = proposal_count_min
        self.proposal_count_max = proposal_count_max
        self.class_id = class_id

    def __call__(self, evaluate_dict_list):
        """Return ``{strategy: [evaluate dicts sharing the best per-class score]}``."""
        precision_key = '{}_precision'.format(self.metric_type)
        recall_key = '{}_recall'.format(self.metric_type)
        # Window-derived constants are loop-invariant; compute once.
        span = self.proposal_count_max - self.proposal_count_min
        a = 1 / span
        b = self.proposal_count_min * self.proposal_count_max / span
        result_dict = {}
        for strategy in ('iou_cover', 'proposals_cover', 'gt_boxes_cover'):
            best = []
            best_score = 0
            for entry in evaluate_dict_list:
                if self.class_id not in entry["classes"]:
                    continue  # this run produced no stats for the class
                class_stats = entry["classes"][self.class_id]
                precision = class_stats[strategy][precision_key]
                recall = class_stats[strategy][recall_key]
                count = class_stats['average_proposal_count']
                if precision <= 0 or recall <= 0 or count <= 0.0:
                    continue  # score undefined for non-positive inputs
                score = 1 / (1 / precision + 1 / recall + (a * count - b / count) ** 2)
                if score > best_score:
                    best_score = score
                    best = [entry]
                elif score == best_score:
                    best.append(entry)
            result_dict[strategy] = best
        return result_dict

# Register per-class balance-score metrics for the 9-11 proposal window,
# macro and micro for each class (same registration order as before).
for class_id in (0, 1):
    for metric_type in ('macro', 'micro'):
        metric = HighestBalanceScoreWithClasses(metric_type, 9.0, 11.0, class_id)
        METRIC_REGISTRY.register(metric.name, metric)



class HighestRecallWithLeastPrecisionWithClass:
    """Select, per cover strategy, the evaluate dicts with the highest recall
    for a single class, considering only entries whose per-class precision is
    strictly above ``least_precision`` (and recall is positive).

    Entries without stats for ``class_id`` are ignored.
    """
    # NOTE: the previous docstring on __call__ was copy-pasted from the
    # balance-score metric and described a formula this class never computes.

    def __init__(self, metric_type, least_precision, class_id):
        assert metric_type in ['macro', 'micro']
        self.metric_type = metric_type
        self.name = 'highest {} recall with least precision {} in class {}'.format(metric_type, least_precision, class_id)
        self.least_precision = least_precision
        self.class_id = class_id

    def __call__(self, evaluate_dict_list):
        """Return ``{strategy: [evaluate dicts sharing the best recall]}``
        among entries passing the precision floor."""
        result_dict = {}
        for strategy in ['iou_cover', 'proposals_cover', 'gt_boxes_cover']:
            result_list = []
            max_recall = 0
            for evaluate_dict in evaluate_dict_list:
                if self.class_id not in evaluate_dict["classes"]:
                    continue  # this run produced no stats for the class
                precision = evaluate_dict["classes"][self.class_id][strategy]['{}_precision'.format(self.metric_type)]
                recall = evaluate_dict["classes"][self.class_id][strategy]['{}_recall'.format(self.metric_type)]

                # Precision floor is strict: exactly least_precision is rejected.
                if precision > self.least_precision and recall > 0:
                    if recall > max_recall:
                        max_recall = recall
                        result_list = [evaluate_dict]
                    elif recall == max_recall:
                        result_list.append(evaluate_dict)
            result_dict[strategy] = result_list
        return result_dict

# Register micro-recall metrics at several precision floors, for each class
# (same registration order as before: floor-major, class-minor).
for least_precision in (0.3, 0.35, 0.4):
    for class_id in (0, 1):
        metric = HighestRecallWithLeastPrecisionWithClass('micro', least_precision, class_id)
        METRIC_REGISTRY.register(metric.name, metric)


class LowestProposalCountWithLeastRecallWithClass:
    """Select, per cover strategy, the evaluate dicts with the lowest per-class
    average proposal count, considering only entries whose per-class recall is
    strictly above ``least_recall`` (and proposal count is positive).

    Entries without stats for ``class_id`` are ignored.
    """

    def __init__(self, metric_type, least_recall, class_id):
        assert metric_type in ['macro', 'micro']
        self.metric_type = metric_type
        self.name = 'lowest average proposal count with least {} recall {} in class {}'.format(metric_type, least_recall, class_id)
        self.least_recall = least_recall
        self.class_id = class_id

    def __call__(self, evaluate_dict_list):
        """Return ``{strategy: [evaluate dicts sharing the lowest count]}``
        among entries passing the recall floor."""
        result_dict = {}
        for strategy in ['iou_cover', 'proposals_cover', 'gt_boxes_cover']:
            result_list = []
            # BUG FIX: was a magic sentinel of 100.0, which silently excluded
            # any candidate averaging >= 100 proposals (and would spuriously
            # append a candidate with exactly 100.0).
            min_proposal_count = float('inf')
            for evaluate_dict in evaluate_dict_list:
                if self.class_id not in evaluate_dict["classes"]:
                    continue  # this run produced no stats for the class
                recall = evaluate_dict["classes"][self.class_id][strategy]['{}_recall'.format(self.metric_type)]
                proposal_count = evaluate_dict["classes"][self.class_id]['average_proposal_count']

                # Recall floor is strict: exactly least_recall is rejected.
                if recall > self.least_recall and proposal_count > 0:
                    if min_proposal_count > proposal_count:
                        min_proposal_count = proposal_count
                        result_list = [evaluate_dict]
                    elif proposal_count == min_proposal_count:
                        result_list.append(evaluate_dict)
            result_dict[strategy] = result_list
        return result_dict

# Register lowest-proposal-count metrics at recall floors 0.90-0.99, for each
# class (same registration order as before: floor-major, class-minor).
for least_recall in (0.9, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98, 0.99):
    for class_id in (0, 1):
        metric = LowestProposalCountWithLeastRecallWithClass('micro', least_recall, class_id)
        METRIC_REGISTRY.register(metric.name, metric)
