# Copyright (c) OpenMMLab. All rights reserved.
import bisect
import os.path as osp

import mmcv
import torch.distributed as dist
from mmcv.runner import DistEvalHook as BaseDistEvalHook
from mmcv.runner import EvalHook as BaseEvalHook
from torch.nn.modules.batchnorm import _BatchNorm


def _calc_dynamic_intervals(start_interval, dynamic_interval_list):
    assert mmcv.is_list_of(dynamic_interval_list, tuple)

    dynamic_milestones = [0]
    dynamic_milestones.extend(
        [dynamic_interval[0] for dynamic_interval in dynamic_interval_list])
    dynamic_intervals = [start_interval]
    dynamic_intervals.extend(
        [dynamic_interval[1] for dynamic_interval in dynamic_interval_list])
    return dynamic_milestones, dynamic_intervals


class EvalHook(BaseEvalHook):
    """Single-GPU evaluation hook with optional dynamic evaluation intervals.

    Args:
        dynamic_intervals (list[tuple[int, int]] | None): Each tuple is
            ``(milestone, interval)``: once training progress (epoch or
            iter, depending on ``by_epoch``) reaches ``milestone``, the
            evaluation interval switches to ``interval``. ``None`` keeps
            the static interval of the base hook.
    """

    def __init__(self, *args, dynamic_intervals=None, **kwargs):
        super(EvalHook, self).__init__(*args, **kwargs)

        self.use_dynamic_intervals = dynamic_intervals is not None
        if self.use_dynamic_intervals:
            self.dynamic_milestones, self.dynamic_intervals = \
                _calc_dynamic_intervals(self.interval, dynamic_intervals)

    def _decide_interval(self, runner):
        """Update ``self.interval`` for the current training progress."""
        if self.use_dynamic_intervals:
            progress = runner.epoch if self.by_epoch else runner.iter
            # bisect finds the rightmost milestone not exceeding progress+1,
            # i.e. the interval segment the next step falls into.
            step = bisect.bisect(self.dynamic_milestones, (progress + 1))
            # Dynamically modify the evaluation interval
            self.interval = self.dynamic_intervals[step - 1]

    def before_train_epoch(self, runner):
        """Evaluate the model only at the start of training by epoch."""
        self._decide_interval(runner)
        super().before_train_epoch(runner)

    def before_train_iter(self, runner):
        self._decide_interval(runner)
        super().before_train_iter(runner)

    def _do_evaluate(self, runner):
        """Perform evaluation and save ckpt."""
        if not self._should_evaluate(runner):
            return

        from mmdet.apis import single_gpu_test
        results = single_gpu_test(runner.model, self.dataloader, show=False)
        runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
        key_score = self.evaluate(runner, results)
        if self.save_best:
            self._save_ckpt(runner, key_score)

    def evaluate(self, runner, results):
        """Evaluate the results and return the ``save_best`` score.

        The keys of the first result element ('seg_out', 'cls_out',
        'det_out') decide which dataset evaluation methods are invoked.

        Args:
            runner (:obj:`mmcv.Runner`): The underlying training runner.
            results (list): Per-sample model outputs.

        Returns:
            float | None: Value of ``self.key_indicator`` from the last
            evaluated result dict when ``save_best`` is set, else ``None``.
        """
        # Initialized so a results list with no recognized key fails with a
        # clear KeyError/IndexError below instead of a NameError.
        eval_res = {}

        if 'seg_out' in results[0]:
            eval_res = self.dataloader.dataset.evaluate_seg(
                results, logger=runner.logger, **self.eval_kwargs)
            # Bugfix: segmentation metrics were computed but never logged,
            # unlike the cls/det branches; prefix mirrors the cls branch.
            for name, val in eval_res.items():
                runner.log_buffer.output['seg_' + name] = val

        if 'cls_out' in results[0]:
            eval_res = self.dataloader.dataset.evaluate_cls(
                results, logger=runner.logger, **self.eval_kwargs)
            for name, val in eval_res.items():
                runner.log_buffer.output['cls_' + name] = val

        if 'det_out' in results[0]:
            eval_res = self.dataloader.dataset.evaluate(
                results, logger=runner.logger, **self.eval_kwargs)
            for name, val in eval_res.items():
                runner.log_buffer.output[name] = val

        runner.log_buffer.ready = True

        if self.save_best is not None:
            if self.key_indicator == 'auto':
                # infer the comparison rule from the first metric key
                self._init_rule(self.rule, list(eval_res.keys())[0])
            return eval_res[self.key_indicator]

        return None






# Note: MMCV's EvalHook updated its interface in v1.3.16. To avoid a strong
# version dependency, we do not inherit from MMCV's EvalHook directly but
# from BaseDistEvalHook instead.


class DistEvalHook(BaseDistEvalHook):
    """Distributed evaluation hook with optional dynamic evaluation intervals.

    Args:
        dynamic_intervals (list[tuple[int, int]] | None): Each tuple is
            ``(milestone, interval)``: once training progress (epoch or
            iter, depending on ``by_epoch``) reaches ``milestone``, the
            evaluation interval switches to ``interval``. ``None`` keeps
            the static interval of the base hook.
    """

    def __init__(self, *args, dynamic_intervals=None, **kwargs):
        super(DistEvalHook, self).__init__(*args, **kwargs)

        self.use_dynamic_intervals = dynamic_intervals is not None
        if self.use_dynamic_intervals:
            self.dynamic_milestones, self.dynamic_intervals = \
                _calc_dynamic_intervals(self.interval, dynamic_intervals)

    def _decide_interval(self, runner):
        """Update ``self.interval`` for the current training progress."""
        if self.use_dynamic_intervals:
            progress = runner.epoch if self.by_epoch else runner.iter
            # bisect finds the rightmost milestone not exceeding progress+1,
            # i.e. the interval segment the next step falls into.
            step = bisect.bisect(self.dynamic_milestones, (progress + 1))
            # Dynamically modify the evaluation interval
            self.interval = self.dynamic_intervals[step - 1]

    def before_train_epoch(self, runner):
        """Evaluate the model only at the start of training by epoch."""
        self._decide_interval(runner)
        super().before_train_epoch(runner)

    def before_train_iter(self, runner):
        self._decide_interval(runner)
        super().before_train_iter(runner)

    def _do_evaluate(self, runner):
        """Perform evaluation and save ckpt."""
        # Synchronization of BatchNorm's buffer (running_mean
        # and running_var) is not supported in the DDP of pytorch,
        # which may cause the inconsistent performance of models in
        # different ranks, so we broadcast BatchNorm's buffers
        # of rank 0 to other ranks to avoid this.
        if self.broadcast_bn_buffer:
            model = runner.model
            for name, module in model.named_modules():
                if isinstance(module,
                              _BatchNorm) and module.track_running_stats:
                    dist.broadcast(module.running_var, 0)
                    dist.broadcast(module.running_mean, 0)

        if not self._should_evaluate(runner):
            return

        tmpdir = self.tmpdir
        if tmpdir is None:
            tmpdir = osp.join(runner.work_dir, '.eval_hook')

        from mmdet.apis import multi_gpu_test
        results = multi_gpu_test(
            runner.model,
            self.dataloader,
            tmpdir=tmpdir,
            gpu_collect=self.gpu_collect)
        # Only rank 0 collects the full result list; the other ranks return
        # None from multi_gpu_test and must not evaluate.
        if runner.rank == 0:
            print('\n')
            runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
            key_score = self.evaluate(runner, results)

            if self.save_best:
                self._save_ckpt(runner, key_score)

    def evaluate(self, runner, results):
        """Evaluate the results and return the ``save_best`` score.

        The keys of the first result element ('seg_out', 'cls_out',
        'det_out') decide which dataset evaluation methods are invoked.
        When segmentation and classification heads are both present, the
        key indicator is switched to the combined 'all' score
        (mIoU + f1_score + mAP).

        Args:
            runner (:obj:`mmcv.Runner`): The underlying training runner.
            results (list): Per-sample model outputs.

        Returns:
            float | None: Score of ``self.key_indicator`` (or the combined
            'all' score) when ``save_best`` is set, else ``None``.
        """
        # Initialized so a results list with no recognized key fails with a
        # clear KeyError/IndexError below instead of a NameError.
        eval_res = {}
        seg_res = cls_res = rd_res = None

        if 'seg_out' in results[0]:
            eval_res = self.dataloader.dataset.evaluate_seg(
                results, logger=runner.logger, **self.eval_kwargs)
            seg_res = eval_res['mIoU']
            runner.log_buffer.output['seg_mIoU'] = seg_res

            if 'cls_out' in results[0]:
                # Bugfix: the original wrote ``self.key_indicator == 'all'``,
                # a no-op comparison; the combined-score logic below only
                # runs when the indicator is actually set to 'all'.
                self.key_indicator = 'all'

        if 'cls_out' in results[0]:
            eval_res = self.dataloader.dataset.evaluate_cls(
                results, logger=runner.logger, **self.eval_kwargs)
            for name, val in eval_res.items():
                runner.log_buffer.output['cls_' + name] = val
            cls_res = eval_res['f1_score']

        if 'det_out' in results[0]:
            eval_res = self.dataloader.dataset.evaluate(
                results, logger=runner.logger, **self.eval_kwargs)
            for name, val in eval_res.items():
                runner.log_buffer.output[name] = val
            rd_res = eval_res['mAP']

        combined_score = None
        if self.key_indicator == 'all':
            # The combined score needs all three heads; the original raised
            # NameError when one of them was missing.
            if None not in (seg_res, cls_res, rd_res):
                combined_score = seg_res + cls_res + rd_res
                runner.log_buffer.output['all'] = combined_score

        runner.log_buffer.ready = True

        if self.save_best is not None:
            if self.key_indicator == 'auto':
                # infer the comparison rule from the first metric key
                self._init_rule(self.rule, list(eval_res.keys())[0])
            if self.key_indicator == 'all':
                return combined_score
            return eval_res[self.key_indicator]

        return None
