from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys
# Make sibling modules and the repository root importable when this script
# is executed directly (e.g. ``python tools/eval.py``).
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(cur_dir)
sys.path.insert(0, os.path.abspath(os.path.join(cur_dir, '..')))

import warnings
# Silence noisy framework warnings so the evaluation log stays readable.
warnings.filterwarnings('ignore')

import paddle
import numpy as np

from ppdet.core.workspace import create, load_config, merge_config
from ppdet.utils.check import check_gpu, check_npu, check_xpu, check_mlu, check_version, check_config
from ppdet.utils.cli import ArgsParser, merge_args
from ppdet.engine import Trainer, init_parallel_env
from ppdet.metrics.coco_utils import json_eval_results
from ppdet.slim import build_slim_model
from ppdet.utils.logger import setup_logger
from paddleslim.analysis import flops ,model_size

logger = setup_logger('eval')

def parse_args():
    """Parse command-line arguments for model evaluation.

    Returns:
        The parsed arguments namespace, including the options contributed by
        the base ``ArgsParser`` (``--config``, ``-o``/``--opt``, ...).
    """
    parser = ArgsParser()
    parser.add_argument(
        "--output_eval",
        default=None,
        type=str,
        help="Evaluation directory, default is current directory.")
    parser.add_argument(
        '--json_eval',
        action='store_true',
        default=False,
        help='Whether to re eval with already exists bbox.json or mask.json')
    parser.add_argument(
        "--slim_config",
        default=None,
        type=str,
        help="Configuration file of slim method.")
    parser.add_argument(
        "--bias",
        action="store_true",
        help="whether add bias or not while getting w and h")
    parser.add_argument(
        "--classwise",
        action="store_true",
        help="whether per-category AP and draw P-R Curve or not.")
    parser.add_argument(
        '--save_prediction_only',
        action='store_true',
        default=False,
        help='Whether to save the evaluation results only')
    parser.add_argument(
        "--amp",
        action='store_true',
        default=False,
        help="Enable auto mixed precision eval.")
    # Small object detection: slice the input image, run inference on each
    # slice, then merge the per-slice detections.
    parser.add_argument(
        "--slice_infer",
        action='store_true',
        help="Whether to slice the image and merge the inference results for small object detection.")
    parser.add_argument(
        '--slice_size',
        nargs='+',
        type=int,
        default=[640, 640],
        # Fixed help text: the option takes a [height, width] pair,
        # not just a height.
        help="Height and width of the sliced image.")
    parser.add_argument(
        "--overlap_ratio",
        nargs='+',
        type=float,
        default=[0.25, 0.25],
        # Fixed help text: one ratio per dimension, matching --slice_size.
        help="Overlap height and width ratio of the sliced image.")
    parser.add_argument(
        "--combine_method",
        type=str,
        default='nms',
        help="Combine method of the sliced images' detection results, choose in ['nms', 'nmm', 'concat'].")
    parser.add_argument(
        "--match_threshold",
        type=float,
        default=0.6,
        help="Combine method matching threshold.")
    parser.add_argument(
        "--match_metric",
        type=str,
        default='ios',
        help="Combine method matching metric, choose in ['iou', 'ios'].")
    args = parser.parse_args()
    return args

def run(args, cfg):
    """Evaluate a model (or re-evaluate already-produced json results).

    Args:
        args: parsed CLI arguments from ``parse_args``.
        cfg: merged ppdet configuration object (provides ``metric``,
            ``weights``, ...).
    """
    if args.json_eval:
        logger.info(
            "In json_eval mode, PaddleDetection will evaluate json files in "
            "output_eval directly. And proposal.json, bbox.json and mask.json "
            "will be detected by default.")
        json_eval_results(
            cfg.metric,
            json_directory=args.output_eval,
            dataset=create('EvalDataset')())
        return

    # init parallel environment if nranks > 1
    init_parallel_env()

    # build trainer
    trainer = Trainer(cfg, mode='eval')

    # load weights
    trainer.load_weights(cfg.weights)

    # ---- Extra report: FLOPs via paddleslim (best effort) ----
    # The analysis must never abort the actual evaluation, so any failure
    # here is logged and skipped.
    print("=" * 30)
    print("Start Analyze model with paddleslim...")
    try:
        model = trainer.model
        model.eval()

        # NOTE(review): the dummy input size must match the model's eval
        # input (see the TestReader/EvalReader config) — 640x640 assumed.
        height, width = 640, 640
        inputs = {
            'image': paddle.randn([1, 3, height, width]),
            # ppdet detection models consume a feed dict; im_shape and
            # scale_factor are per-image [h, w] / [scale_y, scale_x]
            # tensors of shape [1, 2], not raw Python lists/arrays.
            'im_shape': paddle.to_tensor([[float(height), float(width)]]),
            'scale_factor': paddle.to_tensor([[1.0, 1.0]]),
        }
        flops_out = flops(model, inputs)
        # paddleslim reports raw FLOPs; divide by 1e9 to report GFLOPs.
        print("[Model] FLOPs = {:.2f} G".format(flops_out / 1e9))
    except Exception as e:
        logger.warning("FLOPs analysis failed, skipping: {}".format(e))
    print("=" * 30)

    # Total parameter count (portable across paddle versions).
    param_count = sum([p.numel() for p in trainer.model.parameters()])
    param_count = param_count.item()
    print('模型参数总参数量: {:.2f}M'.format(param_count / 1e6))

    # evaluate
    if args.slice_infer:
        trainer.evaluate_slice(
            slice_size=args.slice_size,
            overlap_ratio=args.overlap_ratio,
            combine_method=args.combine_method,
            match_threshold=args.match_threshold,
            match_metric=args.match_metric)
    else:
        trainer.evaluate()

def main():
    """CLI entry point: build the config, pick the device, run evaluation."""
    args = parse_args()
    cfg = load_config(args.config)
    merge_args(cfg, args)
    merge_config(args.opt)

    # Guarantee every backend flag exists so attribute access below is safe.
    for flag in ('use_npu', 'use_xpu', 'use_gpu', 'use_mlu'):
        cfg.setdefault(flag, False)

    # Device priority: gpu > npu > xpu > mlu, falling back to cpu.
    device = 'cpu'
    for candidate in ('gpu', 'npu', 'xpu', 'mlu'):
        if cfg['use_' + candidate]:
            device = candidate
            break
    paddle.set_device(device)

    if args.slim_config:
        cfg = build_slim_model(cfg, args.slim_config, mode='eval')

    check_config(cfg)
    check_gpu(cfg.use_gpu)
    check_npu(cfg.use_npu)
    check_xpu(cfg.use_xpu)
    check_mlu(cfg.use_mlu)
    check_version()

    run(args, cfg)

# Script entry point: only run evaluation when executed directly.
if __name__ == '__main__':
    main()