# -*- coding: utf-8 -*-

# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""main func to eval model."""

import argparse
import datetime
import os
import time

import numpy as np
from pycocotools.coco import COCO

from mindspore import context, set_seed
import mindspore.common.dtype as mstype
from mindspore.train.serialization import load_checkpoint, load_param_into_net

from mindvision.detection.models.builder import build_detector
from mindvision.detection.utils.coco_utils import bbox2result_1image, coco_eval, results2json

from mindvision.common.dataset.dataloader import build_dataloader
from mindvision.common.utils.config import Config, ActionDict
from mindvision.common.log import info

import datasets  # pylint: disable=unused-import


def parse_arguments():
    """Parse command-line arguments for model evaluation.

    Returns:
        argparse.Namespace: the parsed evaluation arguments.
    """

    def _str2bool(value):
        """Convert a textual flag to bool.

        A bare ``type=bool`` treats every non-empty string — including the
        literal "False" — as True, so parse the common spellings explicitly.
        """
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ('true', 't', 'yes', 'y', '1'):
            return True
        if lowered in ('false', 'f', 'no', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected, got {!r}'.format(value))

    parser = argparse.ArgumentParser(description="evaluation")
    parser.add_argument("--data_url", type=str, default="", help="Dataset path for train on ModelArts platform")
    parser.add_argument("--eval_url", type=str, default="", help="Eval file outputs path on ModelArts platform")
    parser.add_argument("--eval_data", type=str, default="", help="Eval file inputs path on ModelArts platform")
    # type=_str2bool (not type=bool) so "--is_modelarts False" really disables it.
    parser.add_argument('--is_modelarts', type=_str2bool, default=False,
                        help='Whether to run on the modelarts platform')
    parser.add_argument('--config', help='config file path')
    parser.add_argument("--ann_file", type=str, default="", help="Ann file, default is val.json.")
    parser.add_argument("--checkpoint_path", type=str, required=True, help="Checkpoint file path.")
    parser.add_argument('--work_dir', default='./', help='the path to save logs and models')
    parser.add_argument("--device_target", type=str, default="Ascend",
                        help="device where the code will be implemented, default is Ascend")
    parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.")
    # type=int so a CLI-supplied seed is not passed to set_seed() as a string.
    parser.add_argument('--seed', type=int, default=1, help='the random seed')
    parser.add_argument(
        '--options',
        nargs='+',
        action=ActionDict,
        help='override some settings in the used config, the key-value pair'
             'in xxx=yyy format will be merged into config file')
    args_opt = parser.parse_args()
    return args_opt


def main():
    """Entry point: build the detector, load a checkpoint, run COCO bbox eval."""
    args = parse_arguments()
    set_seed(args.seed)

    # Load config file and merge any --options key=value overrides into it.
    cfg = Config(args.config)
    if args.options is not None:
        cfg.merge_from_dict(args.options)
    eval_cfg = cfg.eval

    # if the code runs in ModelArts, copy train dataset to ModelArts Training Workspace
    if args.is_modelarts:
        import moxing as mox

        if not os.path.exists(args.eval_data):
            os.makedirs(args.eval_data)
        mox.file.copy_parallel(args.data_url, args.eval_data)

    # create work path
    if not os.path.isdir(args.work_dir):
        os.makedirs(args.work_dir)

    # init logger
    # NOTE(review): outputs_dir joins the checkpoint path (eval_cfg.ckpt_path)
    # into the directory name — confirm this is intended rather than a typo
    # for a plain subdirectory name.
    eval_cfg.outputs_dir = os.path.join(args.work_dir,
                                        eval_cfg.ckpt_path,
                                        datetime.datetime.now().
                                        strftime('%Y-%m-%d_%H_%M_%S'))

    # init context
    # NOTE(review): `if args.device_id:` is falsy for device 0, so device id 0
    # can never override the config value — presumably acceptable because 0 is
    # also the argparse default; verify.
    if args.device_id:
        eval_cfg.context.device_id = int(args.device_id)
    context.set_context(**eval_cfg.context)

    # create model
    network = build_detector(cfg.model)
    network.set_train(False)  # inference mode: disable training-time behavior
    info('Finish build network')

    # create dataset
    data_loader = build_dataloader(cfg.data_loader, False)
    ds = data_loader()
    data_size = ds.get_dataset_size()
    info('Finish loading dataset, data_size: {}'.format(data_size))

    # load model
    # CLI arguments, when given, take precedence over the config file values.
    if args.checkpoint_path:
        eval_cfg.ckpt_path = args.checkpoint_path
    if args.ann_file:
        eval_cfg.ann_file = args.ann_file

    param_dict = load_checkpoint(eval_cfg.ckpt_path)
    load_param_into_net(network, param_dict)
    network.to_float(mstype.float16)  # run inference in fp16

    # Starting predict pictures
    outputs = do_eval(ds, network, eval_cfg)

    # TODO encapsulate eval operations in coco dataset
    # Convert raw per-image results to COCO json format and run bbox metrics.
    dataset_coco = COCO(eval_cfg.ann_file)
    eval_types = ["bbox"]
    result_files = results2json(dataset_coco, outputs, "./results.pkl")
    coco_eval(result_files, eval_types, dataset_coco, single_result=False)


def do_eval(dataset, network, config, max_num=128):
    """Run inference over *dataset* and collect per-image bbox results.

    Args:
        dataset: MindSpore dataset whose dict iterator yields the keys
            'image', 'image_shape', 'bboxes', 'labels' and 'valid_num'.
        network: detector network called as
            ``network(img, metas, bboxes, labels, valid_num)``; expected to
            return batched (bboxes, labels, mask) tensors.
        config: config object providing ``test_batch_size`` and ``num_classes``.
        max_num (int): keep at most this many top-scoring boxes per image
            (previously a hard-coded constant, default unchanged).

    Returns:
        list: one ``bbox2result_1image(...)`` entry per evaluated image.
    """
    outputs = []
    print("\n========================================\n")
    print("Total images num: ", dataset.get_dataset_size())
    print("Processing, please wait a moment.")
    for eval_iter, data in enumerate(dataset.create_dict_iterator(num_epochs=1), start=1):
        img_data = data['image']
        img_metas = data['image_shape']
        gt_bboxes = data['bboxes']
        gt_labels = data['labels']
        gt_num = data['valid_num']

        start = time.time()
        output = network(img_data, img_metas, gt_bboxes, gt_labels, gt_num)
        end = time.time()
        print("Iter {} cost time {}".format(eval_iter, end - start))

        # Convert the batched output tensors to numpy ONCE per iteration;
        # previously .asnumpy() ran inside the per-image loop, re-converting
        # the whole batch test_batch_size times.
        all_bbox = output[0].asnumpy()
        all_label = output[1].asnumpy()
        all_mask = output[2].asnumpy()

        for j in range(config.test_batch_size):
            all_bbox_squee = np.squeeze(all_bbox[j, :, :])
            all_label_squee = np.squeeze(all_label[j, :, :])
            all_mask_squee = np.squeeze(all_mask[j, :, :])

            # Keep only the boxes flagged valid by the mask.
            all_bboxes_tmp_mask = all_bbox_squee[all_mask_squee, :]
            all_labels_tmp_mask = all_label_squee[all_mask_squee]

            # Truncate to the max_num highest-scoring boxes; the last bbox
            # column is used as the score here (assumed [x1,y1,x2,y2,score]
            # layout — TODO confirm against the detector head).
            if all_bboxes_tmp_mask.shape[0] > max_num:
                inds = np.argsort(-all_bboxes_tmp_mask[:, -1])[:max_num]
                all_bboxes_tmp_mask = all_bboxes_tmp_mask[inds]
                all_labels_tmp_mask = all_labels_tmp_mask[inds]

            outputs.append(bbox2result_1image(all_bboxes_tmp_mask,
                                              all_labels_tmp_mask,
                                              config.num_classes))

    return outputs


# Script entry point: run evaluation only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()
