#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import argparse
import logging as log
import os
import sys

import cv2
import numpy as np
from openvino.inference_engine import IECore


def parse_args() -> argparse.Namespace:
    """Parse and return command line arguments.

    Returns:
        argparse.Namespace with the attributes: model_name, model_dir,
        model_acc, input, extension, config, labels.
    """
    parser = argparse.ArgumentParser(add_help=False)
    args = parser.add_argument_group("Options")
    # fmt: off
    args.add_argument("-h", "--help", action="help", help="Show this help message and exit.")
    args.add_argument("-mn", "--model_name", required=True, type=str,
                      help="Required. Type of a trained model. Suitable for: "
                           "pedestrian-detection-adas-0002, "
                           "person-detection-0200, "
                           "person-detection-0201, "
                           "person-detection-0202, "
                           "face-detection-0200, "
                           "face-detection-0202, "
                           "face-detection-0204, "
                           "face-detection-adas-0001, "
                           "face-detection-retail-0004, "
                           "face-detection-retail-0005")
    args.add_argument("-md", "--model_dir", type=str,
                      default="/home/pi/openvino/open_model_zoo/models/intel/",
                      help="Optional. Path to the models directory. "
                           "Default value is /home/pi/openvino/open_model_zoo/models/intel/")
    args.add_argument("-mc", "--model_acc", type=str, default="FP16",
                      help="Optional. Model accuracy. Default value is FP16")
    args.add_argument("-i", "--input", required=True, type=str,
                      help="Required. Path to an image file.")
    args.add_argument("-l", "--extension", type=str, default=None,
                      help="Optional. Required by the CPU Plugin for executing the custom operation on a CPU. "
                           "Absolute path to a shared library with the kernels implementations.")
    args.add_argument("-c", "--config", type=str, default=None,
                      help="Optional. Required by GPU or VPU Plugins for the custom operation kernel. "
                           "Absolute path to operation description file (.xml).")
    # args.add_argument("-d", "--device", default="MYRIAD", type=str,
    #                   help="Optional. Specify the target device to infer on; CPU, GPU, MYRIAD, HDDL or HETERO: "
    #                        "is acceptable. The sample will look for a suitable plugin for device specified. "
    #                        "Default value is MYRIAD.")
    args.add_argument("--labels", default=None, type=str,
                      help="Optional. Path to a labels mapping file.")
    # fmt: on
    return parser.parse_args()


def main():  # noqa
    """Run single-image object detection on a MYRIAD device and save the result.

    Reads an OpenVINO IR model (<model_dir>/<model_name>/<model_acc>/<model_name>.xml),
    runs synchronous inference on the input image, and writes an output image
    named out_<model_name>_<model_acc>.bmp with bounding boxes drawn for every
    detection with confidence > 0.5.

    Returns:
        0 on success, -1 on an unsupported model topology or unreadable input image.
    """
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    args = parse_args()

    # ---------------------------Step 1. Initialize inference engine core--------------------------------------------------
    log.info("Creating Inference Engine")
    ie = IECore()

    # ---------------------------Step 2. Read a model in OpenVINO Intermediate Representation or ONNX format---------------
    log.info(f"Reading the network: {args.model_name}")
    # (.xml and .bin files) or (.onnx file)
    model_path = "{}/{}/{}/{}.xml".format(args.model_dir, args.model_name, args.model_acc, args.model_name)
    net = ie.read_network(model=model_path)

    if len(net.input_info) != 1:
        log.error("The sample supports only single input topologies")
        return -1

    if len(net.outputs) != 1 and not ("boxes" in net.outputs or "labels" in net.outputs):
        log.error("The sample supports models with 1 output or with 2 with the names 'boxes' and 'labels'")
        return -1

    # ---------------------------Step 3. Configure input & output----------------------------------------------------------
    log.info("Configuring input and output blobs")
    # Get name of input blob
    input_blob = next(iter(net.input_info))

    # Set input and output precision manually
    net.input_info[input_blob].precision = "U8"

    if len(net.outputs) == 1:
        output_blob = next(iter(net.outputs))
        net.outputs[output_blob].precision = "FP32"
    else:
        net.outputs["boxes"].precision = "FP32"
        # net.outputs["labels"].precision = "U16"
        # net.outputs["labels"].precision = "U8"

    # ---------------------------Step 4. Loading model to the device-------------------------------------------------------
    log.info("Loading the model to the plugin")
    exec_net = ie.load_network(network=net, device_name="MYRIAD")

    # ---------------------------Step 5. Create infer request--------------------------------------------------------------
    # load_network() method of the IECore class with a specified number of requests (default 1) returns an ExecutableNetwork
    # instance which stores infer requests. So you already created Infer requests in the previous step.

    # ---------------------------Step 6. Prepare input---------------------------------------------------------------------
    original_image = cv2.imread(args.input)
    if original_image is None:
        # cv2.imread returns None (no exception) for a missing/unreadable file.
        log.error(f"Cannot read the input image: {args.input}")
        return -1
    image = original_image.copy()
    _, _, net_h, net_w = net.input_info[input_blob].input_data.shape

    if image.shape[:-1] != (net_h, net_w):
        log.warning(f"Image {args.input} is resized from {image.shape[:-1]} to {(net_h, net_w)}")
        image = cv2.resize(image, (net_w, net_h))

    # Change data layout from HWC to CHW
    image = image.transpose((2, 0, 1))
    # Add N dimension to transform to NCHW
    image = np.expand_dims(image, axis=0)

    # ---------------------------Step 7. Do inference----------------------------------------------------------------------
    log.info("Starting inference in synchronous mode")
    res = exec_net.infer(inputs={input_blob: image})

    # ---------------------------Step 8. Process output--------------------------------------------------------------------
    # Generate a label list from the optional mapping file; each line is
    # "label[,extra]" and only the first comma-separated field is kept.
    # Kept in a dedicated variable so the network's "labels" output cannot
    # clobber it (that clobbering was a bug in the original sample).
    label_map = None
    if args.labels:
        with open(args.labels, "r") as f:
            label_map = [line.split(",")[0].strip() for line in f]

    output_image = original_image.copy()
    h, w, _ = output_image.shape

    if len(net.outputs) == 1:
        res = res[output_blob]
        # Change a shape of a numpy.ndarray with results ([1, 1, N, 7]) to get another one ([N, 7]),
        # where N is the number of detected bounding boxes
        detections = res.reshape(-1, 7)
        class_ids = None  # class id is embedded in each detection row
    else:
        detections = res["boxes"]
        class_ids = res["labels"]
        # Boxes of 2-output models are in network-input pixels:
        # redefine w/h as scale coefficients back to the original image.
        w, h = w / net_w, h / net_h

    for i, detection in enumerate(detections):
        if class_ids is None:
            # Single-output row: [image_id, class_id, confidence, xmin, ymin, xmax, ymax]
            _, class_id, confidence, xmin, ymin, xmax, ymax = detection
        else:
            class_id = class_ids[i]
            xmin, ymin, xmax, ymax, confidence = detection

        if confidence > 0.5:
            # Cast before indexing: class_id arrives as a numpy float and
            # a float index would raise TypeError on the label list.
            class_id = int(class_id)
            label = int(label_map[class_id]) if label_map else class_id

            xmin = int(xmin * w)
            ymin = int(ymin * h)
            xmax = int(xmax * w)
            ymax = int(ymax * h)

            log.info(f"Found: label = {label}, confidence = {confidence:.2f}, " f"coords = ({xmin}, {ymin}), ({xmax}, {ymax})")

            # Draw a bounding box on a output image
            cv2.rectangle(output_image, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)

    output_filename = "out_{}_{}.bmp".format(args.model_name, args.model_acc)
    cv2.imwrite(output_filename, output_image)
    if os.path.exists(output_filename):
        log.info("Image {} created!".format(output_filename))
    else:
        log.error("Image {} was not created. Check your permissions.".format(output_filename))

    # ----------------------------------------------------------------------------------------------------------------------
    log.info("This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool\n")
    return 0


# Entry point: propagate main()'s return code (0 on success, -1 on error)
# as the process exit status.
if __name__ == "__main__":
    sys.exit(main())
