import sys
import os
import os.path as osp
from argparse import ArgumentParser, SUPPRESS

from abc import ABC, abstractmethod
from openvino.inference_engine import IECore

from ...util.logger import get_logger


logger = get_logger(__name__)


class OpenVinoModel(ABC):
    """Base class wrapping an OpenVINO IR model.

    Resolves (and, on first use, downloads) the model's .xml/.bin IR files,
    loads the network with the Inference Engine and records its input/output
    parameters.

    Subclasses must define ``model_name`` and ``model_loc`` ('intel'
    downloads from the Open Model Zoo mirror; 'google' uses the
    subclass-provided ``xml_url``/``bin_url`` Google Drive file ids) and
    implement the abstract hooks at the bottom.
    """
    version = '2020 R3'
    model_base_url = 'https://download.01.org/opencv/2020/openvinotoolkit/2020.3/open_model_zoo/models_bin/1/'

    def __init__(self, xml_path=None, fp=None, conf=0.6, draw=False):
        """Set up the model.

        Args:
            xml_path: explicit path to the IR .xml file; when None the model
                is resolved under ``~/.pyvino`` and downloaded if missing.
            fp: precision subfolder (e.g. 'FP32', 'FP16'); when None the
                argparser default ('FP32') is kept.
            conf: confidence threshold stored on the args namespace.
            draw: whether results should be drawn (stored on the args namespace).
        """
        args = build_argparser()
        self.args = self.__set_args(args, xml_path, fp, conf, draw)
        self.__validate_model()
        self.__set_net()
        self.__net_params()

    def __set_args(self, args, xml_path, fp, conf, draw):
        """Overlay non-None constructor arguments onto the parsed defaults."""
        if xml_path is not None:
            args.model = xml_path
        if fp is not None:
            args.fp = fp
        if conf is not None:
            args.conf = conf
        if draw is not None:
            args.draw = draw
        return args

    def __validate_model(self):
        """Resolve args.model, downloading the IR files when not cached."""
        if self.args.model is None:
            model_dir = osp.join(osp.expanduser("~"), '.pyvino', self.model_name, self.args.fp)
            xml_path = osp.join(model_dir, '{}.xml'.format(self.model_name))
            bin_path = osp.join(model_dir, '{}.bin'.format(self.model_name))

            if self.model_loc == 'intel':
                # URLs always use '/', so build them with str.join rather
                # than os.path.join (which would yield '\\' on Windows).
                base = self.model_base_url.rstrip('/')
                xml_url = '/'.join([base, self.model_name, self.args.fp,
                                    '{}.xml'.format(self.model_name)])
                bin_url = '/'.join([base, self.model_name, self.args.fp,
                                    '{}.bin'.format(self.model_name)])
            elif self.model_loc == 'google':
                xml_url = self.xml_url
                bin_url = self.bin_url
            else:
                raise ValueError(
                    "unsupported model_loc {!r}; expected 'intel' or 'google'".format(self.model_loc))

            if not osp.exists(xml_path):
                self.__download(xml_url, xml_path)
            if not osp.exists(bin_path):
                self.__download(bin_url, bin_path)

            self.args.model = xml_path

    def __download(self, url, path_save):
        """Fetch one model file to ``path_save``, creating parent dirs."""
        os.makedirs(osp.dirname(path_save), exist_ok=True)
        if url.startswith('https://download'):
            import ssl
            # NOTE: 'import urllib' alone does not bind the request
            # submodule; it must be imported explicitly.
            import urllib.request
            # SECURITY: this disables certificate verification process-wide
            # for the download mirror; consider scoping it to a single
            # opener instead of patching the default context.
            ssl._create_default_https_context = ssl._create_unverified_context
            urllib.request.urlretrieve(url, path_save)
        else:
            # For model_loc == 'google' the "url" is a Google Drive file id.
            from google_drive_downloader import GoogleDriveDownloader as gdd
            gdd.download_file_from_google_drive(file_id=url,
                                                dest_path=path_save)
        logger.info('download model from {} and save to {}'.format(url, path_save))

    def __set_net(self):
        """Create the Inference Engine, read the IR and load it on a device."""
        # 1. Plugin initialization for the specified device.
        logger.info("Creating Inference Engine...")
        self.ie = IECore()
        # 2. Read the IR generated by the Model Optimizer (.xml and .bin files).
        logger.info("Loading network")
        self.net = self.ie.read_network(self.args.model, osp.splitext(self.args.model)[0] + ".bin")

        # 3. Check CPU support for every layer BEFORE loading; load_network
        # would otherwise fail first and this diagnostic could never run.
        if "CPU" in self.args.device:
            supported_layers = self.ie.query_network(self.net, "CPU")
            not_supported_layers = [layer for layer in self.net.layers.keys()
                                    if layer not in supported_layers]
            if len(not_supported_layers) != 0:
                logger.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                          format(self.args.device, ', '.join(not_supported_layers)))
                logger.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
                          "or --cpu_extension command line argument")
                sys.exit(1)

        # 4. Load the model to the plugin.
        logger.info("Loading model to the plugin")
        self.exec_net = self.ie.load_network(network=self.net, num_requests=2, device_name=self.args.device)

    def __net_params(self):
        """Record input/output blob names, batch size, input shape, labels."""
        logger.info("Preparing inputs")
        self.input_blob = next(iter(self.net.inputs))
        self.output_blob = next(iter(self.net.outputs))

        # Default batch size is 1.
        self.net.batch_size = 1

        # NCHW layout assumed for the input blob — TODO confirm for
        # non-image models.
        self.n, self.c, self.h, self.w = self.net.inputs[self.input_blob].shape

        if self.args.labels:
            with open(self.args.labels, 'r') as f:
                self.labels_map = [x.strip() for x in f]
        else:
            self.labels_map = None

    @abstractmethod
    def compute(self, frames):
        """Run inference on ``frames`` and return the model's results."""

    @abstractmethod
    def _compute(self, frame, request_id):
        """Run inference for a single frame using ``request_id``."""

    @abstractmethod
    def _pre_process(self, frame, cur_request_id=0):
        """Prepare ``frame`` for the network input."""

    @abstractmethod
    def _infer(self, frame, request_id=0):
        """Submit ``frame`` to the execution network."""

    @abstractmethod
    def _post_process(self, frame, cur_request_id=0):
        """Convert raw network output into the model's result format."""


def build_argparser():
    """Build the option parser and return a Namespace of its defaults.

    The parser is evaluated against an EMPTY argv (not ``sys.argv``), so
    real command-line arguments are deliberately ignored; callers override
    individual fields programmatically afterwards.

    Returns:
        argparse.Namespace with fields: model, fp, device, labels,
        prob_threshold, iou_threshold, number_iter, perf_counts,
        raw_output_message, no_show.
    """
    parser = ArgumentParser(add_help=False)
    group = parser.add_argument_group('Options')
    group.add_argument('-h', '--help', action='help', default=SUPPRESS,
                       help='Show this help message and exit.')
    group.add_argument("-m", "--model", default=None, type=str,
                       help="Required. Path to an .xml file with a trained model.")
    group.add_argument("-fp", "--fp", default='FP32', type=str,
                       help="Required. FP.")
    group.add_argument("-d", "--device", default="CPU", type=str,
                       help="Optional. Specify the target device to infer on; CPU, GPU, FPGA, HDDL or MYRIAD is"
                            " acceptable. The sample will look for a suitable plugin for device specified. "
                            "Default value is CPU")
    group.add_argument("--labels", default=None, type=str,
                       help="Optional. Labels mapping file")
    group.add_argument("-t", "--prob_threshold", default=0.5, type=float,
                       help="Optional. Probability threshold for detections filtering")
    group.add_argument("-iout", "--iou_threshold", default=0.4, type=float,
                       help="Optional. Intersection over union threshold for overlapping "
                            "detections filtering")
    group.add_argument("-ni", "--number_iter", default=1, type=int,
                       help="Optional. Number of inference iterations")
    group.add_argument("-pc", "--perf_counts", default=False, action="store_true",
                       help="Optional. Report performance counters")
    group.add_argument("-r", "--raw_output_message", default=False, action="store_true",
                       help="Optional. Output inference results raw values showing")
    group.add_argument("--no_show", action='store_true',
                       help="Optional. Don't show output")
    # parse_args expects a sequence of tokens; use [] (not the fragile '')
    # so only the declared defaults populate the namespace.
    return parser.parse_args([])
