#!/usr/bin/env python
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#  * Neither the name of NVIDIA CORPORATION nor the names of its
#    contributors may be used to endorse or promote products derived
#    from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import argparse
import asyncio
import json
import os
import struct
import sys
from builtins import range

import aiohttp
import cv2
import numpy as np
#from tensorrtserver.api import *
#import tritongrpcclient
import tritonclient.http as httpclient
from tritonclient.utils import *
from tritonclient.utils import InferenceServerException

from nms.nms_py import cpu_nms

def _get_inference_request(inputs, request_id, outputs, sequence_id,
                           sequence_start, sequence_end, priority, timeout):
    infer_request = {}
    parameters = {}
    if request_id != "":
        infer_request['id'] = request_id
    if sequence_id != 0:
        parameters['sequence_id'] = sequence_id
        parameters['sequence_start'] = sequence_start
        parameters['sequence_end'] = sequence_end
    if priority != 0:
        parameters['priority'] = priority
    if timeout is not None:
        parameters['timeout'] = timeout

    infer_request['inputs'] = [
        this_input._get_tensor() for this_input in inputs
    ]
    if outputs:
        infer_request['outputs'] = [
            this_output._get_tensor() for this_output in outputs
        ]
    else:
        # no outputs specified so set 'binary_data_output' True in the
        # request so that all outputs are returned in binary format
        parameters['binary_data_output'] = True

    if parameters:
        infer_request['parameters'] = parameters

    request_body = json.dumps(infer_request)
    json_size = len(request_body)
    binary_data = None
    for input_tensor in inputs:
        raw_data = input_tensor._get_binary_data()
        if raw_data is not None:
            if binary_data is not None:
                binary_data += raw_data
            else:
                binary_data = raw_data

    if binary_data is not None:
        request_body = struct.pack(
            '{}s{}s'.format(len(request_body), len(binary_data)),
            request_body.encode(), binary_data)
        return request_body, json_size

    return request_body, None



async def get_response_content(response):
    """Split an aiohttp inference response body into JSON and binary parts.

    When the server sent an ``Inference-Header-Content-Length`` header, the
    first that-many bytes are the JSON header and everything after is raw
    tensor data; returns ``(json_part, binary_part)``. Without the header
    the whole body is JSON and the second element is None.
    """
    json_length = response.headers.get('Inference-Header-Content-Length')
    body = response.content
    if json_length is None:
        return await body.read(), None
    json_part = await body.read(int(json_length))
    binary_part = await body.read()
    return json_part, binary_part

class InferResult:
    """An object of InferResult class holds the response of
    an inference request and provides methods to retrieve
    inference results.

    Parameters
    ----------
    response : aiohttp.ClientResponse
        The HTTP response; only its headers are inspected here.
    content : str or bytes
        The JSON portion of the response body.
    buffer : bytes or None
        The trailing binary tensor data, when the server sent any.
    verbose : bool
        If True print the raw JSON content. Default value is False.
    """

    def __init__(self, response, content, buffer, verbose):
        if verbose:
            print(content)
        self._result = json.loads(content)

        # Maps each binary output's name to its start offset in the buffer
        # so as_numpy() can slice the raw data directly. Always initialized
        # (even without binary data) so as_numpy() never hits an undefined
        # attribute.
        self._output_name_to_buffer_map = {}
        self._buffer = buffer

        header_length = response.headers.get('Inference-Header-Content-Length')
        if header_length is not None:
            # Binary outputs are laid out back-to-back in the buffer, in the
            # order they appear in the JSON 'outputs' list.
            buffer_index = 0
            for output in self._result.get('outputs', []):
                parameters = output.get("parameters")
                if parameters is not None:
                    this_data_size = parameters.get("binary_data_size")
                    if this_data_size is not None:
                        self._output_name_to_buffer_map[
                            output['name']] = buffer_index
                        buffer_index = buffer_index + this_data_size

    def as_numpy(self, name):
        """Get the tensor data for output associated with this object
        in numpy format

        Parameters
        ----------
        name : str
            The name of the output tensor whose result is to be retrieved.

        Returns
        -------
        numpy array
            The numpy array containing the response data for the tensor or
            None if the data for specified tensor name is not found.
        """
        for output in self._result.get('outputs', []):
            if output['name'] != name:
                continue
            datatype = output['datatype']
            has_binary_data = False
            parameters = output.get("parameters")
            if parameters is not None:
                this_data_size = parameters.get("binary_data_size")
                if this_data_size is not None:
                    has_binary_data = True
                    if this_data_size != 0:
                        start_index = self._output_name_to_buffer_map[name]
                        end_index = start_index + this_data_size
                        if datatype == 'BYTES':
                            # String results contain a 4-byte string length
                            # followed by the actual string characters.
                            # Hence, need to decode the raw bytes to convert
                            # into array elements.
                            np_array = deserialize_bytes_tensor(
                                self._buffer[start_index:end_index])
                        else:
                            np_array = np.frombuffer(
                                self._buffer[start_index:end_index],
                                dtype=triton_to_np_dtype(datatype))
                    else:
                        np_array = np.empty(0)
            if not has_binary_data:
                # Data was returned inline in the JSON body.
                np_array = np.array(output['data'],
                                    dtype=triton_to_np_dtype(datatype))
            np_array = np.resize(np_array, output['shape'])
            return np_array
        return None

    def get_output(self, name):
        """Retrieves the output tensor corresponding to the named output.

        Parameters
        ----------
        name : str
            The name of the tensor for which Output is to be
            retrieved.

        Returns
        -------
        Dict
            If an output tensor with specified name is present in
            the infer response then returns it as a json dict,
            otherwise returns None.
        """
        for output in self._result.get('outputs', []):
            if output['name'] == name:
                return output

        return None

    def get_response(self):
        """Retrieves the complete response

        Returns
        -------
        dict
            The underlying response dict.
        """
        return self._result





async def get_box(triton_client, image_url):
    """Run RetinaFace inference on one image via Triton's HTTP/v2 REST API.

    Parameters
    ----------
    triton_client : httpclient.InferenceServerClient
        Unused; kept for backward compatibility with existing callers
        (the request is sent with a raw aiohttp session instead).
    image_url : str
        Path of the image file to read with OpenCV.

    Returns
    -------
    numpy.ndarray
        Flat float32 'prob' output of the model. NOTE(review): downstream
        treats element 0 as the detection count followed by 15 floats per
        detection -- confirm against the model's output spec.
    """
    # HWC uint8 -> NCHW float32 with a batch dimension of 1. Assumes the
    # image on disk is already 480x640 to match the declared input shape --
    # TODO confirm upstream resizing.
    img_data = cv2.imread(image_url)
    img_data = np.array(img_data)
    print(img_data.shape)
    img_data = np.transpose(img_data, [2, 0, 1])
    img_data = np.expand_dims(img_data, 0).astype('float32')

    inputs = [httpclient.InferInput('data', [1, 3, 480, 640], "FP32")]
    inputs[0].set_data_from_numpy(img_data)
    outputs = [httpclient.InferRequestedOutput('prob')]

    request_body, json_size = _get_inference_request(
        inputs=inputs,
        request_id="",
        outputs=outputs,
        sequence_id=0,
        sequence_start=False,
        sequence_end=False,
        priority=0,
        timeout=None)
    # Tells the server where the JSON header ends and binary data begins.
    headers = {"Inference-Header-Content-Length": str(json_size)}

    request_uri = "http://127.0.0.1:18000/v2/models/retinaface-50/infer"
    async with aiohttp.ClientSession() as sess:
        async with sess.post(request_uri, data=request_body,
                             headers=headers) as resp:
            content, buffer = await get_response_content(resp)
            results = InferResult(resp, content, buffer, False)
            # as_numpy already yields float32; frombuffer flattens it.
            boxes = np.frombuffer(results.as_numpy("prob"), dtype=np.float32)
    print(boxes[0])

    return boxes


if __name__ == '__main__':
    try:
        # concurrency sizes the client's internal connection pool.
        triton_client = httpclient.InferenceServerClient(
                url="127.0.0.1:18000", verbose=False, concurrency=700)
    except Exception as e:
        print("channel creation failed: " + str(e))
        sys.exit()
    img = cv2.imread('dest.jpg')
    boxes = asyncio.run(get_box(triton_client, "dest.jpg"))
    print(boxes.shape)
    # Element 0 of 'prob' holds the detection count; each detection then
    # spans 15 floats (4 box coords, 1 score, 10 landmark coords).
    nms_len = int(boxes[0] * 15 + 1)
    print(nms_len)
    nms_box = boxes[1:nms_len].reshape(-1, 15)
    print(nms_box.shape)
    keep = cpu_nms(nms_box, 0.6)
    #det, landmarks = nms_box[keep][:,0:4], nms_box[keep][:,5:]
    for i in keep:
        # OpenCV drawing APIs require integer pixel coordinates; the model
        # output is float32, so cast each value explicitly.
        box = [int(v) for v in nms_box[i]]
        color = (0, 0, 255)
        cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), color, 2)
        cv2.circle(img, (box[5], box[6]), 1, (0, 0, 255), 4)  # Left eye left corner
        cv2.circle(img, (box[7], box[8]), 1, (0, 255, 255), 4)  # Right eye right corner
        cv2.circle(img, (box[9], box[10]), 1, (255, 0, 255), 4)  # Nose tip
        cv2.circle(img, (box[11], box[12]), 1, (0, 255, 0), 4)  # Left Mouth corner
        cv2.circle(img, (box[13], box[14]), 1, (255, 0, 0), 4)
    cv2.imwrite("img.jpg", img)
