# BLS (Business Logic Scripting) details:
# https://gitee.com/luo_zhi_cheng/triton-python_backend#business-logic-scripting

import torch
from torch.utils.dlpack import from_dlpack, to_dlpack
import numpy as np
import asyncio

import json
import triton_python_backend_utils as pb_utils


class TritonPythonModel:
    """BLS (Business Logic Scripting) orchestration model.

    Per-request pipeline:
      1. Forward the full image (plus window/step parameters) to
         ``model_name_step1``, which slices it into a batch of sliding-window
         crops (``output_crop_images``) and window-count metadata (``num_wh``).
      2. Run ``model_name_step2`` on every crop, concurrently, via async BLS.
      3. Stack the per-crop results and return them together with ``num_wh``.
    """

    def initialize(self, args):
        """Parse the model configuration and cache BLS parameters.

        Args:
            args: Triton-provided dict; ``args['model_config']`` is the JSON
                  serialization of this model's config.pbtxt.
        """
        self.logger = pb_utils.Logger

        # Parse the model config supplied by Triton.
        self.model_config = json.loads(args['model_config'])

        # Custom parameters declared in config.pbtxt.
        self.parameters = self.model_config['parameters']
        self.model_name = self.model_config['name']

        # Hard limits guarding against oversized inputs (checked in execute()).
        self.max_input_w = int(self.parameters['max_input_w']['string_value'])
        self.max_input_h = int(self.parameters['max_input_h']['string_value'])
        # Names of the two downstream models invoked through BLS.
        self.model_name_step1 = self.parameters['model_name_step1']['string_value']
        self.model_name_step2 = self.parameters['model_name_step2']['string_value']
        self.logger.log_info('max_input_w: {}'.format(self.max_input_w))
        self.logger.log_info('max_input_h: {}'.format(self.max_input_h))
        self.logger.log_info('model_name_step1: {}'.format(self.model_name_step1))
        self.logger.log_info('model_name_step2: {}'.format(self.model_name_step2))

        self.logger.log_info('{} started'.format(self.model_name))

    async def execute(self, requests):
        """Handle a batch of inference requests.

        For every request: crop the image via step1, infer each crop via
        step2 (all crops in flight concurrently through ``async_exec``),
        stack the results and return them alongside ``num_wh``.

        Returns:
            A list with exactly one InferenceResponse per request — error
            responses for invalid inputs or failed downstream calls — as
            required by the Triton python_backend contract.
        """
        self.logger.log_info('running {}'.format(self.model_name))
        responses = []
        for request in requests:
            # NOTE: without as_numpy() these remain pb_utils.Tensor objects,
            # which lets us forward them into the BLS call directly.
            input_image = pb_utils.get_input_tensor_by_name(request, 'input_image')
            window_size = pb_utils.get_input_tensor_by_name(request, 'window_size')
            step_ratio = pb_utils.get_input_tensor_by_name(request, 'step_ratio')

            # Reject oversized images. Respond with an error for THIS request
            # only, instead of returning early: an early return would produce
            # fewer responses than requests and silently drop the rest of the
            # batch.
            # Assumes input_image layout is (batch, H, W[, C]) — TODO confirm
            # against the step1 model config.
            input_image_torch = from_dlpack(input_image.to_dlpack())
            if input_image_torch.shape[1] > self.max_input_h or input_image_torch.shape[2] > self.max_input_w:
                msg = 'input too large, wh is: ({}, {})'.format(
                    input_image_torch.shape[2], input_image_torch.shape[1])
                self.logger.log_error(msg)
                responses.append(pb_utils.InferenceResponse(
                    output_tensors=[], error=pb_utils.TritonError(msg)))
                continue

            # Step 1: slice the image into sliding-window crops.
            inference_request1 = pb_utils.InferenceRequest(
                model_name=self.model_name_step1,
                requested_output_names=['output_crop_images', 'num_wh'],
                inputs=[input_image, window_size, step_ratio],
            )
            inference_response1 = inference_request1.exec()
            if inference_response1.has_error():
                # Propagate the downstream error instead of crashing on a
                # missing output tensor below.
                responses.append(pb_utils.InferenceResponse(
                    output_tensors=[], error=inference_response1.error()))
                continue

            output_crop_images = pb_utils.get_output_tensor_by_name(
                inference_response1, 'output_crop_images')
            num_wh = pb_utils.get_output_tensor_by_name(inference_response1, 'num_wh')
            # Zero-copy conversion to a torch tensor so individual crops can
            # be sliced out along dim 1.
            output_crop_images = from_dlpack(output_crop_images.to_dlpack())

            # Step 2: fire one async BLS request per crop and await them all;
            # concurrent execution is slightly faster than sequential exec().
            infer_response_awaits = []
            for i in range(output_crop_images.shape[1]):
                crop_image_triton = pb_utils.Tensor.from_dlpack(
                    'input_image', to_dlpack(output_crop_images[:, i, ...]))
                inference_request2 = pb_utils.InferenceRequest(
                    model_name=self.model_name_step2,
                    requested_output_names=['output_image'],
                    inputs=[crop_image_triton],
                )
                infer_response_awaits.append(inference_request2.async_exec())

            infer_responses = await asyncio.gather(*infer_response_awaits)

            batch_output = []
            step2_error = None
            for inference_response2 in infer_responses:
                if inference_response2.has_error():
                    step2_error = inference_response2.error()
                    break
                output_image = pb_utils.get_output_tensor_by_name(
                    inference_response2, 'output_image')
                batch_output.append(from_dlpack(output_image.to_dlpack()))
            if step2_error is not None:
                responses.append(pb_utils.InferenceResponse(
                    output_tensors=[], error=step2_error))
                continue

            # Stack per-crop results and restore a leading batch dimension:
            # (num_crops, ...) -> (1, num_crops, ...).
            batch_output = torch.unsqueeze(torch.vstack(batch_output), dim=0)

            inference_response = pb_utils.InferenceResponse(
                output_tensors=[
                    pb_utils.Tensor.from_dlpack('crop_infers', to_dlpack(batch_output)),
                    # num_wh is already a pb_utils.Tensor; forward it as-is.
                    num_wh,
                ]
            )
            responses.append(inference_response)

        return responses

    def finalize(self):
        """Called once when the model is unloaded."""
        self.logger.log_info('Finalizing {}'.format(self.model_name))
