# For details on the BLS (business logic scripting) feature, see:
# https://gitee.com/luo_zhi_cheng/triton-python_backend#business-logic-scripting

import torch
from torch.utils.dlpack import from_dlpack, to_dlpack
import numpy as np
import asyncio

import json
import triton_python_backend_utils as pb_utils


def sliding_window_and_infer(image, window_size, step_size_ratio, model_name):
    """Slice ``image`` into sliding windows and fire one async BLS inference per window.

    Args:
        image: torch tensor of shape (H, W, ...) — only the first two dims are
            used for the sliding-window geometry.
        window_size: (win_width, win_height) pair.
        step_size_ratio: (w_ratio, h_ratio) pair; the stride is
            ``window_size * step_size_ratio`` per axis.
        model_name: name of the downstream Triton model to call.

    Returns:
        (infer_response_awaits, num_w, num_h): a list of awaitables (one per
        window, in row-major order) plus the number of windows along the
        width and height axes.
    """
    # Stride per axis; clamp to >= 1 so a tiny ratio can never produce a
    # zero step (range() raises ValueError on a zero step).
    step_x = max(1, int(window_size[0] * step_size_ratio[0]))
    step_y = max(1, int(window_size[1] * step_size_ratio[1]))

    img_height, img_width = image.shape[:2]
    win_width, win_height = window_size

    # Window origins. Only full-size windows are taken: if the remaining
    # area is smaller than window_size, no window is emitted there.
    xs = range(0, img_width - win_width + 1, step_x)
    ys = range(0, img_height - win_height + 1, step_y)

    infer_response_awaits = []
    output_names = ["output_image", ]
    for y in ys:
        for x in xs:
            # Only contiguous DLPack tensors stored in C-order are supported,
            # hence the .contiguous() on the slice.
            window = image[y:y + win_height, x:x + win_width].contiguous()

            inference_request = pb_utils.InferenceRequest(
                model_name=model_name,
                requested_output_names=output_names,
                inputs=[pb_utils.Tensor.from_dlpack("input_image", to_dlpack(torch.unsqueeze(window, dim=0)))]
            )

            infer_response_awaits.append(inference_request.async_exec())

    # Derive the window counts from the exact ranges the loops iterate, so
    # they can never disagree with the number of requests issued.  (The old
    # formula int((dim - win + 1) / step) + 1 overcounted by one whenever
    # (dim - win + 1) was an exact multiple of the step.)
    num_w = len(xs)
    num_h = len(ys)
    return infer_response_awaits, num_w, num_h


class TritonPythonModel:
    """Triton BLS model: validates the input image size, slices it into
    sliding windows, fans the windows out to a downstream model via async
    BLS requests, and stacks the per-window outputs into one response.
    """

    def initialize(self, args):
        """Parse the model config and the sizing parameters.

        Args:
            args: dict supplied by Triton; ``args['model_config']`` is the
                JSON-serialized model configuration.
        """
        self.logger = pb_utils.Logger

        # Parse model config.
        self.model_config = json.loads(args['model_config'])

        # Parse parameters (Triton exposes parameter values as strings).
        self.parameters = self.model_config['parameters']
        self.model_name = self.model_config['name']

        self.max_input_w = int(self.parameters['max_input_w']['string_value'])
        self.max_input_h = int(self.parameters['max_input_h']['string_value'])
        self.model_name_infer = self.parameters['model_name_infer']['string_value']
        self.logger.log_info('max_input_w: {}'.format(self.max_input_w))
        self.logger.log_info('max_input_h: {}'.format(self.max_input_h))
        self.logger.log_info('model_name_infer: {}'.format(self.model_name_infer))

        self.logger.log_info('{} started'.format(self.model_name))

    async def execute(self, requests):
        """Handle a batch of requests; returns one InferenceResponse per request.

        Inputs per request: 'input_image' (kept as pb_utils.Tensor for
        zero-copy DLPack transfer), 'window_size', 'step_ratio'.
        Outputs: 'crop_infers' (stacked per-window results, batch dim re-added)
        and 'num_wh' (window counts along width and height).
        """
        print('running {}'.format(self.model_name))
        responses = []
        for request in requests:
            batch_output = []

            # NOTE: without .as_numpy() this stays a pb_utils.Tensor, which
            # is what we need for the DLPack conversion below.
            input_image = pb_utils.get_input_tensor_by_name(request, 'input_image')
            window_size = pb_utils.get_input_tensor_by_name(request, 'window_size').as_numpy()
            step_ratio = pb_utils.get_input_tensor_by_name(request, 'step_ratio').as_numpy()

            # Reject images larger than the configured maximum.
            input_image_torch = from_dlpack(input_image.to_dlpack())
            if input_image_torch.shape[1] > self.max_input_h or input_image_torch.shape[2] > self.max_input_w:
                msg = 'input too large, wh is: ({}, {})'.format(
                    input_image_torch.shape[2], input_image_torch.shape[1])
                print(msg)
                # Return a per-request error instead of aborting the whole
                # batch: Triton expects exactly one response per request, so
                # an early return here would strand the remaining requests.
                responses.append(pb_utils.InferenceResponse(
                    output_tensors=[], error=pb_utils.TritonError(msg)))
                continue

            # The batch dimension is always 1, hence indexing [0] everywhere.
            infer_response_awaits, num_w, num_h = sliding_window_and_infer(
                input_image_torch[0], window_size[0], step_ratio[0], self.model_name_infer)

            # Await all per-window BLS requests concurrently.
            infer_responses = await asyncio.gather(*infer_response_awaits)

            for window_response in infer_responses:
                output_image = pb_utils.get_output_tensor_by_name(window_response, 'output_image').as_numpy()
                batch_output.append(output_image)

            # Stack the per-window outputs and re-add the batch dimension.
            batch_output = np.expand_dims(np.vstack(batch_output), axis=0)

            num_wh = np.array([[num_w, num_h]], dtype=np.int32)

            inference_response = pb_utils.InferenceResponse(
                output_tensors=[
                    pb_utils.Tensor("crop_infers", batch_output),
                    pb_utils.Tensor("num_wh", num_wh),
                ]
            )
            responses.append(inference_response)

        return responses

    def finalize(self):
        """Called once at model unload; log only, nothing to release."""
        self.logger.log_info('Finalizing {}'.format(self.model_name))
