import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.dlpack import from_dlpack

import json
import triton_python_backend_utils as pb_utils


def sliding_window(image, window_size, step_size_ratio):
    """
    Split an image into overlapping crops with a sliding window.

    Args:
        image: torch.Tensor whose first two dims are (height, width); any
            trailing dims (e.g. channels) are carried through unchanged.
        window_size: (win_width, win_height) of each crop, in pixels.
        step_size_ratio: (x_ratio, y_ratio); the stride in each direction is
            the window size scaled by the ratio, truncated to int.

    Returns:
        windows: torch.Tensor of shape (num_w * num_h, win_height, win_width, ...)
            containing the crops in row-major order (y outer, x inner).
        num_w: number of window positions along the width.
        num_h: number of window positions along the height.
    """
    # Stride in pixels for (x, y), derived from the window size.
    step_size = (int(window_size[0] * step_size_ratio[0]), int(window_size[1] * step_size_ratio[1]))

    img_height, img_width = image.shape[:2]
    win_width, win_height = window_size

    # Walk the image; the ranges stop before the window would overrun the
    # border, so every crop is exactly window-sized (no padding needed).
    windows = []
    for y in range(0, img_height - win_height + 1, step_size[1]):
        for x in range(0, img_width - win_width + 1, step_size[0]):
            window = image[y:y + win_height, x:x + win_width]
            windows.append(torch.unsqueeze(window, dim=0))

    windows = torch.vstack(windows)
    # range(0, N, s) yields (N - 1) // s + 1 positions, with N = dim - win + 1.
    # (The previous formula, int(N / s) + 1, overcounted by one whenever N was
    # a multiple of the step, so num_w * num_h could disagree with len(windows).)
    num_w = (img_width - win_width) // step_size[0] + 1
    num_h = (img_height - win_height) // step_size[1] + 1
    return windows, num_w, num_h


class TritonPythonModel:
    """Triton python_backend preprocessing model.

    Crops each input image into sliding-window patches via sliding_window()
    and emits the crops plus the per-image window grid dimensions.
    """

    def initialize(self, args):
        """One-time setup called by Triton at model load.

        Args:
            args: dict supplied by Triton; args['model_config'] is the model
                configuration serialized as a JSON string.
        """
        self.logger = pb_utils.Logger

        # parse model configs
        self.model_config = json.loads(args['model_config'])

        # parse parameters
        self.parameters = self.model_config['parameters']
        self.model_name = self.model_config['name']

        # Device the input tensor is moved to before cropping (e.g. 'cpu', 'cuda:0').
        self.torch_device   = self.parameters['torch_device']['string_value']
        self.logger.log_info('torch_device: {}'.format(self.torch_device)) # NOTE: in normal operation this preprocessing does no heavy torch compute, so the device choice barely matters

        self.logger.log_info('{} started'.format(self.model_name))


    def execute(self, requests):
        """Handle a batch of inference requests.

        For each request: reads 'input_image' (image batch), 'window_size'
        (per-image (w, h)) and 'step_ratio' (per-image stride ratios), crops
        every image with sliding_window(), and returns 'output_crop_images'
        (crops with a leading batch dim of 1) and 'num_wh' (per-image
        [num_w, num_h] as int32).

        NOTE(review): batch_output is overwritten on every loop iteration, so
        when input_image.shape[0] > 1 only the LAST image's crops are returned
        (and batch_output is unbound for an empty batch) — this looks like it
        assumes batch size 1; confirm against the model config.
        """
        print('running {}'.format(self.model_name))
        # print(type(requests)) #List
        responses = []
        for request in requests:
            # print(type(request))
            input_image = pb_utils.get_input_tensor_by_name(request, 'input_image')
            window_size = pb_utils.get_input_tensor_by_name(request, 'window_size').as_numpy()
            step_ratio = pb_utils.get_input_tensor_by_name(request, 'step_ratio').as_numpy()
            # print(type(input_images))
            # print(input_images.shape)

            # Zero-copy conversion to torch.Tensor via DLPack, see
            # https://github.com/triton-inference-server/python_backend/blob/main/README.md
            input_image = from_dlpack(input_image.to_dlpack())
            # Move to the configured device (no-op if already there).
            input_image = input_image.to(self.torch_device)

            num_wh_list = []
            for i in range(input_image.shape[0]):
                # Per-image crop; window_size/step_ratio rows are assumed to
                # align one-to-one with the image batch — TODO confirm.
                image = input_image[i, :, :, :]
                batch_output, num_w, num_h = sliding_window(image, (window_size[i,0], window_size[i,1]), (step_ratio[i,0], step_ratio[i,1]))
                num_wh_list.append(np.array([num_w, num_h], dtype=np.int32))

            # print(batch_output.device)
            # Add a leading batch dimension of 1 for the downstream model.
            batch_output = torch.unsqueeze(batch_output, dim=0)
            num_wh_list = np.vstack(num_wh_list)
            # print(batch_output.shape)

            # pb_utils.Tensor is built from a numpy array, so pull the data
            # back to host memory first when it lives on an accelerator.
            if self.torch_device != 'cpu':
                batch_output = batch_output.cpu()
            batch_output = batch_output.numpy()

            inference_response = pb_utils.InferenceResponse(
                output_tensors=[
                    pb_utils.Tensor(
                        "output_crop_images",
                        batch_output,
                    ),
                    pb_utils.Tensor(
                        "num_wh",
                        num_wh_list,
                    ),
                ]
            )
            responses.append(inference_response)

        return responses


    def finalize(self):
        """Called by Triton at model unload; logs shutdown only."""
        self.logger.log_info('Finalizing {}'.format(self.model_name))
