import numpy as np
import torch
from torch.utils.dlpack import from_dlpack, to_dlpack

import json
import triton_python_backend_utils as pb_utils


def bgr2rgb_kernel(mat):
    """Reverse the channel axis (dim 2) of an image tensor.

    Reversing dim 2 swaps BGR pixel ordering to RGB (and vice versa).
    Assumes ``mat`` is laid out (H, W, C) -- TODO confirm with callers.
    """
    return mat.flip(2)


class TritonPythonModel:
    """Triton python_backend preprocessing model.

    Receives a batch of images as 'input_image' and returns the same
    batch with the channel axis reversed (BGR <-> RGB) as 'output_image'.
    """

    def initialize(self, args):
        """Parse the model configuration and cache parameters.

        Args:
            args: dict supplied by Triton; args['model_config'] is the
                model configuration serialized as a JSON string.
        """
        self.logger = pb_utils.Logger

        # Parse model config handed over by the Triton core.
        self.model_config = json.loads(args['model_config'])

        # Parse user-defined parameters.
        self.parameters = self.model_config['parameters']
        self.model_name = self.model_config['name']

        # NOTE(review): torch_device is read and logged but never used
        # below -- the flip runs on whatever device the input tensor
        # already lives on (original comment made the same observation).
        self.torch_device = self.parameters['torch_device']['string_value']
        self.logger.log_info('torch_device: {}'.format(self.torch_device))

        self.logger.log_info('{} started'.format(self.model_name))

    def execute(self, requests):
        """Process a list of inference requests.

        For each request, the 'input_image' tensor (assumed NHWC --
        TODO confirm against the model config) has its last axis
        reversed and is returned as 'output_image'.

        Returns:
            list of pb_utils.InferenceResponse, one per request.
        """
        # Use the Triton logger for consistency with initialize/finalize
        # instead of a bare print.
        self.logger.log_info('running {}'.format(self.model_name))
        responses = []
        for request in requests:
            input_image = pb_utils.get_input_tensor_by_name(request, 'input_image')

            # Convert to a torch.Tensor via DLPack (zero-copy), see
            # https://github.com/triton-inference-server/python_backend/blob/main/README.md
            input_image = from_dlpack(input_image.to_dlpack())

            # Flip the channel axis of the whole batch in one call:
            # flipping dim 3 of an NHWC batch is exactly equivalent to
            # flipping dim 2 of every (H, W, C) slice, so the previous
            # per-image Python loop + unsqueeze + vstack is unnecessary.
            batch_output = torch.flip(input_image, dims=[3])

            inference_response = pb_utils.InferenceResponse(
                output_tensors=[
                    pb_utils.Tensor.from_dlpack("output_image", to_dlpack(batch_output)),
                ]
            )
            responses.append(inference_response)

        return responses

    def finalize(self):
        """Log shutdown; called once when the model is unloaded."""
        self.logger.log_info('Finalizing {}'.format(self.model_name))
