import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.dlpack import from_dlpack
import json
import triton_python_backend_utils as pb_utils


def docoder_postprocess(masks, img_size):
    """Clamp decoder mask logits and resize them to the original image size.

    Args:
        masks: float tensor of shape (N, C, H, W) containing mask logits.
        img_size: (height, width) target size; values may be Python ints or
            numpy integer scalars (as produced by indexing a numpy array).

    Returns:
        Tensor of shape (N, C, height, width), values clamped to [-32, 32]
        before bilinear resizing.
    """
    # Clamp first so extreme logits do not produce large interpolation artifacts.
    masks = torch.clamp(masks, -32.0, 32.0)
    # Cast to int: img_size often comes from a numpy array (numpy scalars),
    # and F.interpolate expects plain Python ints for the target size.
    masks = F.interpolate(
        masks,
        (int(img_size[0]), int(img_size[1])),
        mode="bilinear",
        align_corners=False,
    )
    return masks


class TritonPythonModel:
    """Triton python-backend model that resizes decoder masks to original image sizes."""

    def initialize(self, args):
        """Parse the model config and cache parameters.

        Args:
            args: dict supplied by Triton; 'model_config' is a JSON string.
        """
        self.logger = pb_utils.Logger

        # parse model configs
        self.model_config = json.loads(args['model_config'])

        # parse parameters
        self.parameters = self.model_config['parameters']
        self.model_name = self.model_config['name']

        # NOTE: preprocessing here has no torch ops, so this device parameter
        # is currently unused beyond logging (kept for config compatibility).
        self.torch_device = self.parameters['torch_device']['string_value']
        self.logger.log_info('torch_device: {}'.format(self.torch_device))

        self.logger.log_info('{} started'.format(self.model_name))


    def execute(self, requests):
        """Resize each request's masks to its original image size.

        Args:
            requests: list of pb_utils.InferenceRequest, each carrying a
                'masks' tensor (N, C, H, W logits) and an 'org_img_hw'
                tensor of per-image (height, width) pairs.

        Returns:
            List of pb_utils.InferenceResponse with one 'rsz_masks' tensor each.
        """
        # Use the model logger instead of print for consistency with
        # initialize()/finalize() and so output goes through Triton's logging.
        self.logger.log_info('running {}'.format(self.model_name))
        responses = []
        for request in requests:
            masks = pb_utils.get_input_tensor_by_name(request, 'masks')
            # Zero-copy handoff from the Triton tensor into torch.
            masks_torch = from_dlpack(masks.to_dlpack())
            org_img_hw = pb_utils.get_input_tensor_by_name(request, 'org_img_hw').as_numpy()
            batch = org_img_hw.shape[0]

            # Each image may have a different original size, so resize per item.
            masks_out = []
            for i in range(batch):
                # unsqueeze(0): keep a batch dim of 1 for F.interpolate.
                mask_torch = masks_torch[i, ...].unsqueeze(0)
                mask_out = docoder_postprocess(mask_torch, (org_img_hw[i, 0], org_img_hw[i, 1]))
                masks_out.append(mask_out.cpu().numpy())

            # Stack the per-image results back into one batch array.
            masks_out = np.vstack(masks_out)

            inference_response = pb_utils.InferenceResponse(
                output_tensors=[
                    pb_utils.Tensor(
                        "rsz_masks",
                        masks_out,
                    ),
                ]
            )
            responses.append(inference_response)

        return responses


    def finalize(self):
        """Log shutdown; no resources to release."""
        self.logger.log_info('Finalizing {}'.format(self.model_name))
