import os 
import nvcv
import torch
import cvcuda
import logging
import numpy as np
import tensorrt as trt
from collections import OrderedDict,namedtuple



class PreprocessorCvcuda:
    """Preprocesses batches of NHWC frames into normalized NCHW tensors with CV-CUDA."""

    # docs_tag: begin_init_preprocessorcvcuda
    def __init__(self, device_id):
        """
        :param device_id: index of the CUDA device the preprocessing runs on.
        """
        self.logger = logging.getLogger(__name__)
        self.device_id = device_id
        # Scale factor mapping 8-bit pixel values [0, 255] into [0, 1].
        self.scale = 1 / 255
        self.logger.info("Using CVCUDA as preprocessor.")

    # docs_tag: begin_call_preprocessorcvcuda
    def __call__(self, frame_nhwc, out_size):
        """
        Resize, color-convert, normalize and reformat a batch of frames.

        :param frame_nhwc: CV-CUDA tensor in NHWC layout (assumed BGR channel
            order, given the BGR2RGB conversion below — TODO confirm with the
            decoder that produces these frames).
        :param out_size: target resolution as (width, height).
        :return: tuple of (original NHWC frame, resized, rgb, normalized NCHW);
            the intermediates are returned so their buffers stay alive for
            any downstream reuse.
        """
        resized = cvcuda.resize(
            frame_nhwc,
            (
                frame_nhwc.shape[0],  # batch size unchanged
                out_size[1],          # height
                out_size[0],          # width
                frame_nhwc.shape[3],  # channel count unchanged
            ),
            cvcuda.Interp.LINEAR,
        )
        # Convert BGR to RGB.
        rgb_tensor = cvcuda.cvtcolor(resized, cvcuda.ColorConversion.BGR2RGB)
        # Convert to floating point range 0-1. Fix: use the scale attribute
        # declared in __init__ instead of duplicating the magic constant.
        normalized = cvcuda.convertto(rgb_tensor, np.float32, scale=self.scale)
        # Convert it to NCHW layout and return it.
        normalized = cvcuda.reformat(normalized, "NCHW")
        return (
            frame_nhwc,
            resized,
            rgb_tensor,
            normalized,
        )


class PostprocessorCvcuda:
    """Decodes PeopleNet grid-box network outputs into image-space bounding
    boxes, filters them with CV-CUDA NMS, and hands the surviving boxes to a
    bounding-box utility for rendering on the input frames."""

    def __init__(
        self,
        confidence_threshold,
        iou_threshold,
        device_id,
        output_layout,
        gpu_output,
        batch_size,
        cvcuda_perf,
    ):
        """
        :param confidence_threshold: minimum score for a box to survive NMS.
        :param iou_threshold: IoU above which overlapping boxes are suppressed.
        :param device_id: index of the CUDA device used for all GPU work.
        :param output_layout: "NCHW" or "NHWC" layout of the rendered output.
        :param gpu_output: if True, return a CUDA torch tensor; otherwise a
            host-side numpy array.
        :param batch_size: expected batch size (used to size cached buffers).
        :param cvcuda_perf: profiling helper exposing push_range/pop_range.
        """
        # docs_tag: begin_init_postprocessorcvcuda
        self.logger = logging.getLogger(__name__)
        self.confidence_threshold = confidence_threshold
        self.iou_threshold = iou_threshold
        self.device_id = device_id
        self.output_layout = output_layout
        self.gpu_output = gpu_output
        self.batch_size = batch_size
        self.cvcuda_perf = cvcuda_perf

        # The Peoplenet model uses Gridbox system which divides an input image into a grid and
        # predicts four normalized bounding-box parameters for each grid.
        # The number of grid boxes is determined by the model architecture.
        # For peoplenet model, the 960x544 input image is divided into 60x34 grids.
        self.stride = 16
        self.bbox_norm = 35
        self.offset = 0.5
        self.network_width = 960
        self.network_height = 544
        self.num_rows = int(self.network_height / self.stride)
        self.num_cols = int(self.network_width / self.stride)
        self.num_classes = 3  # Number of classes the model is trained on
        self.bboxutil = BoundingBoxUtilsCvcuda(
            self.cvcuda_perf
        )  # Initializes the Bounding Box utils
        # Cached grid-center buffers, lazily built on first interpolate() call
        # (and rebuilt whenever the runtime batch size changes).
        self.center_x = None
        self.center_y = None
        self.x_values = None
        self.y_values = None

        self.logger.info("Using CVCUDA as post-processor.")

    # docs_tag: end_init_postprocessorcvcuda

    def interpolate(self, boxes_pyt, image_scale_x, image_scale_y, batch_size):
        """
        Translates the bounding boxes from the grid layout to the image layout.

        NOTE: boxes_pyt is modified in place and also returned.

        :param boxes_pyt: raw box tensor of shape [N, C*4, rows, cols] — the
            4 per-class coordinates are in left, bottom, right, top order.
        :param image_scale_x: original image width / network input width.
        :param image_scale_y: original image height / network input height.
        :param batch_size: batch size of boxes_pyt.
        :return: boxes_pyt permuted to [N, rows, cols, C*4] and denormalized
            to original-image pixel coordinates.
        """
        # docs_tag: begin_interpolate

        # Buffer batch size needs to be updated if batch size is modified
        if (
            self.center_x is None
            or self.center_y is None
            or self.batch_size != batch_size
        ):
            self.center_x = torch.zeros(
                [batch_size, self.num_rows, self.num_cols]
            ).cuda(device=self.device_id)
            self.center_y = torch.zeros(
                [batch_size, self.num_rows, self.num_cols]
            ).cuda(device=self.device_id)
            self.y_values = torch.full([self.num_cols], 1).cuda(device=self.device_id)
            self.x_values = torch.arange(0, self.num_cols).cuda(device=self.device_id)

            # Denormalize the bounding boxes
            # Compute the center of each grid
            self.cvcuda_perf.push_range("forloop1")
            for r in range(0, self.num_rows):
                self.center_y[:, r, :] = (
                    self.y_values * r * self.stride + self.offset
                ) / self.bbox_norm
                self.center_x[:, r, :] = (
                    self.x_values * self.stride + self.offset
                ) / self.bbox_norm

            self.cvcuda_perf.pop_range()

            # Bug fix: remember which batch size the cached grids were built
            # for. Without this, any call whose batch size differs from the
            # constructor's value would rebuild the buffers on EVERY frame.
            self.batch_size = batch_size

        # The raw bounding boxes shape is [N, C*4, X, Y]
        # Where N is batch size, C is number of classes, 4 is the bounding box coordinates,
        # X is the row index of the grid, Y is the column index of the grid
        # The order of the coordinates is left, bottom, right, top
        self.cvcuda_perf.push_range("forloop2")
        boxes_pyt = boxes_pyt.permute(0, 2, 3, 1)

        # Shift the grid center, then apply the model's bounding box scale,
        # converting normalized grid offsets into image pixel coordinates.
        for c in range(self.num_classes):
            boxes_pyt[:, :, :, 4 * c + 0] -= self.center_x
            boxes_pyt[:, :, :, 4 * c + 1] += self.center_y
            boxes_pyt[:, :, :, 4 * c + 2] += self.center_x
            boxes_pyt[:, :, :, 4 * c + 3] -= self.center_y
            # Apply the bounding box scale of the model
            boxes_pyt[:, :, :, 4 * c + 0] *= -self.bbox_norm * image_scale_x
            boxes_pyt[:, :, :, 4 * c + 1] *= self.bbox_norm * image_scale_y
            boxes_pyt[:, :, :, 4 * c + 2] *= self.bbox_norm * image_scale_x
            boxes_pyt[:, :, :, 4 * c + 3] *= -self.bbox_norm * image_scale_y

        self.cvcuda_perf.pop_range()
        return boxes_pyt

    # docs_tag: end_interpolate

    def __call__(self, raw_boxes_pyt, raw_scores_pyt, frame_nhwc):
        """
        Full post-processing pass for one batch.

        :param raw_boxes_pyt: raw network box output, shape [N, C*4, rows, cols].
        :param raw_scores_pyt: raw per-class score output (grid layout).
        :param frame_nhwc: original NHWC frames the boxes are rendered onto.
        :return: rendered frames in self.output_layout, as a CUDA torch tensor
            when self.gpu_output is True, else a host numpy array.
        """
        self.cvcuda_perf.push_range("postprocess.cvcuda")

        # docs_tag: begin_call_filterbboxcvcuda
        self.cvcuda_perf.push_range("interpolate")
        batch_size = raw_boxes_pyt.shape[0]
        image_scale_x = frame_nhwc.shape[2] / self.network_width
        image_scale_y = frame_nhwc.shape[1] / self.network_height
        # Interpolate bounding boxes to original image resolution
        interpolated_boxes_pyt = self.interpolate(
            raw_boxes_pyt, image_scale_x, image_scale_y, batch_size
        )
        self.cvcuda_perf.pop_range()

        self.cvcuda_perf.push_range("pre-nms")
        # Flatten per-grid scores into one score vector per batch item.
        raw_scores_pyt = raw_scores_pyt.permute(0, 2, 3, 1)
        batch_scores_pyt = torch.flatten(raw_scores_pyt, start_dim=1, end_dim=3)

        # Apply NMS to filter the bounding boxes based on the confidence threshold
        # and the IOU threshold.
        batch_bboxes_pyt = torch.flatten(interpolated_boxes_pyt, start_dim=1, end_dim=2)
        batch_bboxes_pyt = batch_bboxes_pyt.reshape(batch_size, -1, 4)
        # Convert from left, bottom, right, top format to x, y, w, h format
        batch_bboxes_pyt[:, :, [1, 3]] = batch_bboxes_pyt[:, :, [3, 1]]
        batch_bboxes_pyt[:, :, 2] = (
            batch_bboxes_pyt[:, :, 2] - batch_bboxes_pyt[:, :, 0]
        )
        batch_bboxes_pyt[:, :, 3] = (
            batch_bboxes_pyt[:, :, 3] - batch_bboxes_pyt[:, :, 1]
        )
        # Convert to int16 - the data type required by the CV-CUDA NMS.
        batch_bboxes_pyt = batch_bboxes_pyt.to(
            torch.int16, memory_format=torch.contiguous_format
        )

        # Wrap torch tensor as cvcuda array
        cvcuda_boxes = cvcuda.as_tensor(batch_bboxes_pyt)
        cvcuda_scores = cvcuda.as_tensor(batch_scores_pyt.contiguous().cuda())
        self.cvcuda_perf.pop_range()

        # Apply non-maximum suppression on the bounding boxes. CV-CUDA NMS will not change
        # the shape of the resulting tensor. It will still have the same shape as the
        # input tensor. It will simply return an output boolean mask with suppressed bboxes
        # as zeros and selected bboxes as ones. Later we will filter those ones out.
        self.cvcuda_perf.push_range("nms")
        nms_masks = cvcuda.nms(
            cvcuda_boxes, cvcuda_scores, self.confidence_threshold, self.iou_threshold
        )
        nms_masks_pyt = torch.as_tensor(
            nms_masks.cuda(), device="cuda:%d" % self.device_id, dtype=torch.bool
        )
        self.cvcuda_perf.pop_range()

        # Give these NMS bounding boxes to our helper class which will filter the zeros
        # out and render bounding boxes with blur in them on the input frame.
        # docs_tag: start_outbuffer
        self.cvcuda_perf.push_range("bboxutil")
        frame_nhwc = self.bboxutil(batch_bboxes_pyt, nms_masks_pyt, frame_nhwc)
        self.cvcuda_perf.pop_range()
        if self.output_layout == "NCHW":
            render_output = cvcuda.reformat(frame_nhwc, "NCHW")
        else:
            assert self.output_layout == "NHWC"
            render_output = frame_nhwc

        if self.gpu_output:
            # Keep the result on the GPU as a torch tensor.
            render_output = torch.as_tensor(
                render_output.cuda(), device="cuda:%d" % self.device_id
            )
        else:
            # Copy back to host memory as a numpy array.
            render_output = torch.as_tensor(render_output.cuda()).cpu().numpy()

        self.cvcuda_perf.pop_range()  # postprocess

        # Return the original nhwc frame with bboxes rendered and ROI's blurred
        return render_output
        # docs_tag: end_outbuffer



class TRT_Infer:
    """Thin wrapper around a serialized TensorRT engine for synchronous inference."""

    def __init__(self, engine_dir):
        """
        Deserialize the engine and pre-allocate one device buffer per binding.

        :param engine_dir: path to the serialized TensorRT engine file.
        """
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        print(f"loading {engine_dir} for TensorRT inference")
        self.Binding = namedtuple("Binding", ("name", "dtype", "shape", "data", "ptr"))
        self.trt_log = trt.Logger(trt.Logger.INFO)
        with open(engine_dir, "rb") as f, trt.Runtime(self.trt_log) as runtime:
            self.trt_model = runtime.deserialize_cuda_engine(f.read())
        self.bindings = OrderedDict()
        # Backward-compat alias: this attribute was previously exposed under a
        # misspelled name; keep it pointing at the same dict for old callers.
        self.bingdings = self.bindings
        self.fp16 = False
        for idx in range(self.trt_model.num_bindings):
            name = self.trt_model.get_binding_name(idx)
            dtype = trt.nptype(self.trt_model.get_binding_dtype(idx))
            shape = self.trt_model.get_binding_shape(idx)
            # Build the input/output bindings: pre-allocate a buffer for each
            # binding and remember its raw device pointer for execute_v2().
            data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(self.device)
            self.bindings[name] = self.Binding(name, dtype, shape, data, int(data.data_ptr()))
            if self.trt_model.binding_is_input(idx) and dtype == np.float16:
                self.fp16 = True
        # Binding name -> device address, in engine binding order; this is the
        # list execute_v2() consumes.
        self.binding_addr = OrderedDict(
            (bind_name, binding.ptr) for bind_name, binding in self.bindings.items()
        )
        self.context = self.trt_model.create_execution_context()

    def __call__(self, img_tensor):
        """
        Run one synchronous inference pass.

        :param img_tensor: input image tensor; moved to the GPU here. Assumed
            to already match the engine's "images" binding shape and dtype —
            TODO confirm with the preprocessor.
        :return: the "output0" tensor (the engine writes into this
            pre-allocated device buffer in place).
        """
        # Bug fix: point the "images" binding ADDRESS at the caller's tensor.
        # The original code overwrote self.bindings["images"] (the namedtuple)
        # with a raw pointer, but execute_v2 reads from self.binding_addr, so
        # the engine kept consuming the stale, never-filled init-time buffer.
        self.binding_addr["images"] = int(
            img_tensor.cuda().__cuda_array_interface__["data"][0]
        )
        self.context.execute_v2(list(self.binding_addr.values()))
        y = self.bindings["output0"].data
        return y
        
        
                
            

        
        
        
