import traceback

import numpy as np
import cv2


class OMInferenceWrapper:  # acl
    """Inference wrapper for Huawei Ascend ``.om`` models via the ACL runtime.

    Lifecycle: construct (init ACL, bind device, create context) ->
    ``load_model()`` (load ``.om``, allocate device datasets) -> ``run()``
    any number of times -> ``destroy()``.

    NOTE(review): ``run()`` only copies input/output index 0, so this
    effectively supports single-input / single-output models only.
    """
    # Import inside the class body so this module still imports on machines
    # without the Ascend toolkit; `acl` stays None there and any later use
    # will raise AttributeError.
    try:
        import acl
    except ImportError:
        print("ACL not found")
        acl = None
    # === Manually defined ACL memory-allocation policy constants ===
    ACL_MEM_MALLOC_HUGE_FIRST = 0
    ACL_MEM_MALLOC_HUGE_ONLY = 1
    ACL_MEM_MALLOC_NORMAL_ONLY = 2

    # Direction constants for acl.rt.memcpy.
    ACL_MEMCPY_HOST_TO_DEVICE = 1
    ACL_MEMCPY_DEVICE_TO_HOST = 2

    def __init__(self, device_id=0):
        """Initialise ACL, bind *device_id* and create an execution context."""
        self.device_id = device_id
        self.context = None
        self.model_id = None
        self.model_desc = None
        self.input_dataset = None
        self.output_dataset = None
        self.input_buffers = []   # list of {"buffer": device_ptr, "size": bytes}
        self.output_buffers = []  # same layout as input_buffers

        # Initialise the ACL runtime; every ACL call returns a status code
        # where 0 means success.
        ret = OMInferenceWrapper.acl.init()
        assert ret == 0, "ACL init failed"
        ret = OMInferenceWrapper.acl.rt.set_device(self.device_id)
        assert ret == 0, "Set device failed"
        self.context, ret = OMInferenceWrapper.acl.rt.create_context(self.device_id)
        assert ret == 0, "Create context failed"

    def load_model(self, model_path):
        """Load the ``.om`` file and allocate its device-side I/O datasets."""
        self.model_id, ret = OMInferenceWrapper.acl.mdl.load_from_file(model_path)
        assert ret == 0, "Load model failed"
        self.model_desc = OMInferenceWrapper.acl.mdl.create_desc()
        ret = OMInferenceWrapper.acl.mdl.get_desc(self.model_desc, self.model_id)
        assert ret == 0, "Get model desc failed"

        self._allocate_input_buffers()
        self._allocate_output_buffers()

    def _allocate_input_buffers(self):
        """Allocate one device buffer per model input and register it in a dataset."""
        self.input_dataset = OMInferenceWrapper.acl.mdl.create_dataset()
        num_inputs = OMInferenceWrapper.acl.mdl.get_num_inputs(self.model_desc)
        print(f'acl.mdl.get_num_inputs(self.model_desc) {num_inputs}')

        for i in range(num_inputs):  # TODO confirm: only a single input is expected downstream
            size = OMInferenceWrapper.acl.mdl.get_input_size_by_index(self.model_desc, i)
            print(f'acl.mdl.get_input_size_by_index(self.model_desc, i) {size}')
            print(
                f'acl.mdl.get_input_dims(self.model_desc, i) {OMInferenceWrapper.acl.mdl.get_input_dims(self.model_desc, i)}')
            buffer, ret = OMInferenceWrapper.acl.rt.malloc(size, OMInferenceWrapper.ACL_MEM_MALLOC_HUGE_FIRST)
            assert ret == 0, f"Malloc input buffer {i} failed"
            data_buffer = OMInferenceWrapper.acl.create_data_buffer(buffer, size)
            OMInferenceWrapper.acl.mdl.add_dataset_buffer(self.input_dataset, data_buffer)
            self.input_buffers.append({"buffer": buffer, "size": size})

    def _allocate_output_buffers(self):
        """Allocate one device buffer per model output and register it in a dataset."""
        self.output_dataset = OMInferenceWrapper.acl.mdl.create_dataset()
        num_outputs = OMInferenceWrapper.acl.mdl.get_num_outputs(self.model_desc)
        print(f'acl.mdl.get_num_outputs(self.model_desc) {num_outputs}')
        for i in range(num_outputs):
            size = OMInferenceWrapper.acl.mdl.get_output_size_by_index(self.model_desc, i)
            print(f'acl.mdl.get_output_size_by_index(self.model_desc, i) {size}')
            buffer, ret = OMInferenceWrapper.acl.rt.malloc(size, OMInferenceWrapper.ACL_MEM_MALLOC_HUGE_FIRST)
            assert ret == 0, f"Malloc output buffer {i} failed"
            data_buffer = OMInferenceWrapper.acl.create_data_buffer(buffer, size)
            OMInferenceWrapper.acl.mdl.add_dataset_buffer(self.output_dataset, data_buffer)
            self.output_buffers.append({"buffer": buffer, "size": size})

    def run(self, input_data):
        """Copy *input_data* to the device, execute the model and return output 0.

        The raw bytes of output 0 are reinterpreted as float32 and reshaped
        to the dims declared in the model description.
        """
        input_ptr = OMInferenceWrapper.acl.util.numpy_to_ptr(input_data)
        input_size = input_data.size * input_data.itemsize
        print(f'input_data.size * input_data.itemsize {input_size}')

        # Copy input to device memory; note input_size may be smaller than
        # input_buffers[0]["size"] (the buffer is sized from the model desc).
        OMInferenceWrapper.acl.rt.memcpy(self.input_buffers[0]["buffer"],
                                         self.input_buffers[0]["size"],
                                         input_ptr,
                                         input_size,
                                         OMInferenceWrapper.ACL_MEMCPY_HOST_TO_DEVICE)

        # Execute inference (synchronous).
        ret = OMInferenceWrapper.acl.mdl.execute(self.model_id, self.input_dataset, self.output_dataset)
        assert ret == 0, "Model execute failed"

        # Copy output 0 back to a host-side byte buffer.
        output_host = np.empty([self.output_buffers[0]["size"]], dtype=np.byte)
        OMInferenceWrapper.acl.rt.memcpy(OMInferenceWrapper.acl.util.numpy_to_ptr(output_host),
                                         output_host.nbytes,
                                         self.output_buffers[0]["buffer"],
                                         self.output_buffers[0]["size"],
                                         OMInferenceWrapper.ACL_MEMCPY_DEVICE_TO_HOST)

        # Decode the output (for YOLOv7 this is [batch, num_boxes, 85]).
        output_tensor = np.frombuffer(output_host, dtype=np.float32)
        output_shape = OMInferenceWrapper.acl.mdl.get_output_dims(self.model_desc, 0)[0]["dims"]
        output_tensor = output_tensor.reshape(output_shape)
        return output_tensor

    def destroy(self):
        """Release model, device buffers, context and the ACL runtime.

        NOTE(review): `if self.model_id:` skips cleanup when model_id == 0,
        which may be a valid handle — `is not None` would be safer; confirm
        against the pyACL API before changing.
        """
        if self.model_id:
            OMInferenceWrapper.acl.mdl.unload(self.model_id)
        if self.model_desc:
            OMInferenceWrapper.acl.mdl.destroy_desc(self.model_desc)
        for buf in self.input_buffers:
            OMInferenceWrapper.acl.rt.free(buf["buffer"])
        for buf in self.output_buffers:
            OMInferenceWrapper.acl.rt.free(buf["buffer"])
        if self.context:
            OMInferenceWrapper.acl.rt.destroy_context(self.context)
        OMInferenceWrapper.acl.rt.reset_device(self.device_id)
        OMInferenceWrapper.acl.finalize()


class ONNXInferenceWrapper:  # onnxruntime
    """Inference wrapper backed by onnxruntime.

    The import is guarded like in the sibling wrappers (OM / PT) so that
    this module still imports on hosts without onnxruntime installed;
    `ort` is then None and constructing this class will fail.
    """
    try:
        import onnxruntime as ort
    except ImportError:
        print("onnxruntime not found")
        ort = None

    def __init__(self, model_path):
        """Open the ONNX model and cache its input/output tensor names."""
        self.onnx_infer_handle = ONNXInferenceWrapper.ort.InferenceSession(model_path)
        self.input_name = self.onnx_infer_handle.get_inputs()[0].name
        self.output_names = [out.name for out in self.onnx_infer_handle.get_outputs()]

    def run(self, input_numpy):
        """Run the session on one numpy input and return the first output array."""
        outputs = self.onnx_infer_handle.run(self.output_names, {self.input_name: input_numpy})
        return outputs[0]


class PTInferenceWrapper:  # pytorch
    """Inference wrapper for YOLOv7 ``.pt`` checkpoints via torch.

    The imports are guarded separately so that a missing yolov7 repo
    (``models.experimental``) does not also disable an otherwise working
    torch installation; failed imports leave the attribute as None.
    """
    try:
        import torch
    except Exception:  # not bare except: let KeyboardInterrupt/SystemExit through
        print(traceback.format_exc())
        torch = None
    try:
        from models.experimental import attempt_load  # needs the yolov7 repo on PYTHONPATH
    except Exception:
        print(traceback.format_exc())
        attempt_load = None

    def __init__(self, pt_path):
        """Load the YOLOv7 checkpoint on CPU and switch it to eval mode."""
        self.model = PTInferenceWrapper.attempt_load(pt_path, map_location='cpu')
        self.model.eval()

    def run(self, input_numpy):
        """Forward one numpy batch; YOLOv7 returns a tuple, element 0 is the detection head."""
        with PTInferenceWrapper.torch.no_grad():
            input_tensor = PTInferenceWrapper.torch.from_numpy(input_numpy)
            pred = self.model(input_tensor)[0]
        return pred.numpy()


class YOLOv7Inference:
    """End-to-end YOLOv7 detector: letterbox -> backend inference -> decode + NMS.

    The backend engine is selected from the model file extension
    (``.om`` / ``.onnx`` / ``.pt``).
    """

    def __init__(self, model_path):
        """Build the backend engine for *model_path* and set default thresholds.

        Raises:
            Exception: if the file extension is not .om, .onnx or .pt.
        """
        # Match the dotted extension: a bare endswith('om') would also match
        # unrelated names (e.g. "custom"), and endswith('pt') matches ".opt".
        if model_path.endswith('.om'):
            self.engine = OMInferenceWrapper()
            self.engine.load_model(model_path)
        elif model_path.endswith('.onnx'):
            self.engine = ONNXInferenceWrapper(model_path)
        elif model_path.endswith('.pt'):
            self.engine = PTInferenceWrapper(model_path)
        else:
            raise Exception('Unsupported model format')
        # Runtime state shared between _preprocess and _postprocess.
        self.rt_var = {
            'iou_param': 0.5,    # NMS IoU threshold
            'conf_param': 0.25,  # objectness * class confidence threshold
        }

    def _letterbox(self, img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True,
                   stride=32):
        """Resize and pad *img* to *new_shape* while keeping aspect ratio.

        Returns:
            (img, ratio, (dw, dh)): padded image, (width, height) scale
            ratios, and per-side padding in pixels.
        """
        shape = img.shape[:2]  # current shape [height, width]
        if isinstance(new_shape, int):
            new_shape = (new_shape, new_shape)

        # Scale ratio (new / old)
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
        if not scaleup:  # only scale down, do not scale up (for better test mAP)
            r = min(r, 1.0)

        # Compute padding
        ratio = r, r  # width, height ratios
        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
        dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
        if auto:  # minimum rectangle: pad only up to the next stride multiple
            dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
        elif scaleFill:  # stretch to exactly new_shape, no padding
            dw, dh = 0.0, 0.0
            new_unpad = (new_shape[1], new_shape[0])
            ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

        dw /= 2  # divide padding into 2 sides
        dh /= 2

        if shape[::-1] != new_unpad:  # resize
            img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
        # +/- 0.1 before rounding splits odd padding consistently between sides.
        top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
        left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
        img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
        return img, ratio, (dw, dh)

    def _nms(self, boxes, scores, iou_threshold=0.45):
        """
        Pure NumPy non-maximum suppression.

        Args:
            boxes: shape [N, 4], format (x1, y1, x2, y2)
            scores: shape [N]
            iou_threshold: float, boxes with IoU above this are suppressed
        Returns:
            keep: list of indices to keep, in descending score order
        """
        if boxes.size == 0:
            return []

        x1 = boxes[:, 0]
        y1 = boxes[:, 1]
        x2 = boxes[:, 2]
        y2 = boxes[:, 3]

        # +1 uses the inclusive-pixel area convention (legacy YOLO style).
        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = scores.argsort()[::-1]  # descending score order

        keep = []
        while order.size > 0:
            i = order[0]
            keep.append(i)

            # Intersection of the current best box with all remaining boxes.
            xx1 = np.maximum(x1[i], x1[order[1:]])
            yy1 = np.maximum(y1[i], y1[order[1:]])
            xx2 = np.minimum(x2[i], x2[order[1:]])
            yy2 = np.minimum(y2[i], y2[order[1:]])

            w = np.maximum(0.0, xx2 - xx1 + 1)
            h = np.maximum(0.0, yy2 - yy1 + 1)
            inter = w * h

            iou = inter / (areas[i] + areas[order[1:]] - inter)
            inds = np.where(iou <= iou_threshold)[0]
            order = order[inds + 1]  # +1 because iou was computed against order[1:]

        return keep

    def _preprocess(self, image_path, input_h=640, input_w=640):
        """Letterbox + normalise an image (path or BGR array) into a 1x3xHxW float32 batch."""
        img = cv2.imread(image_path) if isinstance(image_path, str) else image_path

        # auto=False pads all the way to the full input_w x input_h square.
        img, ratio, (dw, dh) = self._letterbox(img, new_shape=(input_w, input_h), auto=False)
        # Stash the transform so _postprocess can map boxes back to the original image.
        self.rt_var['ratio'] = ratio
        self.rt_var['pad'] = (dw, dh)
        img = img[..., ::-1]  # BGR to RGB
        img = img.transpose(2, 0, 1)  # CHW
        img = img[None, ...]  # BCHW
        img = img.astype(np.float32) / 255.0
        img = np.ascontiguousarray(img)

        return img

    def _postprocess(self, output_btc):
        '''
        Decode raw YOLOv7 output for batch item 0 into detections.

        Args:
            output_btc: numpy [batch, num_boxes, 5 + num_classes];
                per box: 4 (cx, cy, w, h) + 1 (obj_conf) + n (cls_conf)

        Returns:
            list of {"bbox": [x1, y1, x2, y2], "score": float, "class_id": int}
            in original-image pixel coordinates; [] when nothing survives.
        '''
        output_tc = output_btc[0]
        # Fused confidence = objectness * best class probability.
        fuseconf_t = output_tc[:, 4] * np.max(output_tc[:, 5:], axis=1)
        valid_output_tc = output_tc[fuseconf_t > self.rt_var['conf_param']]  # confidence filter

        valid_fuseconf_t = valid_output_tc[:, 4] * np.max(valid_output_tc[:, 5:], axis=1)
        valid_clsid_t = np.argmax(valid_output_tc[:, 5:], axis=1)
        # Convert center-size boxes to corner format for NMS.
        cx, cy, w, h = valid_output_tc[:, 0], valid_output_tc[:, 1], valid_output_tc[:, 2], valid_output_tc[:, 3]
        valid_xyxy_t4 = np.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], axis=1)

        # Non-maximum suppression.
        keep_indices = self._nms(valid_xyxy_t4, valid_fuseconf_t, iou_threshold=self.rt_var['iou_param'])
        if len(keep_indices) == 0:
            return []
        valid_xyxy_t4 = valid_xyxy_t4[keep_indices]
        valid_fuseconf_t = valid_fuseconf_t[keep_indices]
        valid_clsid_t = valid_clsid_t[keep_indices]

        # Map back to original-image coordinates: undo the letterbox padding
        # first (the scaled image was centered inside the padded canvas),
        # then undo the scale.
        ratio = self.rt_var['ratio']
        dw, dh = self.rt_var['pad']
        print(f'ratio: {ratio}, pad: {dw, dh}')
        valid_xyxy_t4[:, [0, 2]] -= dw  # x1, x2: remove left/right padding
        valid_xyxy_t4[:, [1, 3]] -= dh  # y1, y2: remove top/bottom padding
        valid_xyxy_t4[:, [0, 2]] /= ratio[0]
        valid_xyxy_t4[:, [1, 3]] /= ratio[1]

        # Format results as [x1, y1, x2, y2, score, class_id] dicts.
        detections = []
        for i in range(len(valid_xyxy_t4)):
            x1, y1, x2, y2 = valid_xyxy_t4[i]
            detections.append({
                "bbox": [int(x1), int(y1), int(x2), int(y2)],
                "score": float(valid_fuseconf_t[i]),
                "class_id": int(valid_clsid_t[i]),
            })
        return detections

    def infer(self, image):
        """Run the full pipeline on *image* (path or BGR array) and return detections."""
        input_numpy = self._preprocess(image)
        print("YOLOv7OMInferenceWrapper input shape:", input_numpy.shape)
        output_numpy = self.engine.run(input_numpy)
        print("YOLOv7OMInferenceWrapper output shape:", output_numpy.shape)

        detections = self._postprocess(output_numpy)
        print("Detections:", detections)
        return detections


def ttest_om_inference():
    """Smoke-test the raw OM wrapper with a random batch-8 input."""
    engine = OMInferenceWrapper(device_id=0)
    engine.load_model("yolov7-dump-truck-20251111060914.om")

    # Standard YOLOv7 input resolution.
    IMG_SIZE = 640
    batch = np.random.rand(1, 3, IMG_SIZE, IMG_SIZE).astype(np.float32)
    batch = np.repeat(batch, 8, axis=0)  # tile 8x along the batch dimension
    result = engine.run(batch)
    print("Output shape:", result.shape)  # e.g., (1, 25200, 85)
    print("Output:", result[0, ...])
    print("Output:", result[1, ...])
    engine.destroy()


def ttest_yolov7_inference():
    """Run the end-to-end detector on test.jpg and write an annotated output.jpg."""
    # Swap the extension (.om / .onnx / .pt) to exercise a different backend.
    infer_handle = YOLOv7Inference("yolov7-dump-truck-20251111060914.pt")
    image = cv2.imread("test.jpg")

    detections = infer_handle.infer(image)

    # Draw each detection onto a copy of the input image.
    annotated = image.copy()
    for det in detections:
        x1, y1, x2, y2 = det["bbox"]
        cv2.rectangle(annotated, (x1, y1), (x2, y2), (0, 255, 0), 2)
        label = f"{det['class_id']}: {det['score']:.2f}"
        cv2.putText(annotated, label, (x1, y1 - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
    cv2.imwrite("output.jpg", annotated)


def ttest_check():
    """Run all three backends on the same image and print their outputs side by side."""
    model_paths = [r"yolov7-dump-truck-20251111060914.pt", r"yolov7-dump-truck-20251111060914.onnx",
                   r"yolov7-dump-truck-20251111060914.om"]

    image = cv2.imread("test.jpg")
    results = []
    for path in model_paths:
        try:
            results.append(YOLOv7Inference(path).infer(image))
        except Exception as err:  # record the failure so the sweep continues
            results.append([err])
    print('\n\n')
    for path, outcome in zip(model_paths, results):
        print(f"{path} 输出: {outcome}")


if __name__ == "__main__":
    # Entry point: uncomment exactly one of the smoke tests below.
    # ttest_om_inference()
    ttest_yolov7_inference()
    # ttest_check()

# pt   [{'bbox': [1996, 572, 2751, 1132], 'score': 0.9794543981552124, 'class_id': 0}, {'bbox': [2034, 812, 2297, 887], 'score': 0.9406531453132629, 'class_id': 1}, {'bbox': [2457, 808, 2546, 854], 'score': 0.8651599884033203, 'class_id': 1}]
# onnx [{'bbox': [1996, 572, 2751, 1132], 'score': 0.9794544577598572, 'class_id': 0}, {'bbox': [2034, 812, 2297, 887], 'score': 0.9406532645225525, 'class_id': 1}, {'bbox': [24
