import os
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import logging
import numpy as np
import cv2


# Quantization-aware calibration helper: fine-tunes scale parameters during INT8
# quantization to reduce the impact of quantization on model accuracy.
class Calibrator(trt.IInt8EntropyCalibrator):
    """INT8 entropy calibrator feeding preprocessed images to TensorRT.

    Streams up to `quantification` batches of letterboxed, BGR->RGB, CHW,
    [0, 1]-normalized float32 images from a directory into a pre-allocated
    device buffer, and reads/writes the calibration cache so repeated engine
    builds can skip recalibration.
    """

    def __init__(self, quantification=1, batch_size=1, height=640, width=640, calibration_images="", cache_file=""):
        """
        Args:
            quantification: number of calibration batches to feed TensorRT.
            batch_size: images per calibration batch.
            height: network input height in pixels.
            width: network input width in pixels.
            calibration_images: directory containing the calibration images.
            cache_file: path of the INT8 calibration cache file.
        """
        trt.IInt8EntropyCalibrator.__init__(self)
        self.index = 0
        self.batch_size = batch_size
        self.cache_file = cache_file
        self.height = height
        self.width = width
        self.img_list = [os.path.join(calibration_images, name) for name in os.listdir(calibration_images)]
        # Clamp the batch count to what the folder can actually supply so
        # next_batch() never indexes past the end of img_list.
        self.length = min(quantification, len(self.img_list) // batch_size)
        self.calibration_data = np.zeros((self.batch_size, 3, self.height, self.width), dtype=np.float32)
        # Device buffer sized for exactly one calibration batch.
        self.d_input = cuda.mem_alloc(self.calibration_data.nbytes)

    def next_batch(self):
        """Return the next batch as a contiguous float32 array, or an empty array when exhausted."""
        if self.index >= self.length:
            return np.array([])
        for i in range(self.batch_size):
            img = cv2.imread(self.img_list[i + self.index * self.batch_size])
            if img is None:
                # Unreadable / non-image file: skip it (the buffer slot keeps
                # its previous contents) instead of crashing mid-calibration.
                continue
            self.calibration_data[i] = self.preprocess(img)
        self.index += 1
        return np.ascontiguousarray(self.calibration_data, dtype=np.float32)

    def __len__(self):
        return self.length

    def get_batch_size(self):
        """TensorRT callback: batch size used during calibration."""
        return self.batch_size

    def get_batch(self, name):
        """TensorRT callback: copy one batch to the GPU and return its device pointer.

        Returns None when the calibration data is exhausted, which tells
        TensorRT to stop calibrating.
        """
        batch = self.next_batch()
        if not batch.size:
            return None
        cuda.memcpy_htod(self.d_input, batch)
        # TensorRT expects a list of *int* device pointers, one per input
        # binding; returning the raw DeviceAllocation object is unreliable.
        return [int(self.d_input)]

    def read_calibration_cache(self):
        """Return the cached calibration table if present; implicitly None otherwise."""
        # If there is a cache, use it instead of calibrating again.
        if os.path.exists(self.cache_file):
            with open(self.cache_file, "rb") as f:
                return f.read()

    def write_calibration_cache(self, cache):
        """TensorRT callback: persist the calibration table for later builds."""
        with open(self.cache_file, "wb") as f:
            f.write(cache)
            f.flush()

    def preprocess(self, image):
        """Letterbox-resize a BGR image to (height, width) and convert to a normalized CHW float32 array."""
        new_shape = [self.height, self.width]  # target (H, W) — the ONNX input shape
        shape = image.shape[:2]  # source (H, W)

        # Uniform scale that fits the image inside the target without distortion.
        scale = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
        resized_w = int(round(shape[1] * scale))
        resized_h = int(round(shape[0] * scale))
        img_new = cv2.resize(image, (resized_w, resized_h), interpolation=cv2.INTER_CUBIC)

        # Remaining pixels to pad, split between opposite borders.
        dh, dw = new_shape[0] - img_new.shape[0], new_shape[1] - img_new.shape[1]
        left, top = dw // 2, dh // 2
        right, bottom = dw - left, dh - top

        resize_img = cv2.copyMakeBorder(img_new, top, bottom, left, right, cv2.BORDER_CONSTANT,
                                        value=(114, 114, 114))  # gray letterbox padding

        # BGR -> RGB, HWC -> CHW, then scale to [0, 1].
        img = resize_img[:, :, ::-1].transpose([2, 0, 1]).astype(np.float32)
        img /= 255.0

        return img


# LOGGER = trt.Logger(trt.Logger.VERBOSE)
# LOGGER = trt.Logger(trt.Logger.WARNING)

def buildEngine(onnx_file, engine_file, quantification, q_batch_size, max_batch_size, FP16_mode, INT8_mode,
                calibration_images, calibration_cache):
    """Parse an ONNX model and serialize a TensorRT engine to `engine_file`.

    Args:
        onnx_file: path of the source ONNX model.
        engine_file: output path for the serialized engine.
        quantification: number of INT8 calibration batches.
        q_batch_size: batch size used during INT8 calibration.
        max_batch_size: maximum inference batch size (legacy builders only).
        FP16_mode: build with FP16 precision when True.
        INT8_mode: build with INT8 precision (calibrated) when True.
        calibration_images: directory of INT8 calibration images.
        calibration_cache: path of the INT8 calibration cache file.

    Exits the process with a nonzero status on parse or build failure.
    """
    # EXPLICIT_BATCH is required for ONNX parsing / dynamic input shapes.
    explicit_batch = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)

    logger = trt.Logger(trt.Logger.WARNING)
    builder = trt.Builder(logger)
    network = builder.create_network(explicit_batch)
    parser = trt.OnnxParser(network, logger)

    # max_batch_size is deprecated with explicit-batch networks and removed in
    # newer TensorRT releases; set it only where the attribute still exists.
    if hasattr(builder, "max_batch_size"):
        builder.max_batch_size = max_batch_size

    config = builder.create_builder_config()
    # 1 << 30 bytes = 1 GiB of builder workspace.
    config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 1 << 30)

    with open(onnx_file, 'rb') as model:
        print('Beginning ONNX file parsing')
        # parse() returns False on failure; report every parser error instead
        # of silently continuing with a half-built network.
        if not parser.parse(model.read()):
            for i in range(parser.num_errors):
                print(parser.get_error(i))
            print("PARSE ONNX FILE FAILED!")
            exit(1)

    print("onnx_input_shape:", network.get_input(0).shape)
    print("onnx_output_shape:", network.get_output(0).shape)
    n, c, h, w = network.get_input(0).shape

    if FP16_mode:
        config.set_flag(trt.BuilderFlag.FP16)
    elif INT8_mode:
        config.set_flag(trt.BuilderFlag.INT8)
        # Calibrator feeds images at the network's own input resolution.
        config.int8_calibrator = Calibrator(quantification, q_batch_size, h, w, calibration_images, calibration_cache)

    engine = builder.build_serialized_network(network, config)
    if engine is None:
        print("EXPORT ENGINE FAILED!")
        exit(1)  # nonzero exit so wrapping scripts can detect the failure

    # Persist the serialized engine.
    with open(engine_file, "wb") as f:
        f.write(engine)


def main():
    """Entry point: configure paths and precision, then build the engine."""
    quantification = 10  # number of INT8 calibration batches
    q_batch_size = 1  # batch size during calibration
    max_batch_size = 1  # max inference batch size; dynamic batch requires a dynamic-batch ONNX export
    calibration_images = r"data\coco128\images\trtint8"  # folder of images for INT8 calibration
    onnx_file = r"./weights/yolov8s-pose.onnx"

    engine_file = "./weights/yolov8s-pose_int8.engine"
    calibration_cache = "./weights/yolov8s-pose_int8.cache"

    '''
    FP16_mode = False & INT8_mode = False  -> FP32 (no quantization)
    FP16_mode = True  & INT8_mode = False  -> FP16
    FP16_mode = False & INT8_mode = True   -> INT8 (calibrated)
    '''
    FP16_mode = False
    INT8_mode = False

    if not os.path.exists(onnx_file):
        print("LOAD ONNX FILE FAILED: ", onnx_file)
        # Bug fix: previously execution fell through and crashed inside
        # buildEngine when opening the missing file.
        return

    print('Load ONNX file from:%s \nStart export, Please wait a moment...' % (onnx_file))
    buildEngine(onnx_file, engine_file, quantification, q_batch_size, max_batch_size, FP16_mode, INT8_mode,
                calibration_images, calibration_cache)
    print('Export ENGINE success, Save as: ', engine_file)


# Run the export only when executed as a script, not on import.
if __name__ == '__main__':
    main()
