# quantize_yolov8.py
# =====================================================================
# Compatibility shim: older `onnx` releases do not define the 4-bit
# tensor element types on TensorProto. Patch them in using the official
# enum values from onnx.proto (TensorProto.DataType):
#   UINT4 = 21, INT4 = 22.
# BUGFIX: the original code assigned INT4 = 20, but 20 is
# FLOAT8E5M2FNUZ in the ONNX spec — tensors tagged with it would be
# mislabeled as an FP8 type.
# =====================================================================
import onnx
onnx_proto = onnx.TensorProto
if not hasattr(onnx_proto, 'INT4'):
    onnx_proto.INT4 = 22   # TensorProto.DataType.INT4
if not hasattr(onnx_proto, 'UINT4'):
    onnx_proto.UINT4 = 21  # TensorProto.DataType.UINT4
# =====================================================================
import os
import numpy as np
from PIL import Image
from onnxruntime.quantization import QuantType, quantize_static
from onnxruntime.quantization.calibrate import CalibrationDataReader


class YOLOv8CalibrationDataReader(CalibrationDataReader):
    """Feed preprocessed calibration images to onnxruntime's static quantizer.

    Yields one image per call to ``get_next`` as a float32 NCHW array scaled
    to [0, 1], keyed by the input name "images" (the default input name of a
    YOLOv8 ONNX export).
    """

    def __init__(self, image_folder, input_shape=(1, 3, 320, 320), max_images=179):
        """
        Args:
            image_folder: directory scanned (non-recursively) for
                .png/.jpg/.jpeg files.
            input_shape: model input layout as (N, C, H, W); images are
                resized to (W, H).
            max_images: cap on how many images are used for calibration.
                Default 179 preserves the original hard-coded slice (the
                original comment claimed 100, which contradicted the code).
        """
        self.images = [os.path.join(image_folder, f) for f in os.listdir(image_folder)
                       if f.lower().endswith(('.png', '.jpg', '.jpeg'))]
        self.input_name = "images"  # default input name of a YOLOv8 ONNX export
        self.input_shape = input_shape
        self.max_images = max_images
        self.iter = iter(self._preprocess_images())

    def _preprocess_images(self):
        # Lazily yield one preprocessed image per calibration step.
        for img_path in self.images[:self.max_images]:
            img = Image.open(img_path).convert("RGB")
            # PIL's resize takes (width, height); input_shape is (N, C, H, W).
            img = img.resize((self.input_shape[-1], self.input_shape[-2]))
            img_data = np.array(img).astype(np.float32) / 255.0  # normalize to [0, 1]
            img_data = np.transpose(img_data, (2, 0, 1))  # HWC -> CHW
            img_data = np.expand_dims(img_data, axis=0)   # add batch dim -> NCHW
            yield {self.input_name: img_data}

    def get_next(self):
        # CalibrationDataReader contract: return the next feed dict,
        # or None to signal the end of the calibration data.
        return next(self.iter, None)


# Script entry point: run static INT8 quantization on a YOLOv8 ONNX export.
if __name__ == "__main__":
    model_fp32 = r"D:\CodeCNN\yolov8-study\runs\detect\train31\weights\best31n-1-7-2100.onnx"
    model_int8 = r"D:\CodeCNN\yolov8-study\runs\detect\train31\weights\best31n_int8.onnx"
    calib_dir = r"D:\Datasets\quantization_pic"  # replace with your calibration image directory

    reader = YOLOv8CalibrationDataReader(calib_dir, input_shape=(1, 3, 320, 320))

    quantize_static(
        model_fp32,
        model_int8,
        reader,
        # quant_format="QLinearOps",  # must be QLinearOps (compatible with ONNX Runtime 1.6.0)
        activation_type=QuantType.QUInt8,  # unsigned 8-bit activations
        weight_type=QuantType.QInt8,       # signed 8-bit weights
        op_types_to_quantize=['Conv', 'MatMul'],
        per_channel=False,  # ONNX Runtime 1.6.0 does not support per_channel=True
        reduce_range=False,
    )
    print(f"Quantized model saved to {model_int8}")