from functools import partial
from optimum.amd.ryzenai import (
    AutoQuantizationConfig,
    RyzenAIOnnxQuantizer,
)
from optimum.exporters.onnx import main_export
from transformers import AutoFeatureExtractor
# Define paths for exporting ONNX model and saving quantized model
export_dir = "resnet_onnx"
quantization_dir = "resnet_onnx_quantized"
# Specify the model ID from Transformers
model_id = "microsoft/resnet-18"
# Step 1: Export the model to ONNX format using Optimum Exporters
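# The Ryzen AI flow expects static input shapes, so the export pins a fixed 1x3x224x224 input (no_dynamic_axes=True)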
main_export(
    model_name_or_path=model_id,
    output=export_dir,
    task="image-classification",
    opset=13,
    batch_size=1,
    height=224,
    width=224,
    no_dynamic_axes=True,
)
# Step 2: Preprocess configuration and data transformations
feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
def preprocess_fn(ex, feature_extractor):
    image = ex["image"]
    if image.mode == "L":
        image = image.convert("RGB")
    pixel_values = feature_extractor(image).pixel_values[0]
    return {"pixel_values": pixel_values}
# Step 3: Initialize the RyzenAIOnnxQuantizer with the exported model
quantizer = RyzenAIOnnxQuantizer.from_pretrained(export_dir)
# Step 4: Load recommended quantization config for model
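# ipu_cnn_config() provides the default quantization settings recommended for CNN models targeting the Ryzen AI NPU (IPU)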
quantization_config = AutoQuantizationConfig.ipu_cnn_config()
# Step 5: Obtain a calibration dataset for computing quantization parameters
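# Note: imagenet-1k on the Hugging Face Hub is gated; accept its terms and authenticate (huggingface-cli login) before streaming it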
train_calibration_dataset = quantizer.get_calibration_dataset(
"imagenet-1k",
preprocess_function=partial(preprocess_fn, feature_extractor=feature_extractor),
num_samples=100,
dataset_split="train",
preprocess_batch=False,
streaming=True,
)
# Step 6: Run the quantizer with the specified configuration and calibration data
quantizer.quantize(
    quantization_config=quantization_config, dataset=train_calibration_dataset, save_dir=quantization_dir
)
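
# Optional follow-up (a minimal sketch, not part of the pipeline above): run a quick
# functional check of the quantized graph with plain onnxruntime. The output filename
# below is an assumption; inspect `quantization_dir` for the actual file the quantizer
# writes. The CPU execution provider is used here only to verify that the graph loads
# and runs; if the quantizer emitted Vitis AI custom ops, or for real deployment on
# Ryzen AI hardware, load the model through the VitisAI execution provider (or the
# RyzenAIModel classes in optimum.amd.ryzenai) instead.
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession(
    f"{quantization_dir}/model_quantized.onnx",  # assumed filename
    providers=["CPUExecutionProvider"],
)

# Static input matching the export above: batch_size=1, 3 channels, 224x224,
# fed through the ONNX graph's "pixel_values" input
dummy_input = np.random.rand(1, 3, 224, 224).astype(np.float32)
logits = session.run(None, {"pixel_values": dummy_input})[0]
print("Predicted class id:", logits.argmax(axis=-1))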