from functools import partial
import timm
from optimum.amd.ryzenai import (
    AutoQuantizationConfig,
    RyzenAIOnnxQuantizer,
)
from optimum.exporters.onnx import main_export
from transformers import PretrainedConfig

# Define paths for exporting ONNX model and saving quantized model
export_dir = "resnet_onnx"
quantization_dir = "resnet_onnx_quantized"

# Specify the model ID from Timm
model_id = "timm/resnet18.a1_in1k"

# Step 1: Export the model to ONNX format using Optimum Exporters
main_export(
    model_name_or_path=model_id,
    output=export_dir,
    task="image-classification",
    opset=13,
    batch_size=1,
    no_dynamic_axes=True,
)
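
# Note: the Ryzen AI flow expects static input shapes, which is why the export above
# pins batch_size=1 and sets no_dynamic_axes=True.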

# Step 2: Preprocess configuration and data transformations
config = PretrainedConfig.from_pretrained(export_dir)
data_config = timm.data.resolve_data_config(pretrained_cfg=config.pretrained_cfg)
transforms = timm.data.create_transform(**data_config, is_training=False)

def preprocess_fn(ex, transforms):
    image = ex["image"]
    if image.mode != "RGB":
        # Convert greyscale, palette, RGBA, etc. to the 3-channel RGB the transforms expect
        print(f"WARNING: converting {image.mode} image to RGB")
        image = image.convert("RGB")
    pixel_values = transforms(image)
    return {"pixel_values": pixel_values}

# Step 3: Initialize the RyzenAIOnnxQuantizer with the exported model
quantizer = RyzenAIOnnxQuantizer.from_pretrained(export_dir)

# Step 4: Load recommended quantization config for model
quantization_config = AutoQuantizationConfig.ipu_cnn_config()

# Step 5: Obtain a calibration dataset for computing quantization parameters
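# Note: imagenet-1k on the Hugging Face Hub is a gated dataset; accept its terms on the
# dataset page and authenticate locally (e.g. `huggingface-cli login`) before streaming it.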
train_calibration_dataset = quantizer.get_calibration_dataset(
    "imagenet-1k",
    preprocess_function=partial(preprocess_fn, transforms=transforms),
    num_samples=100,
    dataset_split="train",
    preprocess_batch=False,
    streaming=True,
)

# Step 6: Run the quantizer with the specified configuration and calibration data
quantizer.quantize(
    quantization_config=quantization_config,
    dataset=train_calibration_dataset,
    save_dir=quantization_dir,
)
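
# Optional follow-up (illustrative sketch, not part of the quantization flow above):
# load the quantized model with plain onnxruntime on CPU and push one random input
# through it as a quick shape sanity check. Assumptions: onnxruntime is installed,
# the quantizer wrote a single *.onnx file into `quantization_dir`, and the export
# above produced a static 1x3x224x224 input (the resnet18.a1_in1k default resolution).
import glob

import numpy as np
import onnxruntime as ort

quantized_model_path = glob.glob(f"{quantization_dir}/*.onnx")[0]  # assumes one .onnx file was written
session = ort.InferenceSession(quantized_model_path, providers=["CPUExecutionProvider"])
input_name = session.get_inputs()[0].name
dummy_input = np.random.rand(1, 3, 224, 224).astype(np.float32)
logits = session.run(None, {input_name: dummy_input})[0]
print(f"Quantized model output shape: {logits.shape}")  # expected (1, 1000) for ImageNet-1k classes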