mohitsha (HF staff) committed on
Commit f4fe1f7
1 Parent(s): 8ceb58c

Add quantized files

Files changed (4)
  1. config.json +105 -0
  2. model_quantized.onnx +3 -0
  3. quantize.py +63 -0
  4. ryzenai_config.json +14 -0
config.json ADDED
@@ -0,0 +1,105 @@
+ {
+   "architecture": "resnet18",
+   "num_classes": 1000,
+   "num_features": 512,
+   "pretrained_cfg": {
+     "_name_or_path": "",
+     "add_cross_attention": false,
+     "architectures": null,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "classifier": "fc",
+     "crop_mode": "center",
+     "crop_pct": 0.95,
+     "cross_attention_hidden_size": null,
+     "custom_load": false,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "first_conv": "conv1",
+     "fixed_input_size": false,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "input_size": [
+       3,
+       224,
+       224
+     ],
+     "interpolation": "bicubic",
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "mean": [
+       0.485,
+       0.456,
+       0.406
+     ],
+     "min_length": 0,
+     "model_type": "",
+     "no_repeat_ngram_size": 0,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_classes": 1000,
+     "num_return_sequences": 1,
+     "origin_url": "https://github.com/huggingface/pytorch-image-models",
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "paper_ids": "arXiv:2110.00476",
+     "pool_size": [
+       7,
+       7
+     ],
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "std": [
+       0.229,
+       0.224,
+       0.225
+     ],
+     "suppress_tokens": null,
+     "tag": "a1_in1k",
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "test_crop_pct": 1.0,
+     "test_input_size": [
+       3,
+       288,
+       288
+     ],
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "typical_p": 1.0,
+     "use_bfloat16": false
+   },
+   "transformers_version": "4.36.2"
+ }
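
Note: the pretrained_cfg block above carries the timm preprocessing recipe (224x224 input, bicubic interpolation, crop_pct 0.95, ImageNet mean/std), which quantize.py below resolves into evaluation transforms. A minimal sketch of that resolution, assuming the config.json above has been downloaded to a local directory ("path/to/this/repo" is a placeholder):

import timm
from transformers import PretrainedConfig

# "path/to/this/repo" is a placeholder for a local copy of this repository.
config = PretrainedConfig.from_pretrained("path/to/this/repo")

# Resolve the timm data config recorded in pretrained_cfg into eval transforms.
data_config = timm.data.resolve_data_config(pretrained_cfg=config.pretrained_cfg)
eval_transforms = timm.data.create_transform(**data_config, is_training=False)

print(data_config)  # expected to reflect input_size (3, 224, 224), bicubic, crop_pct 0.95
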
model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0723876d0a12903d84705a98c29851774fc947448b43b730db84f9ae15976415
+ size 11731625
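
Note: model_quantized.onnx is stored as a Git LFS pointer; the actual ~11.7 MB ONNX binary is fetched on download, and its SHA-256 should match the oid recorded above. A minimal integrity-check sketch, assuming huggingface_hub is installed and using "your-namespace/your-repo" as a placeholder for this repository's id:

import hashlib
from huggingface_hub import hf_hub_download

# "your-namespace/your-repo" is a placeholder; substitute this repository's actual id.
onnx_path = hf_hub_download(repo_id="your-namespace/your-repo", filename="model_quantized.onnx")

# Hash the downloaded file and compare against the LFS oid shown above.
sha256 = hashlib.sha256()
with open(onnx_path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)
print(sha256.hexdigest())
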
quantize.py ADDED
@@ -0,0 +1,63 @@
+ from functools import partial
+ import timm
+ from optimum.amd.ryzenai import (
+     AutoQuantizationConfig,
+     RyzenAIOnnxQuantizer,
+ )
+ from optimum.exporters.onnx import main_export
+ from transformers import PretrainedConfig
+
+ # Define paths for exporting ONNX model and saving quantized model
+ export_dir = "resnet_onnx"
+ quantization_dir = "resnet_onnx_quantized"
+
+ # Specify the model ID from Timm
+ model_id = "timm/resnet18.a1_in1k"
+
+ # Step 1: Export the model to ONNX format using Optimum Exporters
+ main_export(
+     model_name_or_path=model_id,
+     output=export_dir,
+     task="image-classification",
+     opset=13,
+     batch_size=1,
+     no_dynamic_axes=True,
+ )
+
+ # Step 2: Preprocess configuration and data transformations
+ config = PretrainedConfig.from_pretrained(export_dir)
+ data_config = timm.data.resolve_data_config(pretrained_cfg=config.pretrained_cfg)
+ transforms = timm.data.create_transform(**data_config, is_training=False)
+
+ def preprocess_fn(ex, transforms):
+     image = ex["image"]
+     if image.mode == "L":
+         # Convert greyscale to RGB if needed
+         print("WARNING: converting greyscale to RGB")
+         image = image.convert("RGB")
+     pixel_values = transforms(image)
+     return {"pixel_values": pixel_values}
+
+ # Step 3: Initialize the RyzenAIOnnxQuantizer with the exported model
+ quantizer = RyzenAIOnnxQuantizer.from_pretrained(export_dir)
+
+ # Step 4: Load recommended quantization config for model
+ quantization_config = AutoQuantizationConfig.ipu_cnn_config()
+
+ # Step 5: Obtain a calibration dataset for computing quantization parameters
+ train_calibration_dataset = quantizer.get_calibration_dataset(
+     "imagenet-1k",
+     preprocess_function=partial(preprocess_fn, transforms=transforms),
+     num_samples=100,
+     dataset_split="train",
+     preprocess_batch=False,
+     streaming=True,
+ )
+
+ # Step 6: Run the quantizer with the specified configuration and calibration data
+ quantizer.quantize(
+     quantization_config=quantization_config,
+     dataset=train_calibration_dataset,
+     save_dir=quantization_dir
+ )
+
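
Note: once quantize.py has produced resnet_onnx_quantized/, the QDQ model can be smoke-tested with plain onnxruntime on CPU (the Ryzen AI execution provider is the intended deployment target; CPU is only a functional check). A minimal sketch, assuming the directories created by the script above, that the quantizer writes model_quantized.onnx into save_dir as in this repository, and a placeholder image file "cat.jpg"; the input name is read from the session rather than hard-coded:

import numpy as np
import onnxruntime as ort
import timm
from PIL import Image
from transformers import PretrainedConfig

# Rebuild the eval transforms exactly as in quantize.py above.
config = PretrainedConfig.from_pretrained("resnet_onnx")
data_config = timm.data.resolve_data_config(pretrained_cfg=config.pretrained_cfg)
transforms = timm.data.create_transform(**data_config, is_training=False)

# Load the quantized model on CPU; "cat.jpg" is a placeholder input image.
session = ort.InferenceSession(
    "resnet_onnx_quantized/model_quantized.onnx",
    providers=["CPUExecutionProvider"],
)
image = Image.open("cat.jpg").convert("RGB")
pixel_values = transforms(image).unsqueeze(0).numpy()  # (1, 3, 224, 224), matching the static batch of 1

input_name = session.get_inputs()[0].name
logits = session.run(None, {input_name: pixel_values})[0]
print("predicted ImageNet class index:", int(np.argmax(logits, axis=-1)[0]))
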
ryzenai_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "opset": null,
+   "optimum_version": "1.17.0.dev0",
+   "quantization": {
+     "activations_dtype": "QUInt8",
+     "activations_symmetric": true,
+     "calibration_method": "MinMSE",
+     "enable_dpu": true,
+     "format": "QDQ",
+     "weights_dtype": "QInt8",
+     "weights_symmetric": true
+   },
+   "transformers_version": "4.36.2"
+ }
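
Note: ryzenai_config.json records the settings applied by the RyzenAIOnnxQuantizer: QDQ format, symmetric QUInt8 activations and QInt8 weights, MinMSE calibration, and DPU-compatible ops enabled. These are assumed to mirror the AutoQuantizationConfig.ipu_cnn_config() preset used in quantize.py. A minimal sketch that prints both for comparison; the path assumes the quantizer writes this file into save_dir, as it appears in this repository:

import json
from optimum.amd.ryzenai import AutoQuantizationConfig

# The preset used in quantize.py above; printing it shows the settings it selects.
preset = AutoQuantizationConfig.ipu_cnn_config()
print(preset)

# The settings recorded alongside the quantized model (path is an assumption).
with open("resnet_onnx_quantized/ryzenai_config.json") as f:
    recorded = json.load(f)["quantization"]
print(recorded)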