zhengrongzhang committed on
Commit 1fec2a7
1 Parent(s): f04cda5

init model

Files changed (5)
  1. README.md +67 -0
  2. create_image_list.py +46 -0
  3. eval_onnx.py +161 -0
  4. inceptionv4_int8.onnx +3 -0
  5. requirements.txt +3 -0
README.md ADDED
@@ -0,0 +1,67 @@
+ ---
+ license: apache-2.0
+ datasets:
+ - imagenet-1k
+ metrics:
+ - accuracy
+ tags:
+ - RyzenAI
+ - vision
+ - classification
+ - pytorch
+ ---
+
+ # Inception_v4
+ Quantized Inception_v4 model that can be deployed with [AMD Ryzen AI](https://ryzenai.docs.amd.com/en/latest/).
+
+
+ ## Model description
+ Inception_v4 was first introduced in the paper [Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning](https://arxiv.org/abs/1602.07261).
+
+ The model implementation is from [TensorFlow-Slim](https://github.com/tensorflow/models/tree/master/research/slim).
+
+
+ ## How to use
+
+ ### Installation
+
+ Follow [Ryzen AI Installation](https://ryzenai.docs.amd.com/en/latest/inst.html) to prepare the environment for Ryzen AI.
+ Run the following script to install the prerequisites for this model.
+
+ ```bash
+ pip install -r requirements.txt
+ ```
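+
+ To verify the setup, you can check that the Ryzen AI execution provider is visible to onnxruntime (this assumes the Ryzen AI build of onnxruntime from the installation guide above):
+
+ ```python
+ import onnxruntime
+ # The Ryzen AI environment should list 'VitisAIExecutionProvider' here.
+ print(onnxruntime.get_available_providers())
+ ```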
+
+ ### Data Preparation
+
+ Follow [imagenet-1k](https://huggingface.co/datasets/imagenet-1k) to download the dataset; evaluation uses the 50,000-image validation split.
+
+ Download the [ImageNet validation synset labels file](https://github.com/tensorflow/models/blob/master/research/slim/datasets/imagenet_2012_validation_synset_labels.txt).
+
+ Create the validation image list:
+ ```bash
+ python create_image_list.py imagenet_2012_validation_synset_labels.txt
+ ```
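+
+ The script writes `val.txt` with one `<image name> <class index>` pair per line, e.g.:
+
+ ```
+ ILSVRC2012_val_00000001.JPEG 65
+ ILSVRC2012_val_00000002.JPEG 970
+ ...
+ ```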
+
+ ### Model Evaluation
+
+ ```bash
+ python eval_onnx.py --onnx_model inceptionv4_int8.onnx --ipu --provider_config Path\To\vaip_config.json --val_data_dir /Path/To/Your/Validation/Data --val_image_list val.txt
+ ```
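+
+ For a quick single-image smoke test outside the evaluation script, the sketch below runs the quantized model directly with onnxruntime. The preprocessing mirrors the Inception eval pipeline in `eval_onnx.py` (87.5% central crop, 299x299 bilinear resize, scaling to [-1, 1]); the image filename is an illustrative assumption, not part of this repository.
+
+ ```python
+ import numpy as np
+ import onnxruntime as ort
+ from PIL import Image
+
+ # Hypothetical example image; any RGB JPEG works.
+ img = Image.open("ILSVRC2012_val_00000001.JPEG").convert("RGB")
+
+ # Central crop covering 87.5% of the image, as in eval_onnx.py.
+ w, h = img.size
+ cw, ch = int(w * 0.875), int(h * 0.875)
+ left, top = (w - cw) // 2, (h - ch) // 2
+ img = img.crop((left, top, left + cw, top + ch)).resize((299, 299), Image.BILINEAR)
+
+ # Scale pixel values to [-1, 1] and add a batch dimension (NHWC).
+ x = (np.asarray(img, dtype=np.float32) / 255.0 - 0.5) * 2.0
+ x = x[np.newaxis, ...]
+
+ session = ort.InferenceSession(
+     "inceptionv4_int8.onnx",
+     providers=["VitisAIExecutionProvider"],
+     provider_options=[{"config_file": "vaip_config.json"}])
+ logits = session.run(None, {session.get_inputs()[0].name: x})[0]
+ # The 1001-way output reserves index 0 for a background class.
+ print("Predicted class index:", logits[0].argmax())
+ ```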
+
+ ### Performance
+
+ | Metric | Accuracy on IPU |
+ | :----: | :----: |
+ | Top1/Top5 | 79.92% / 95.02% |
+
+
+ ## Citation
+
+ ```bibtex
+ @article{Szegedy2016Inceptionv4IA,
+   title={Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning},
+   author={Christian Szegedy and Sergey Ioffe and Vincent Vanhoucke and Alexander A. Alemi},
+   journal={arXiv preprint arXiv:1602.07261},
+   year={2016},
+ }
+ ```
create_image_list.py ADDED
@@ -0,0 +1,46 @@
+ #!/usr/bin/env python
+ r"""Create a list containing image names and their corresponding class indices
+ for ImageNet validation data.
+
+ The directory structure of the original ImageNet validation data set is
+ expected to be:
+
+   data_dir/ILSVRC2012_val_00000001.JPEG
+   data_dir/ILSVRC2012_val_00000002.JPEG
+   ...
+   data_dir/ILSVRC2012_val_00050000.JPEG
+
+ This script generates a list like:
+   ILSVRC2012_val_00000001.JPEG 65
+   ILSVRC2012_val_00000002.JPEG 970
+   ...
+   ILSVRC2012_val_00050000.JPEG 355
+
+ Usage:
+   Download https://github.com/tensorflow/models/blob/master/research/slim/datasets/imagenet_2012_validation_synset_labels.txt and then run:
+   ./create_image_list.py imagenet_2012_validation_synset_labels.txt
+ """
+
+ import sys
+
+ if __name__ == '__main__':
+     if len(sys.argv) < 2:
+         print('Usage: ./create_image_list.py <labels file>')
+         sys.exit(-1)
+     labels_file = sys.argv[1]
+
+     # One synset label (e.g. n01751748) per line, in validation image order.
+     with open(labels_file) as fr:
+         labels = [l.strip() for l in fr.readlines()]
+     # Assign class indices 0..999 to the synset labels in sorted order.
+     sorted_labels = sorted(labels)
+     class_idx = 0
+     label_to_class_idx = {}
+     for label in sorted_labels:
+         if label not in label_to_class_idx:
+             label_to_class_idx[label] = class_idx
+             class_idx += 1
+
+     image_list_file = 'val.txt'
+     with open(image_list_file, 'w') as f:
+         for i, label in enumerate(labels):
+             image_name = 'ILSVRC2012_val_%08d.JPEG' % (i + 1)
+             f.write(f'{image_name} {label_to_class_idx[label]}\n')
+     print(f'Output image list file: {image_list_file}')
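As a quick sanity check of the generated `val.txt`, a sketch like the following can be run afterwards (illustrative, not part of the committed script; the expected counts are the standard ImageNet validation statistics):

```python
# Hypothetical check: 50,000 images mapped onto 1,000 class indices.
with open('val.txt') as f:
    pairs = [line.split() for line in f]
assert len(pairs) == 50000
assert len({idx for _, idx in pairs}) == 1000
print('val.txt looks consistent')
```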
eval_onnx.py ADDED
@@ -0,0 +1,161 @@
+ #!/usr/bin/env python
+
+ import argparse
+ import math
+ import numpy as np
+ import os
+ import time
+
+ import tensorflow.compat.v1 as tf
+ tf.disable_v2_behavior()
+
+ import onnxruntime as ort
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+     "--onnx_model", default="inceptionv4_int8.onnx", help="Input onnx model")
+ parser.add_argument(
+     "--val_data_dir",
+     default="/workspace/dataset/imagenet/val",
+     help="Data directory of validation set")
+ parser.add_argument(
+     "--val_image_list",
+     default="/workspace/dataset/imagenet/val.txt",
+     help="Validation images list")
+ parser.add_argument(
+     "--subset_len",
+     default=50000,
+     type=int,
+     help="Subset length of validation set to use")
+ parser.add_argument(
+     "--batch_size", default=1, type=int, help="Validation batch size")
+ parser.add_argument(
+     "--ipu",
+     action="store_true",
+     help="Use IPU for inference.",
+ )
+ parser.add_argument(
+     "--provider_config",
+     type=str,
+     default="vaip_config.json",
+     help="Path of the config file for setting provider_options.",
+ )
+ args = parser.parse_args()
+
+ class DataLoader(object):
+
+     def __init__(self, height=224, width=224):
+         self.output_height = height
+         self.output_width = width
+
+     def _inception_preprocess(self,
+                               image,
+                               central_fraction=0.875,
+                               central_crop=True):
+         image = tf.image.convert_image_dtype(image, dtype=tf.float32)
+         # Crop the central region of the image with an area containing 87.5% of
+         # the original image.
+         if central_crop and central_fraction:
+             image = tf.image.central_crop(image, central_fraction=central_fraction)
+
+         if self.output_height and self.output_width:
+             image = tf.expand_dims(image, 0)
+             image = tf.image.resize_bilinear(
+                 image, [self.output_height, self.output_width], align_corners=False)
+         # Scale pixel values from [0, 1] to [-1, 1].
+         image = tf.subtract(image, 0.5)
+         image = tf.multiply(image, 2.0)
+         return image
+
+     def _build_placeholder(self):
+         input_image_path = tf.placeholder(
+             tf.string, shape=(None), name="input_image_path")
+         image = tf.io.read_file(input_image_path)
+         image = tf.image.decode_jpeg(image, channels=3)
+         return image, input_image_path
+
+     def build_preprocess(self):
+         """Returns image tensor used to read image."""
+         image, input_image_path = self._build_placeholder()
+         image = self._inception_preprocess(image)
+         return image, input_image_path
+
+ def main():
+     input_shape = (299, 299, 3)
+     label_offset = 0
+
+     with tf.Session() as tf_session:
+         loader = DataLoader(input_shape[0], input_shape[1])
+         image, image_path = loader.build_preprocess()
+         in_image = tf.placeholder(
+             tf.float32, shape=(None,) + input_shape, name='in_image')
+         in_label = tf.placeholder(tf.int64, shape=(None, 1), name='in_label')
+         num_classes = 1001 - label_offset
+         logits = tf.placeholder(
+             tf.float32, shape=(None, num_classes), name='logits')
+         top1, top1_update = tf.metrics.recall_at_k(
+             in_label, logits, 1, name="precision_top1")
+         top5, top5_update = tf.metrics.recall_at_k(
+             in_label, logits, 5, name="precision_top5")
+
+         var_list = tf.get_collection(
+             tf.GraphKeys.LOCAL_VARIABLES, scope="precision")
+         vars_initializer = tf.variables_initializer(var_list=var_list)
+         tf_session.run(vars_initializer)
+
+         with open(args.val_image_list, 'r') as fr:
+             lines = fr.readlines()
+         if args.subset_len > len(lines):
+             raise ValueError(
+                 "subset_len(%d) should be less than or equal to the total number of images(%d)." %
+                 (args.subset_len, len(lines)))
+         eval_steps = math.ceil(args.subset_len / args.batch_size)
+         start_t = time.time()
+
+         if args.ipu:
+             providers = ["VitisAIExecutionProvider"]
+             provider_options = [{"config_file": args.provider_config}]
+         else:
+             providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
+             provider_options = None
+         ort_session = ort.InferenceSession(
+             args.onnx_model, providers=providers, provider_options=provider_options)
+
+         for step in range(eval_steps):
+             print(f'Eval step {step} / {eval_steps}')
+             batch_images = []
+             batch_labels = []
+             for i in range(args.batch_size):
+                 index = step * args.batch_size + i
+                 if index >= args.subset_len:
+                     break
+
+                 img_path, label = lines[index].strip().split(" ")
+                 img_path = os.path.join(args.val_data_dir, img_path)
+                 # Run the session to get a preprocessed image from the fed image path.
+                 image_val = tf_session.run(image, feed_dict={image_path: img_path})
+                 batch_images.append(image_val)
+
+                 # Shift labels by one: the model's 1001 outputs reserve index 0
+                 # for a background class.
+                 label = int(label) + 1 - label_offset
+                 label = np.array([label], dtype=np.int64)
+                 batch_labels.append(label)
+
+             batch_images = batch_images[0] if args.batch_size == 1 else np.squeeze(
+                 batch_images)
+             ort_inputs = {ort_session.get_inputs()[0].name: batch_images}
+             outputs = ort_session.run(None, ort_inputs)
+
+             # Update top1/top5 metrics.
+             tf_session.run([top1_update, top5_update],
+                            feed_dict={
+                                in_image: batch_images,
+                                in_label: batch_labels,
+                                logits: outputs[0]
+                            })
+         end_t = time.time()
+         top1_val, top5_val = tf_session.run([top1, top5])
+         print('Recall_1 = [%s]' % str(top1_val))
+         print('Recall_5 = [%s]' % str(top5_val))
+         print('Use_time = [%s]' % str(end_t - start_t))
+
+ if __name__ == "__main__":
+     main()
inceptionv4_int8.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:abe297edd24883a9da0e7a199e4dbf610eabff71073e44f73557f5a6da393b7a
+ size 171239074
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ tensorflow==2.15.0
+ numpy
+ # onnxruntime