zhengrongzhang committed
Commit
9d8126f
1 Parent(s): ef9fe29

init model

Files changed (4)
  1. README.md +60 -0
  2. ResNet_int.onnx +3 -0
  3. eval_onnx.py +164 -0
  4. requirements.txt +4 -0
README.md ADDED
@@ -0,0 +1,60 @@
+ ---
+ license: apache-2.0
+ datasets:
+ - imagenet-1k
+ metrics:
+ - accuracy
+ tags:
+ - RyzenAI
+ - vision
+ - classification
+ - pytorch
+ ---
+
+ # ResNet-50 v1.5
+ Quantized ResNet-50 model that can be deployed with [AMD Ryzen AI](https://ryzenai.docs.amd.com/en/latest/).
+
+
+ ## Model description
+ ResNet (Residual Network) was first introduced in the paper *Deep Residual Learning for Image Recognition* by He et al.
+
+ This model is ResNet-50 v1.5 from [torchvision](https://pytorch.org/vision/main/models/generated/torchvision.models.resnet50.html).
+
+
+ ## How to use
+
+ ### Installation
+
+ Follow the [Ryzen AI Installation](https://ryzenai.docs.amd.com/en/latest/inst.html) guide to prepare the environment for Ryzen AI.
+ Then run the following command to install the prerequisites for this model.
+
+ ```bash
+ pip install -r requirements.txt
+ ```
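+
+ As a quick sanity check that ONNX Runtime can see the Ryzen AI (Vitis AI) execution provider, the following minimal sketch can be run after installation; it only assumes the provider name used by `eval_onnx.py`:
+
+ ```python
+ import onnxruntime
+
+ # List the execution providers available in the installed ONNX Runtime build.
+ available = onnxruntime.get_available_providers()
+ print(available)
+
+ # "VitisAIExecutionProvider" should be listed if the Ryzen AI setup succeeded.
+ # Without it, eval_onnx.py still runs on CUDA/CPU when --ipu is not passed.
+ if "VitisAIExecutionProvider" not in available:
+     print("Vitis AI execution provider not found; check the Ryzen AI installation.")
+ ```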
+
+ ### Data Preparation
+
+ Follow the [PyTorch example](https://github.com/pytorch/examples/blob/main/imagenet/README.md#requirements) to prepare the ImageNet dataset. Note that `eval_onnx.py` expects the validation split under `<data_dir>/validation`, organized into one subfolder per class (the `torchvision` `ImageFolder` layout).
+
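+ A quick way to verify the layout before running the evaluation (a minimal sketch; the dataset path is a placeholder):
+
+ ```python
+ import os
+
+ data_dir = "/Path/To/Your/Dataset"  # placeholder; use the same value as --data_dir
+
+ # eval_onnx.py reads the validation split from <data_dir>/validation,
+ # organized as one subfolder per ImageNet class (torchvision ImageFolder layout).
+ val_dir = os.path.join(data_dir, "validation")
+ class_dirs = sorted(d for d in os.listdir(val_dir)
+                     if os.path.isdir(os.path.join(val_dir, d)))
+ print(f"Found {len(class_dirs)} class folders, e.g. {class_dirs[:3]}")
+ ```
+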
+ ### Model Evaluation
+
+ ```bash
+ python eval_onnx.py --onnx_model ResNet_int.onnx --ipu --provider_config Path\To\vaip_config.json --data_dir /Path/To/Your/Dataset
+ ```
+
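+ For a quick smoke test outside the full evaluation, the quantized model can also be run on a single image with ONNX Runtime directly. This is a minimal sketch that mirrors the preprocessing and session setup in `eval_onnx.py`; the image path and the `vaip_config.json` location are placeholders:
+
+ ```python
+ import numpy as np
+ import onnxruntime
+ from PIL import Image
+ from torchvision import transforms
+
+ # Same preprocessing as the validation pipeline in eval_onnx.py.
+ preprocess = transforms.Compose([
+     transforms.Resize(256),
+     transforms.CenterCrop(224),
+     transforms.ToTensor(),
+     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+ ])
+
+ image = Image.open("example.jpg").convert("RGB")  # placeholder image path
+ batch = preprocess(image).unsqueeze(0).numpy()    # shape (1, 3, 224, 224)
+
+ # Run on the IPU via the Vitis AI execution provider; switch providers to
+ # ["CPUExecutionProvider"] and drop provider_options to run on CPU instead.
+ session = onnxruntime.InferenceSession(
+     "ResNet_int.onnx",
+     providers=["VitisAIExecutionProvider"],
+     provider_options=[{"config_file": "vaip_config.json"}],  # placeholder path
+ )
+
+ logits = session.run(None, {session.get_inputs()[0].name: batch})[0]
+ print("Predicted ImageNet class index:", int(np.argmax(logits)))
+ ```
+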
+ ### Performance
+
+ | Metric | Accuracy on IPU |
+ | :----: | :----: |
+ | Top1 / Top5 | 76.17% / 92.86% |
+
+
+ ## Citation
+
+ ```bibtex
+ @article{He2015,
+   author={Kaiming He and Xiangyu Zhang and Shaoqing Ren and Jian Sun},
+   title={Deep Residual Learning for Image Recognition},
+   journal={arXiv preprint arXiv:1512.03385},
+   year={2015}
+ }
+ ```
ResNet_int.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d2049c9e8ccf2999188123cf582f716c16771d0ffeae90247bddf37cb7cb8935
+ size 102275586
eval_onnx.py ADDED
@@ -0,0 +1,164 @@
+ #!/usr/bin/env python
+
+ from typing import Tuple
+
+ import argparse
+ import onnxruntime
+ import os
+ import sys
+ import time
+ import torch
+ import torchvision.datasets as datasets
+ import torchvision.transforms as transforms
+
+ from torch.utils.data import DataLoader
+ from tqdm import tqdm
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+     "--onnx_model", default="model.onnx", help="Input onnx model")
+ parser.add_argument(
+     "--data_dir",
+     default="/workspace/dataset/imagenet",
+     help="Directory of dataset")
+ parser.add_argument(
+     "--batch_size", default=1, type=int, help="Evaluation batch size")
+ parser.add_argument(
+     "--ipu",
+     action="store_true",
+     help="Use IPU for inference.",
+ )
+ parser.add_argument(
+     "--provider_config",
+     type=str,
+     default="vaip_config.json",
+     help="Path of the config file for setting provider_options.",
+ )
+ args = parser.parse_args()
+
+ class AverageMeter(object):
+     """Computes and stores the average and current value"""
+
+     def __init__(self, name, fmt=':f'):
+         self.name = name
+         self.fmt = fmt
+         self.reset()
+
+     def reset(self):
+         self.val = 0
+         self.avg = 0
+         self.sum = 0
+         self.count = 0
+
+     def update(self, val, n=1):
+         self.val = val
+         self.sum += val * n
+         self.count += n
+         self.avg = self.sum / self.count
+
+     def __str__(self):
+         fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
+         return fmtstr.format(**self.__dict__)
+
+ def accuracy(output: torch.Tensor,
+              target: torch.Tensor,
+              topk: Tuple[int] = (1,)) -> Tuple[float]:
+     """Computes the accuracy over the k top predictions for the specified values of k.
+     Args:
+         output: Prediction of the model.
+         target: Ground truth labels.
+         topk: Topk accuracy to compute.
+
+     Returns:
+         Accuracy results according to 'topk'.
+     """
+
+     with torch.no_grad():
+         maxk = max(topk)
+         batch_size = target.size(0)
+
+         _, pred = output.topk(maxk, 1, True, True)
+         pred = pred.t()
+         correct = pred.eq(target.view(1, -1).expand_as(pred))
+
+         res = []
+         for k in topk:
+             correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True)
+             res.append(correct_k.mul_(100.0 / batch_size))
+         return res
+
+ def prepare_data_loader(data_dir: str,
+                         batch_size: int = 100,
+                         workers: int = 8) -> torch.utils.data.DataLoader:
+     """Returns a validation data loader of ImageNet for the given `data_dir`.
+
+     Args:
+         data_dir: Directory where the images are stored. There must be a subdirectory
+             named 'validation' that stores the validation set of ImageNet.
+         batch_size: Batch size of the data loader.
+         workers: How many subprocesses to use for data loading.
+
+     Returns:
+         An object of torch.utils.data.DataLoader.
+     """
+
+     valdir = os.path.join(data_dir, 'validation')
+
+     normalize = transforms.Normalize(
+         mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+     val_dataset = datasets.ImageFolder(
+         valdir,
+         transforms.Compose([
+             transforms.Resize(256),
+             transforms.CenterCrop(224),
+             transforms.ToTensor(),
+             normalize,
+         ]))
+
+     return torch.utils.data.DataLoader(
+         val_dataset,
+         batch_size=batch_size,
+         shuffle=False,
+         num_workers=workers,
+         pin_memory=True)
+
+ def val_imagenet():
+     """Validates the ONNX model on the ImageNet validation set."""
+     print(f'Current onnx model: {args.onnx_model}')
+
+     if args.ipu:
+         providers = ["VitisAIExecutionProvider"]
+         provider_options = [{"config_file": args.provider_config}]
+     else:
+         providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
+         provider_options = None
+     ort_session = onnxruntime.InferenceSession(
+         args.onnx_model, providers=providers, provider_options=provider_options)
+
+     val_loader = prepare_data_loader(args.data_dir, args.batch_size)
+
+     top1 = AverageMeter('Acc@1', ':6.2f')
+     top5 = AverageMeter('Acc@5', ':6.2f')
+
+     start_time = time.time()
+     val_loader = tqdm(val_loader, file=sys.stdout)
+     with torch.no_grad():
+         for batch_idx, (images, targets) in enumerate(val_loader):
+             inputs = images.numpy()
+             ort_inputs = {ort_session.get_inputs()[0].name: inputs}
+
+             outputs = ort_session.run(None, ort_inputs)
+             outputs = torch.from_numpy(outputs[0])
+
+             acc1, acc5 = accuracy(outputs, targets, topk=(1, 5))
+             top1.update(acc1, images.size(0))
+             top5.update(acc5, images.size(0))
+
+     current_time = time.time()
+     print('Test Top1 {:.2f}%\tTop5 {:.2f}%\tTime {:.2f}s\n'.format(
+         float(top1.avg), float(top5.avg), (current_time - start_time)))
+
+     return top1.avg, top5.avg
+
+ if __name__ == '__main__':
+     val_imagenet()
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ torch==1.13
+ torchvision
+ tqdm
+ # onnxruntime