#!/usr/bin/env python
# Copyright (c) OpenMMLab. All rights reserved.
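# Example usage (script name and config path are illustrative, not fixed):
#   python benchmark_inference.py \
#       configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/hrnet_w32_coco_256x192.py \
#       --bz 64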
import argparse
import time

import torch
from mmcv import Config
from mmcv.parallel import MMDataParallel

from mmpose.datasets import build_dataloader, build_dataset
from mmpose.models import build_posenet


def parse_args():
    parser = argparse.ArgumentParser(
        description='Benchmark the inference speed of an MMPose model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument(
        '--bz', default=32, type=int, help='batch size per GPU')
    args = parser.parse_args()
    return args


def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)

    # only the forward speed of the network matters here, so skip weight
    # loading and disable test-time augmentation and post-processing
    cfg.model.pretrained = None
    cfg.model.test_cfg.flip_test = False
    cfg.model.test_cfg.use_udp = False
    cfg.model.test_cfg.post_process = 'none'

    # set cudnn_benchmark
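    # (cudnn benchmark auto-tunes convolution algorithms; it is safe here
    # because every forward pass uses the same input shape)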
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # build the dataloader
    dataset = build_dataset(cfg.data.val)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=args.bz,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=False,
        shuffle=False)

    # build the model; no checkpoint is loaded since randomly initialized
    # weights are enough for a pure speed benchmark
    model = build_posenet(cfg.model)
    model = MMDataParallel(model, device_ids=[0])
    model.eval()
    
    # fetch one batch and reuse it for every forward pass
    data = next(iter(data_loader))

    # the first several iterations may be very slow so skip them
    num_warmup = 100
    inference_times = 100

    with torch.no_grad():
        start_time = time.perf_counter()

        # warm up so that cudnn algorithm selection and memory allocation
        # do not distort the timed run
        for i in range(num_warmup):
            model(return_loss=False, **data)
            torch.cuda.synchronize()

        elapsed = time.perf_counter() - start_time
        print(f'warmup cost: {elapsed:.2f} s')

        start_time = time.perf_counter()

        # synchronize after each forward pass so the elapsed time covers
        # all GPU work
        for i in range(inference_times):
            model(return_loss=False, **data)
            torch.cuda.synchronize()

        elapsed = time.perf_counter() - start_time
        fps = args.bz * inference_times / elapsed
        print(f'fps: {fps:.2f}')


if __name__ == '__main__':
    main()