# Copyright (c) Soumith Chintala 2016,
# All rights reserved
#
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://spdx.org/licenses/BSD-3-Clause.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
import time
import argparse
import os
import torch.nn.functional as F

###### User-modifiable section: start ######

# Specify the workload to profile here; usually both forward and backward passes.
def run(x):
    """Run one forward pass of softplus on *x* plus a backward pass
    through the mean of the output; return the forward result."""
    out = F.softplus(x)
    out.mean().backward()
    return out

# Input shapes to profile, as NCHW lists; edit to match your own workload.
shape_list = ([128, 64, 128, 128], [128, 128, 64, 64],
              [128, 256, 64, 64], [128, 256, 32, 32],
              [128, 512, 32, 32], [128, 512, 16, 16],
              [128, 1024, 16, 16], [128, 1024, 8, 8],
              [128, 2048, 8, 8])
###### User-modifiable section: end ######

# Command line: a single --device flag selects the backend to profile on.
parser = argparse.ArgumentParser(description='PyTorch Prof')
parser.add_argument('--device', type=str, default='cpu',
                    help='set which type of device used. Support cpu, cuda:0(device_id), npu:0(device_id).')
args = parser.parse_args()

# Pick profiler keyword arguments and the dtype sweep per backend.
# Accelerators are profiled in both half and single precision; CPU only fp32.
if args.device.startswith('npu'):
    torch.npu.set_device(args.device)
    dtype_list = ['fp16', 'fp32']
    prof_kwargs = {'use_npu': True}
elif args.device.startswith('cuda'):
    torch.cuda.set_device(args.device)
    dtype_list = ['fp16', 'fp32']
    prof_kwargs = {'use_cuda': True}
else:
    dtype_list = ['fp32']
    prof_kwargs = {}

# Ensure the trace output directory exists. exist_ok=True is idempotent and
# avoids the check-then-create race of an explicit os.path.exists() guard.
os.makedirs('./prof_out', exist_ok=True)

# Sweep every (dtype, shape) combination: one warm-up run, then a profiled run
# whose chrome trace is written under ./prof_out.
for dtype in dtype_list:
    for shape in shape_list:
        N, C, H, W = shape
        # Build the input on the target device/dtype FIRST, then mark it as
        # requiring grad. This keeps x a leaf tensor, so the .to()/.half()
        # casts are not recorded in the autograd graph being profiled
        # (setting requires_grad before .to(device) made x a non-leaf on
        # cuda/npu and profiled the cast's backward as well).
        x = torch.randn(*shape).to(args.device)
        if dtype == 'fp16':
            x = x.half()
        x.requires_grad_(True)

        run(x)  # warm-up, outside the profiler
        with torch.autograd.profiler.profile(**prof_kwargs) as prof:
            run(x)
        prof.export_chrome_trace("./prof_out/softplus_device_%s_dtype_%s_shape_%d_%d_%d_%d.prof" % (
            args.device, dtype, N, C, H, W))  # output trace file path

        if args.device.startswith('npu'):
            # Re-profile with the NPU private 5HD layout; format id 3 is
            # presumably NC1HWC0 (matches the "_5HD" suffix) — TODO confirm
            # against the CANN format enum.
            x.data = x.data.npu_format_cast(3)
            run(x)  # warm-up in the new layout
            with torch.autograd.profiler.profile(**prof_kwargs) as prof:
                run(x)
            prof.export_chrome_trace("./prof_out/softplus_device_%s_dtype_%s_shape_%d_%d_%d_%d_5HD.prof" % (
                args.device, dtype, N, C, H, W))  # output trace file path

