#!py -3
# coding:utf-8

import sys
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint

def main():
    """Benchmark GPU memory use of a deep Conv2d stack, with and without
    torch.utils.checkpoint.

    With no arguments: plain forward/backward through ``layercnt``
    Conv2d(10,10) layers (all activations kept — ~6 GB here).
    With ``-c``: the stack is split into two halves, each wrapped in
    ``checkpoint(...)``, trading backward-pass recomputation for roughly
    half the activation memory (~3 GB here).

    Prints the peak cached CUDA memory and an autograd profiler table.
    Requires a CUDA device.
    """
    layercnt = 600
    istestcheckpoint = len(sys.argv) == 2 and sys.argv[1] == '-c'

    if not istestcheckpoint:
        print("default no checkpoint testing,%d Conv2d(1,1).\n" % layercnt)
        print('receive -c arg to test pytorch checkpoint.', "\n\n")
    else:
        print("use checkpoint testing,%d Conv2d(1,1)." % layercnt)
        print("\n\n")

    print("starting test.")
    sys.stdout.flush()  # flush stdout so progress shows on screen immediately

    # Robustness: fail with a clear message instead of a deep torch
    # traceback when no GPU is present.
    if not torch.cuda.is_available():
        sys.exit("CUDA device required for this benchmark.")

    device = torch.device('cuda')
    inp = torch.rand(1, 1, 512, 512, device=device)  # renamed from `input` (shadowed builtin)
    layer1 = nn.Conv2d(1, 10, 1, 1).to(device)
    layers = [nn.Conv2d(10, 10, 1, 1).to(device) for _ in range(layercnt)]

    def run_first_half(*argv):
        # Forward through the first half of the stack; used as a
        # checkpoint segment (re-executed during backward).
        x = argv[0]
        for i in range(layercnt // 2):
            x = layers[i](x)
        return x

    def run_second_half(*argv):
        # Forward through the second half of the stack.
        x = argv[0]
        for layer in layers[layercnt // 2:]:
            x = layer(x)
        return x

    if not istestcheckpoint:
        model = nn.Sequential(*layers)

        with torch.autograd.profiler.profile(use_cuda=True) as prof:
            # Plain run: every intermediate activation is retained for the
            # backward pass (~6 GB of GPU memory for this configuration).
            # Call model(...) rather than model.forward(...) so module
            # hooks are honored.
            model(layer1(inp)).sum().backward()

        # torch.cuda.memory_allocated() would give the currently allocated
        # tensor memory; we report the peak cached amount instead.
        # NOTE: max_memory_cached() is deprecated in newer torch in favor
        # of max_memory_reserved(); kept for compatibility with the torch
        # version this script targets.
        print('result max_memory_cached = ', torch.cuda.max_memory_cached() / 1024 / 1024, 'MB')
        # BUGFIX: was print(print(...)), which also printed a spurious None.
        print(prof.key_averages().table(sort_by="cuda_time_total"))

        # torch.cuda.empty_cache() would manually release cached memory.

    else:
        with torch.autograd.profiler.profile(use_cuda=True) as prof:
            # Inside checkpoint() the forward runs under no_grad, so the
            # intermediate activations are NOT stored; during backward the
            # saved inputs and the function are used to recompute them and
            # obtain gradients.
            # NOTE: at least one checkpoint input must require grad,
            # otherwise backward through the segment is skipped entirely.
            # checkpoint cannot detect a forward that differs on re-run;
            # a non-deterministic segment yields wrong gradients.
            x = checkpoint(run_first_half, layer1(inp))
            x = checkpoint(run_second_half, x)
            x.sum().backward()  # ~3 GB of GPU memory for this configuration

        print('result max_memory_cached = ', torch.cuda.max_memory_cached() / 1024 / 1024, 'MB')
        # BUGFIX: was print(print(...)), which also printed a spurious None.
        print(prof.key_averages().table(sort_by="cuda_time_total"))

# Run the benchmark only when executed as a script, not on import.
if __name__ == '__main__':
    main()