
import os

# Must be set BEFORE torch initializes CUDA; pins the process to GPU index 1.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

import torch
from torch.utils import benchmark

# Data type under test for the matmul (was named `type`, shadowing the builtin).
dtype = torch.float32
n = 1024 * 16  # square matrix side length: 16384

# Allocate directly on the GPU with the target dtype — the original built the
# tensors on the CPU, cast them with .type(...), then copied to the device,
# which wastes host memory and a host->device transfer for each matrix.
a = torch.randn(n, n, dtype=dtype, device="cuda")
b = torch.randn(n, n, dtype=dtype, device="cuda")

# torch.utils.benchmark.Timer handles CUDA synchronization and warmup
# bookkeeping around the timed statement.
t = benchmark.Timer(stmt='a @ b', globals={'a': a, 'b': b})
x = t.timeit(50)

# An (n x n) @ (n x n) matmul performs 2*n^3 floating-point ops;
# dividing by the median runtime and 1e12 yields TFLOP/s.
print(2 * n**3 / x.median / 1e12)


"""
nvidia-smi -q -d clock
sudo nvidia-smi -q -d SUPPORTED_CLOCKS
sudo nvidia-smi -i 0 --query-supported-clocks=mem,gr --format=csv 
nvidia-smi --format=csv --query-gpu=clocks.max.graphics,clocks.max.sm,clocks.max.memory
sudo nvidia-smi -i 0 -ac 9751,2100
sudo nvidia-smi -i 0 -ac 9501,1920
sudo nvidia-smi -i 0 -ac 7000,2145

sudo nvidia-smi  -ac 9751,2100


nvidia-smi -q -d clock
sudo nvidia-smi -lgc 2100,2100


nvidia-smi -lmc 9751

sudo nvidia-smi base-clocks

nvidia-smi -i 0 -q -d MEMORY,UTILIZATION,POWER,CLOCK,COMPUTE
nvidia-smi --auto-boost-default=ENABLED -i 0

nvidia-smi -rac

nvidia-smi -rgc
nvidia-smi -rmc

sudo nvidia-smi -pm 1 

nvidia-smi -pl 350

nvidia-smi stats -i 0 -d pwrDraw


fp16
./cublasMatmulBench -P=hsh -m=12288 -n=9216 -k=32768 -T=1000 -tb=1 -B=0

fp32
./cublasMatmulBench -P=sss -m=3456 -n=2048 -k=16384 -T=1000 -tb=1 -B=0

tf32
./cublasMatmulBench -P=sss_fast_tf32 -m=8192 -n=3456 -k=16384 -T=1000 -ta=1 -B=0

fp64 
./cublasMatmulBench -P=ddd -m=3456 -n=2048 -k=16384 -T=1000 -tb=1 -B=0

"""
