import timeit

import torch
import csv

'''
Reference documentation:
https://github.com/pytorch/pytorch/issues/77753

https://github.com/pytorch/pytorch/issues/77799
'''

# Module-level counter kept for backward compatibility with the driver
# loop below, which historically advanced it by 250 per iteration.
batch_size = 0

def betch_mark(batch_size, number=100_000):
    """Time a ``(n,) @ (n, n)`` matmul on CPU and return the measurement.

    Bug fixes vs. the original:
    - the parameter was misspelled (``batch_sise``) and never used; the
      function read the module-level global instead, making it wrong when
      called with any other value,
    - ``timeit.timeit`` ran twice, doubling the benchmark cost and
      returning a *different* measurement than the one printed.

    To benchmark other devices (mps/cuda, as in the transcripts below),
    change ``device=`` on the two tensors.

    :param batch_size: size ``n`` of the operands.
    :param number: number of matmul repetitions timed (default keeps the
        original behavior of 100_000).
    :return: ``(batch_size, elapsed_seconds)`` tuple, ready for csv.writerow.
    """
    a_cpu = torch.rand(batch_size, device='cpu')
    b_cpu = torch.rand((batch_size, batch_size), device='cpu')
    print(f'batch_size: {batch_size}')
    # Measure exactly once so the printed and returned values agree.
    elapsed = timeit.timeit(lambda: a_cpu @ b_cpu, number=number)
    print('cpu', elapsed)
    return (batch_size, elapsed)


filename = 'output/mps_benchmark.csv'
# Create the output directory up front; open() would otherwise raise
# FileNotFoundError when 'output/' does not exist.
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, 'w', newline='') as csvfile:
    writer = csv.writer(csvfile)

    # Benchmark batch sizes 250, 500, ..., 7500 (30 steps, same values as
    # the original `batch_size += 250` loop) and write one CSV row each.
    # The loop variable deliberately reuses the module-level name
    # `batch_size`, preserving the original global-update behavior.
    for batch_size in range(250, 30 * 250 + 1, 250):
        writer.writerow(betch_mark(batch_size))

'''
Transcript: CPU run (device='cpu'), truncated at batch_size 3500.

/Users/liuzexiang/Downloads/数据/neural-datalog-through-time/venv/bin/python /Applications/PyCharm.app/Contents/plugins/python/helpers/pydev/pydevconsole.py --mode=client --host=127.0.0.1 --port=52273 
import sys; print('Python %s on %s' % (sys.version, sys.platform))
sys.path.extend(['/Users/liuzexiang/Downloads/neural-datalog-through-time'])
PyDev console: starting.
Python 3.12.2 (v3.12.2:6abddd9f6a, Feb  6 2024, 17:02:06) [Clang 13.0.0 (clang-1300.0.29.30)] on darwin
runfile('/Users/liuzexiang/Downloads/neural-datalog-through-time/ndtt/test/testTorchGPU_Slow.py', wdir='/Users/liuzexiang/Downloads/neural-datalog-through-time/ndtt/test')
batch_size: 250
cpu 0.3889189999899827
batch_size: 500
cpu 0.7279081669985317
batch_size: 750
cpu 1.3258026670082472
batch_size: 1000
cpu 2.1103827499900945
batch_size: 1250
cpu 4.294814541994128
batch_size: 1500
cpu 6.052866041951347
batch_size: 1750
cpu 8.334245583973825
batch_size: 2000
cpu 15.411738166993018
batch_size: 2250
cpu 26.843187499966007
batch_size: 2500
cpu 52.66884608397959
batch_size: 2750
cpu 87.86110879201442
batch_size: 3000
cpu 128.10327529202914
batch_size: 3250
cpu 179.44473291700706
batch_size: 3500


'''



'''
Transcript: Apple MPS run (device='mps').

/Users/liuzexiang/Downloads/数据/neural-datalog-through-time/venv/bin/python /Applications/PyCharm.app/Contents/plugins/python/helpers/pydev/pydevconsole.py --mode=client --host=127.0.0.1 --port=65503 
import sys; print('Python %s on %s' % (sys.version, sys.platform))
sys.path.extend(['/Users/liuzexiang/Downloads/neural-datalog-through-time'])
PyDev console: starting.
Python 3.12.2 (v3.12.2:6abddd9f6a, Feb  6 2024, 17:02:06) [Clang 13.0.0 (clang-1300.0.29.30)] on darwin
runfile('/Users/liuzexiang/Downloads/neural-datalog-through-time/ndtt/test/testTorchGPU_Slow.py', wdir='/Users/liuzexiang/Downloads/neural-datalog-through-time/ndtt/test')
batch_size: 250
mps 4.100422000003164
batch_size: 500
mps 4.080347291994258
batch_size: 750
mps 4.073470916002407
batch_size: 1000
mps 4.089500667003449
batch_size: 1250
mps 5.169591375000891
batch_size: 1500
mps 9.522188125003595
batch_size: 1750
mps 15.924646125000436
batch_size: 2000
mps 18.583592750001117
batch_size: 2250
mps 25.86609745801252
batch_size: 2500
mps 29.068538042003638
batch_size: 2750
mps 42.4321448749979
batch_size: 3000
mps 44.21487108300789
batch_size: 3250
mps 58.29482762499538
batch_size: 3500
mps 55.23399154099752
batch_size: 3750
mps 75.02093108301051
batch_size: 4000
mps 72.67628933399101
batch_size: 4250
mps 97.27706720799324
batch_size: 4500
mps 99.92375766599434
batch_size: 4750
mps 119.79501266599982
batch_size: 5000
mps 120.6691995000001
batch_size: 5250
mps 122.88539699999092
batch_size: 5500
mps 155.44102441599534
batch_size: 5750
mps 187.41549562499858
batch_size: 6000
mps 173.856170250001
batch_size: 6250
mps 213.3440547920036
batch_size: 6500
mps 194.83780295899487
batch_size: 6750
mps 256.3326130420028
batch_size: 7000
mps 243.90283362500486
batch_size: 7250
mps 292.01396795800247
batch_size: 7500
mps 265.7475717089983
'''

'''
Transcript: NVIDIA CUDA run (device='cuda').

batch_size: 250
cuda 5.0044763000000785
batch_size: 500
cuda 4.133261400000038
batch_size: 750
cuda 4.215200100000175
batch_size: 1000
cuda 3.8452314999999544
batch_size: 1250
cuda 4.854550899999822
batch_size: 1500
cuda 6.746834799999988
batch_size: 1750
cuda 8.851739499999894
batch_size: 2000
cuda 11.00123960000019
batch_size: 2250
cuda 14.04098840000006
batch_size: 2500
cuda 16.637339200000042
batch_size: 2750
cuda 19.92058320000001
batch_size: 3000
cuda 23.56543039999997
batch_size: 3250
cuda 27.754110000000082
batch_size: 3500
cuda 32.51361330000009
batch_size: 3750
cuda 37.96145780000006
batch_size: 4000
cuda 41.39952960000005
batch_size: 4250
cuda 56.61455729999989
batch_size: 4500
cuda 61.35225990000026
batch_size: 4750
cuda 66.40860609999982
batch_size: 5000
cuda 69.03906920000009
batch_size: 5250
cuda 76.96555099999978
batch_size: 5500
cuda 82.67080440000018
batch_size: 5750
cuda 91.61842449999995
batch_size: 6000
cuda 94.11740389999977
batch_size: 6250
cuda 106.72274050000033
batch_size: 6500
cuda 113.71815000000015
batch_size: 6750
cuda 125.76201110000011
batch_size: 7000
cuda 126.25853289999986
batch_size: 7250
cuda 142.58949310000025
batch_size: 7500
cuda 151.13141910000013




'''