# https://pytorch.org/docs/2.1/notes/cuda.html

def sep(label='', cnt=32, char='-', rchar=None):
    """Print a separator line: `cnt` copies of `char`, the label, then
    `cnt` copies of `rchar` (defaults to `char`). Flushes immediately so
    it interleaves correctly with CUDA-side timing output."""
    right = char if rchar is None else rchar
    print(f'{char * cnt}{label}{right * cnt}', flush=True)
    
if __name__ == '__main__':

    import torch
    import time

    # Number of timed float-matmul iterations to average over.
    N = 10

    # Seed both CPU and (all) CUDA RNGs for reproducible inputs.
    SEED = 42
    sep(f'torch SEED={SEED}')
    torch.manual_seed(SEED)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(SEED)

    # --- Reference: double-precision matmul, used as ground truth below ---
    sep('double')
    a_full = torch.randn(10240, 10240, dtype=torch.double, device='cuda')
    b_full = torch.randn(10240, 10240, dtype=torch.double, device='cuda')
    # BUGFIX: drain pending async kernels (the randn fills) before starting
    # the clock, so t1..t2 measures only the matmul.
    torch.cuda.synchronize()
    t1 = time.time_ns()
    ab_full = a_full @ b_full
    torch.cuda.synchronize()
    t2 = time.time_ns()
    print(f'Time: {(t2 - t1) / 10 ** 6:,f}')
    mean = ab_full.abs().mean()
    print(f'mean = {mean}')

    # Single-precision copies of the same inputs.
    sep('float')
    a = a_full.float()
    b = b_full.float()

    sep('TF32 OFF')
    # Do matmul with TF32 disabled.
    # BUGFIX: this was `= True` (with the `False` line commented out), so the
    # benchmark actually ran WITH TF32 enabled, contradicting the label and
    # the comment above. Disable TF32 as intended.
    torch.backends.cuda.matmul.allow_tf32 = False
    total_ms = 0.0  # renamed from `sum`, which shadowed the builtin
    for i in range(N):
        n = i + 1
        sep(n)
        torch.cuda.empty_cache()  # Clear CUDA cache
        # Start each timing from an idle device (empty_cache / prior work).
        torch.cuda.synchronize()
        t1 = time.time_ns()
        ab = (a.detach().clone().to('cuda')) @ (b.detach().clone().to('cuda'))
        torch.cuda.synchronize()
        t2 = time.time_ns()
        duration = (t2 - t1) / 10 ** 6  # ns -> ms
        print(f'Time: {duration:,f}')
        # Max absolute deviation from the double-precision reference
        # (float - double promotes to double before subtracting).
        error = (ab - ab_full).abs().max()
        print(f'error = {error}')
        relative_error = error / mean
        print(f'rel error = {relative_error}')
        total_ms += duration
    print(f'Average duration = {total_ms / N:,f}')
