# https://pytorch.org/docs/2.1/notes/cuda.html

def sep(label='', cnt=32, char='-', rchar=None):
    """Print a labeled separator: `cnt` copies of `char`, then `label`,
    then `cnt` copies of `rchar` (which defaults to `char`)."""
    right = char if rchar is None else rchar
    print(f'{char * cnt}{label}{right * cnt}', flush=True)
    
if '__main__' == __name__:

    import torch
    from torch.cuda import Event

    # Number of timed float32 matmul iterations to average over.
    N = 10

    SEED = 42
    sep(f'torch SEED={SEED}')
    torch.manual_seed(SEED)
    # The whole benchmark allocates on 'cuda'; fail fast with a clear
    # message instead of an opaque RuntimeError at the first randn().
    if not torch.cuda.is_available():
        raise SystemExit('CUDA device required for this benchmark.')
    torch.cuda.manual_seed_all(SEED)

    t1 = Event(enable_timing=True)
    t2 = Event(enable_timing=True)

    # Reference result at float64 precision.
    sep('double')
    a_full = torch.randn(10240, 10240, dtype=torch.double, device='cuda')
    b_full = torch.randn(10240, 10240, dtype=torch.double, device='cuda')
    t1.record()
    ab_full = a_full @ b_full
    # Record the stop event first, THEN synchronize: calling
    # torch.cuda.synchronize() before t2.record() raised
    # "CUDA error: device not ready".
    t2.record()
    torch.cuda.synchronize()

    print(f'Time: {t1.elapsed_time(t2):,f}')  # milliseconds
    mean = ab_full.abs().mean()
    print(f'mean = {mean}')

    sep('float')
    a = a_full.float()
    b = b_full.float()

    # Toggle TF32 mode for float32 matmuls.
    # Previously observed: ~158 ms with TF32, ~235 ms without.
    allow_tf32 = False
    torch.backends.cuda.matmul.allow_tf32 = allow_tf32
    # Label reflects the actual setting (the old hard-coded 'TF32 ON'
    # label contradicted allow_tf32 = False).
    sep(f"TF32 {'ON' if allow_tf32 else 'OFF'}")
    total = 0.0  # renamed from `sum`, which shadowed the builtin
    for i in range(N):
        sep(i + 1)
        torch.cuda.empty_cache()  # drop cached allocations before each run
        t1.record()
        ab = a @ b
        t2.record()
        torch.cuda.synchronize()
        duration = t1.elapsed_time(t2)  # milliseconds
        print(f'Time: {duration:,f}')
        # Max absolute deviation from the float64 reference product.
        error = (ab - ab_full).abs().max()
        print(f'error = {error}')
        relative_error = error / mean
        print(f'rel error = {relative_error}')
        total += duration
    print(f'Average duration = {total / N:,f}')
