# the test idea is from pytorch tutorials
import torch
import timeit

def batched_dot_mul_sum(a, b):
    return a.mul(b).sum(-1)

def batched_dot_bmm(a, b):
    a = a.reshape(-1, 1, a.shape[-1])
    b = b.reshape(-1, b.shape[-1], 1)
    return torch.bmm(a, b).flatten(-3)

if torch.cuda.is_available():
    d = torch.device("cuda")
    print("Use cuda for testing")
elif torch.backends.mps.is_available():
    d = torch.device("mps")
    print("Use mps for testing")
else:
    print("Use CPU for testing")
    d = torch.device("cpu")

x = torch.randn(10000, 640, device=d)

# verify the calculation is correct
assert batched_dot_mul_sum(x, x).allclose(batched_dot_bmm(x, x))

def test(n, x, num):
    t = timeit.Timer(
        stmt = f'{n}(x, x)',
        setup = 'from __main__ import ' + n,
        globals={'x':x})
    t.timeit(num)
    print(f"{n}:", t.timeit(100))

test('batched_dot_mul_sum', x, 100)
test('batched_dot_bmm', x, 100)
