"""Smoke test for OmniBind_Base: embed paired audio / image / text / point-cloud
inputs and print every cross-modal similarity matrix.

Each `emb_*` call returns a batch of two embeddings (train, toilet); the
printed 2x2 products should score highest on the diagonal for matched pairs.
"""
from omni_model.omni_space import OmniBind_Base
import torch


def main():
    # pretrained=True loads the released checkpoint weights.
    model = OmniBind_Base(pretrained=True)

    # Fall back to CPU when no GPU is present instead of crashing on .cuda().
    if torch.cuda.is_available():
        model = model.cuda()
    model = model.eval()  # inference mode: freeze dropout / batch-norm stats

    # No gradients needed for embedding extraction.
    with torch.no_grad():
        aud = model.emb_audios(['assets/train.wav', 'assets/toilet.wav'])
        img = model.emb_images(['assets/train.jpeg', 'assets/toilet.jpeg'])
        txt = model.emb_texts(['a photo of train', 'a photo of toilet'])
        pc = model.emb_points(['assets/train.npy', 'assets/toilet.npy'])

    print('shape', aud.shape, img.shape, txt.shape, pc.shape)

    # Pairwise similarity matrices across all modality combinations.
    # NOTE(review): assumes embeddings are L2-normalized so the dot product
    # acts as cosine similarity — confirm in OmniBind_Base.
    print('aud@img.T', aud @ img.T)
    print(aud @ txt.T)
    print(aud @ pc.T)
    print(img @ txt.T)
    print(img @ pc.T)
    print(txt @ pc.T)


if __name__ == '__main__':
    main()