# -*- coding:utf-8 -*-

import torch
import torch.nn.functional as F
from torch import nn
from einops.layers.torch import Rearrange

from glom_pytorch import Glom

# Select the compute device: prefer the first CUDA GPU when available,
# otherwise fall back to the CPU.
device = "cuda:0" if torch.cuda.is_available() else "cpu"

# Uncomment to force CPU execution regardless of GPU availability:
# device = "cpu"

# Build the GLOM model and move it onto the chosen device.
# NOTE(review): the upstream glom_pytorch.Glom constructor takes no
# `device` keyword (placement is handled by `.to(device)` below), so the
# original `device=device` argument is dropped here — confirm against the
# installed glom_pytorch version.
model = Glom(
    dim = 512,         # token/embedding dimension
    levels = 6,        # number of GLOM levels per column
    image_size = 224,  # input image height/width
    patch_size = 14,   # side length of each square patch
).to(device)

# One random RGB image plus an additively-noised copy for the denoising task.
img = torch.randn(1, 3, 224, 224).to(device)
# torch.randn_like(img) is created on img's device, so noised_img already
# lives on `device`.  (The original `noised_img.to(device)` discarded its
# result — Tensor.to is not in-place — and was a no-op, so it is removed.)
noised_img = img + torch.randn_like(img)

# Run the model, keeping embeddings from every iteration and level.
# Expected shape: (time, batch, num_patches, levels, dim) — per the
# glom_pytorch README; verify against the installed version.
all_levels = model(noised_img, return_all = True)

# Decode per-patch top-level embeddings back into an image: a linear map
# produces 14*14*3 pixel values per patch, then the 16x16 patch grid is
# stitched back into a 224x224 RGB image.  nn.Module.to() is in-place and
# returns self, so placement can be chained onto the construction.
patches_to_images = nn.Sequential(
    nn.Linear(512, 14 * 14 * 3),
    Rearrange('b (h w) (p1 p2 c) -> b c (h p1) (w p2)', p1=14, p2=14, h=224 // 14),
).to(device)

# Top (last) level's embeddings at time index 7 — i.e. after iteration 6.
top_level = all_levels[7, :, :, -1]

recon_img = patches_to_images(top_level)

# Self-supervised denoising objective: the model must reconstruct the
# clean image from the noised input's top-level embeddings.
# F.mse_loss takes (input, target) — the prediction goes first.  MSE is
# symmetric, so the loss value and gradients are identical to the
# original (img, recon_img) ordering; only the convention is corrected.
loss = F.mse_loss(recon_img, img)
loss.backward()

print(recon_img)