# -*- coding:utf-8 -*-

import torch
import torch.nn.functional as F
from torch import nn
from einops.layers.torch import Rearrange

from glom_pytorch import Glom

# Select the compute device: the first CUDA GPU when available, else CPU.
# (No `== True` comparison — the call already returns a bool.)
device = "cuda:0" if torch.cuda.is_available() else "cpu"

# Build the GLOM model and move it to the chosen device.
# Keyword names follow the glom_pytorch fork used here
# (NOTE(review): `image_chanel` is the library's own spelling — do not "fix" it).
model = Glom(
    dim = 128,                   # embedding dimension per level
    levels = 6,                  # number of hierarchy levels per column
    image_size = 250,            # input image height/width in pixels
    patch_size = 10,             # side length of each square patch
    consensus_self = False,      # presumably excludes a level's own vector from consensus — confirm against library docs
    local_consensus_radius = 0,  # 0 presumably means global (unrestricted) consensus — confirm
    image_chanel = 1,            # single-channel (grayscale) input
    return_state = True,         # was 1; True reads as the boolean it is (1 == True)
    device = device,
).to(device)

# A batch of 8 grayscale 250x250 images and a noised copy for
# denoising-style self-supervised learning.
img = torch.randn(8, 1, 250, 250).to(device)
# randn_like matches img's device/dtype, so noised_img is already on `device`.
# (The original `noised_img.to(device)` statement was a no-op: Tensor.to
# returns a new tensor and the result was discarded.)
noised_img = img + torch.randn_like(img)

# Run the model, requesting the states from every iteration.
all_levels = model(noised_img, return_all = True)

# Decoder head: project each 128-dim top-level token back to a flat
# 10x10x1 patch, then stitch the 25x25 grid of patches into a 250x250
# single-channel image. `.to()` is in-place for nn.Module, so chaining it
# here replaces the discarded-looking standalone call.
patches_to_images = nn.Sequential(
    nn.Linear(128, 10 * 10 * 1),
    Rearrange('b (h w) (p1 p2 c) -> b c (h p1) (w p2)', p1 = 10, p2 = 10, h = (250 // 10)),
).to(device)

# assumes all_levels is indexed [time, batch, patch, level, dim] — TODO confirm
# against glom_pytorch; index 5 selects the state after the 6th iteration,
# -1 the topmost level.
top_level = all_levels[5, :, :, -1]

recon_img = patches_to_images(top_level)
print(top_level.shape)
print(recon_img.shape)

# Self-supervised denoising objective: reconstruction vs. the clean image.
# F.mse_loss(input, target) — prediction first, per the PyTorch convention
# (the loss value itself is symmetric, so behavior is unchanged).
loss = F.mse_loss(recon_img, img)
loss.backward()

print(recon_img)