# -*- coding:utf-8 -*-

import torch
import torch.nn.functional as F
from torch import nn
from einops.layers.torch import Rearrange

from glom_pytorch import Glom

# Select the compute device: prefer the first CUDA GPU when available,
# otherwise fall back to the CPU.
device = "cuda:0" if torch.cuda.is_available() else "cpu"

# Build the GLOM column model.
# NOTE(review): `image_chanel` (sic), `return_state` and `device` are keyword
# names of the glom_pytorch fork in use — confirm they match the installed
# package's signature before renaming anything.
model = Glom(
    dim = 128,         # embedding dimension per level
    levels = 6,        # number of levels per column
    image_size = 250,  # input images are 250x250
    patch_size = 10,    # 10x10 patches -> a 25x25 patch grid
    consensus_self=False,
    local_consensus_radius=0,
    image_chanel=1,
    return_state=1,
    device=device
).to(device)

# A batch of 8 single-channel 250x250 images; corrupt them with unit Gaussian
# noise for the self-supervised denoising objective.
img = torch.randn(8, 1, 250, 250).to(device)
noised_img = img + torch.randn_like(img)

# Bug fix: Tensor.to is not in-place — the original call discarded its result.
# (torch.randn_like already allocates on img's device, so this is effectively
# a no-op; the explicit assignment keeps the intent clear and correct.)
noised_img = noised_img.to(device)

# Decoder that maps each 128-dim patch embedding back to a flat 10x10
# single-channel patch, then folds the 25x25 patch grid into a full image.
_patch_decoder = nn.Linear(128, 10 * 10 * 1)
_fold_patch_grid = Rearrange('b (h w) (p1 p2 c) -> b c (h p1) (w p2)', p1 = 10, p2 = 10, h = (250 // 10))
patches_to_images = nn.Sequential(_patch_decoder, _fold_patch_grid)


# Run GLOM on the noised batch. With return_all = 1 the model returns the
# per-iteration column states rather than only the final state — presumably
# shaped (iterations, batch, num_patches, levels, dim); TODO confirm against
# the glom_pytorch fork in use.
all_levels = model(noised_img, return_all = 1)


# NOTE(review): index 7 selects the state at time step 7 while the comment
# says "iteration 6" — consistent only if step 0 is the initial state; verify.
top_level = all_levels[7, :, :, -1]  # get the top level embeddings after iteration 6

patches_to_images.to(device)

# Decode the top-level embeddings back into image space and inspect shapes.
reconstruction_img = patches_to_images(top_level)
print(top_level.shape)
print(reconstruction_img.shape)
# do self-supervised learning by denoising

# Denoising objective: reconstruct the clean image from the noised input.
loss = F.mse_loss(img, reconstruction_img)
loss.backward()

# print(reconstruction_img)

from matplotlib import pyplot as plt
import os

# Allow duplicate OpenMP runtimes — torch and matplotlib can each load their
# own libiomp, which otherwise aborts the process (common on Windows/conda).
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"


# Show the original and the reconstructed image side by side.
f, axes = plt.subplots(1, 2, figsize=(15, 12))
color_theme = 'YlOrBr'
img_id = 0  # which image of the batch to display
plt.subplots_adjust(left=None, bottom=None, right=None, top=None,
                    wspace=0.4, hspace=0.6)
# Bug fix: imshow expects (M, N) for grayscale; the original reshaped to
# (250, 250, 1), which matplotlib rejects as an invalid RGB(A) shape.
origin_img = img.detach().cpu().numpy()[img_id, :, :, :].reshape(250, 250)
recon_img = reconstruction_img.detach().cpu().numpy()[img_id, :, :, :].reshape(250, 250)

axes[0].set_title("Original Image")
axes[1].set_title("Reconstruction Image")
# Pass the colormap by name: plt.cm.get_cmap is deprecated and removed in
# matplotlib 3.9; a string cmap argument is equivalent.
axes[0].imshow(origin_img, cmap=color_theme)
axes[1].imshow(recon_img, cmap=color_theme)
plt.show()
