# -*- coding:utf-8 -*-

import cv2
import torch
import torch.nn.functional as F
from torch import nn
from einops.layers.torch import Rearrange
import torch.optim as optim
from glom_pytorch import Glom

from torchvision import transforms
import matplotlib.pyplot as plt
import numpy as np
from torch.autograd import Variable

# Tensor <-> PIL conversion helpers used for visualisation.
loader = transforms.Compose([transforms.ToTensor()])  # PIL/ndarray -> float tensor in [0, 1]

unloader = transforms.ToPILImage()  # CHW float tensor -> PIL image


def imshow(tensor, title=None):
    """Display an image tensor with matplotlib.

    Args:
        tensor: image tensor; a leading batch dimension of size 1
            (shape ``(1, C, H, W)``) is squeezed away before display.
        title: optional figure title.
    """
    image = tensor.cpu().detach().clone()
    image = image.squeeze(0)  # drop the batch dimension
    image = unloader(image)   # tensor -> PIL image
    plt.imshow(image)
    if title is not None:
        plt.title(title)
    # Bug fix: show the figure regardless of whether a title was given;
    # previously plt.show() ran only inside the title branch, so untitled
    # images were drawn but never displayed.
    plt.show()


# GLOM encoder: 224x224 images cut into 14x14 patches, 6 part-whole levels,
# a 512-dimensional embedding per level.
glom_config = {
    'dim': 512,         # embedding dimension
    'levels': 6,        # number of part-whole levels
    'image_size': 224,  # input image side length
    'patch_size': 14,   # patch side length
}
model = Glom(**glom_config)

# img = torch.randn(1, 3, 224, 224)
im = cv2.imread("./BSD500/448.jpg")
if im is None:
    # cv2.imread signals failure by returning None (no exception); fail loudly
    # here instead of crashing opaquely inside cvtColor.
    raise FileNotFoundError("could not read image ./BSD500/448.jpg")
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; convert to RGB
# HWC uint8 -> 1 x C x H x W float32 scaled to [0, 1]
img = torch.from_numpy(im.transpose((2, 0, 1)).astype('float32') / 255.).unsqueeze(0)
# NOTE: torch.autograd.Variable is a deprecated no-op since PyTorch 0.4 —
# plain tensors carry autograd state — so the Variable(img) wrapper was removed.

# Self-supervised denoising target. Noise injection is currently disabled,
# so the model is trained to reconstruct the clean image.
# noised_img = img + torch.randn_like(img)
noised_img = img

# Decoder mapping per-patch top-level embeddings back to image pixels.
# Bug fix: it must be built ONCE, outside the training loop, so its weights
# persist (and train) across iterations — previously it was re-created every
# iteration and its parameters never reached the optimizer.
patches_to_images = nn.Sequential(
    nn.Linear(512, 14 * 14 * 3),
    Rearrange('b (h w) (p1 p2 c) -> b c (h p1) (w p2)',
              p1=14, p2=14, h=(224 // 14))
)

# Optimize both the GLOM encoder and the patch decoder.
optimizer = optim.SGD(
    list(model.parameters()) + list(patches_to_images.parameters()),
    lr=0.1, momentum=0.9)

maxIter = 3
for batch_idx in range(maxIter):
    all_levels = model(noised_img, return_all=True)
    print("all_levels.shape:", all_levels.shape)

    # Top-level embeddings at time step 7; -1 selects the top level.
    # NOTE(review): assumes all_levels is (time, batch, patches, levels, dim)
    # as returned by glom-pytorch with return_all=True — confirm.
    top_level = all_levels[7, :, :, -1]
    print("top_level.shape:", top_level.shape)

    recon_img = patches_to_images(top_level)
    print("recon_img.shape:", recon_img.shape)

    # Self-supervised learning by denoising / reconstruction.
    loss = F.mse_loss(img, recon_img)

    # Bug fix: clear accumulated gradients before each backward pass;
    # without zero_grad() the gradients of every iteration were summed.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

imshow(noised_img, "noised_img")
imshow(recon_img, "recon_img")
