# -*- coding:utf-8 -*-

import torch
import torch.nn.functional as F
from torch import nn
import torch.optim.lr_scheduler as lr_scheduler
import torch.optim as optim
from einops.layers.torch import Rearrange

from glom_pytorch import Glom
from tqdm import tqdm


class TotalCodingRate(nn.Module):
    """Negative total coding rate of a feature matrix (MCR^2 expansion term)."""

    def __init__(self, eps=0.01):
        super(TotalCodingRate, self).__init__()
        self.eps = eps  # squared distortion level used in the rate estimate

    def compute_discrimn_loss(self, W):
        """Coding rate logdet(I + d/(m*eps) W W^T) / 2 for W of shape [d, m]."""
        d, m = W.shape
        identity = torch.eye(d, device=W.device)
        alpha = d / (m * self.eps)
        return torch.logdet(identity + alpha * W.matmul(W.T)) / 2.

    def forward(self, X):
        # X holds samples as rows; transpose so columns are samples, and
        # negate so that minimising this loss maximises the coding rate.
        return -self.compute_discrimn_loss(X.T)


class MaximalCodingRateReduction(torch.nn.Module):
    """MCR^2 objective: expand the total coding rate while compressing each class.

    Loss = -R(W) + gamma * R_c(W, Pi), where W holds features as columns and
    Pi holds per-class membership weights (hard one-hot or soft probabilities).
    """

    def __init__(self, eps=0.01, gamma=1):
        super(MaximalCodingRateReduction, self).__init__()
        self.eps = eps      # squared distortion level
        self.gamma = gamma  # weight on the compression term

    def compute_discrimn_loss(self, W):
        """Expansion term: logdet(I + p/(m*eps) W W^T) / 2 for W of shape [p, m]."""
        p, m = W.shape
        I = torch.eye(p, device=W.device)
        scalar = p / (m * self.eps)
        logdet = torch.logdet(I + scalar * W.matmul(W.T))
        return logdet / 2.

    def compute_compress_loss(self, W, Pi):
        """Compression term: membership-weighted coding rate summed over classes.

        W:  [p, m] feature matrix (samples as columns).
        Pi: [k, 1, m] per-class membership weights.
        """
        p, m = W.shape
        k, _, _ = Pi.shape
        I = torch.eye(p, device=W.device).expand((k, p, p))
        # Effective per-class sample count; epsilon guards empty classes.
        trPi = Pi.sum(2) + 1e-8
        scale = (p / (trPi * self.eps)).view(k, 1, 1)

        # Broadcast-mask W by each class's memberships, one logdet per class.
        W = W.view((1, p, m))
        log_det = torch.logdet(I + scale * W.mul(Pi).matmul(W.transpose(1, 2)))
        compress_loss = (trPi.squeeze() * log_det / (2 * m)).sum()
        return compress_loss

    def forward(self, X, Y, num_classes=None):
        """Compute the MCR^2 loss.

        X: [m, p] features (samples as rows).
        Y: either an integer label vector of shape [m] or a membership
           probability matrix of shape [m, k].
        Returns (total_loss, [discrimn_loss, compress_loss]) with the two
        component values detached as Python floats.
        """
        if len(Y.shape) == 1:
            # Y is a label vector: build a one-hot membership tensor.
            if num_classes is None:
                # .item() keeps the size a plain int; a 0-dim tensor from
                # Y.max() + 1 is not a valid size for torch.zeros.
                num_classes = int(Y.max().item()) + 1
            Pi = torch.zeros((num_classes, 1, Y.shape[0]), device=Y.device)
            # Vectorized one-hot scatter (replaces a per-sample Python loop).
            Pi[Y.long(), 0, torch.arange(Y.shape[0], device=Y.device)] = 1
        else:
            # Y is a probability matrix: one column of memberships per sample.
            if num_classes is None:
                num_classes = Y.shape[1]
            Pi = Y.T.reshape((num_classes, 1, -1))

        W = X.T
        discrimn_loss = self.compute_discrimn_loss(W)
        compress_loss = self.compute_compress_loss(W, Pi)

        total_loss = -discrimn_loss + self.gamma * compress_loss
        return total_loss, [discrimn_loss.item(), compress_loss.item()]


# Pick the compute device: prefer the first CUDA GPU when available.
# (is_available() already returns a bool; comparing with == True is redundant.)
if torch.cuda.is_available():
    device = "cuda:0"
else:
    device = "cpu"

# device = "cpu"

model = Glom(
    dim = 64,         # embedding dimension of each column vector
    levels = 2,        # number of GLOM hierarchy levels
    image_size = 250,  # input image side length in pixels
    patch_size = 50,    # side length of each square patch (5x5 grid of patches)
    consensus_self=False,        # NOTE(review): semantics defined by glom_pytorch — confirm against the library
    local_consensus_radius=0,    # presumably 0 disables local spatial consensus — verify in glom_pytorch
    image_chanel=1,              # single-channel (grayscale) input; spelling follows the library's keyword
    return_state=2,              # NOTE(review): meaning defined by glom_pytorch — confirm
    device=device
).to(device)

# Clean target image and its noise-corrupted counterpart for denoising.
img = torch.randn(1, 1, 250, 250).to(device)
noised_img = img + torch.randn_like(img)
# Tensor.to() is NOT in-place — the result must be reassigned (the original
# discarded it; harmless only because randn_like already matches img's device).
noised_img = noised_img.to(device)

# Decoder head: project each 64-d patch token back to a 50x50 grayscale patch,
# then stitch the 5x5 grid of patches into one 250x250 image.
patches_to_images = nn.Sequential(
    nn.Linear(64, 50 * 50 * 1),
    Rearrange(
        'b (h w) (p1 p2 c) -> b c (h p1) (w p2)',
        p1=50,
        p2=50,
        h=250 // 50,
    ),
)
# nn.Module.to() moves parameters in place, so no reassignment is needed here.
patches_to_images.to(device)

EPOCHS = 10

# Optimise the GLOM encoder and the patch decoder jointly.
autoencoder_params = list(model.parameters()) + list(patches_to_images.parameters())
opt = optim.SGD(
    autoencoder_params,
    lr=0.005,
    momentum=0.9,
    weight_decay=1e-4,
    nesterov=True,
)

# Cosine-annealed learning rate over the nominal total number of update steps.
num_converge = (50000 // 2) * EPOCHS
scheduler = lr_scheduler.CosineAnnealingLR(opt, T_max=num_converge, eta_min=0, last_epoch=-1)

# Alternative self-supervised objective (unused below; see the commented loss).
criterion = TotalCodingRate(eps=0.2)

for epoch in tqdm(range(EPOCHS)):
    # opt was built from the parameters of both modules, so a single
    # opt.zero_grad() suffices; the extra model.zero_grad() and
    # patches_to_images.zero_grad() calls in the original were redundant.
    opt.zero_grad()

    # Run GLOM on the noisy image and keep states from every iteration/level.
    all_levels = model(noised_img, return_all=1)

    # Take the top-level embeddings from the last recurrent iteration.
    # NOTE(review): the original comment said "after iteration 6" but the
    # leading index is 3 — confirm which axis of all_levels indexes iterations.
    top_level = all_levels[3, :, :, -1]

    reconstruction_img = patches_to_images(top_level)

    # Self-supervised denoising: reconstruct the clean image from the noisy
    # input. MSE is symmetric, so the argument order does not change the value;
    # prediction-first matches F.mse_loss(input, target) convention.
    loss = F.mse_loss(reconstruction_img, img)
    # loss = criterion(reconstruction_img)
    loss.backward()
    opt.step()
    scheduler.step()

# print(reconstruction_img)

from matplotlib import pyplot as plt
import os

# Allow duplicate OpenMP runtimes (a common torch + matplotlib clash on Windows).
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"


# Show the clean target and the network's reconstruction side by side.
f, axes = plt.subplots(1, 2, figsize=(15, 12))
color_theme = 'YlOrBr'
img_id = 0
plt.subplots_adjust(left=None, bottom=None, right=None, top=None,
                    wspace=0.4, hspace=0.6)
orgin_img = img.detach().cpu().numpy()[img_id, :, :, :].reshape(250, 250, 1)
recon_img = reconstruction_img.detach().cpu().numpy()[img_id, :, :, :].reshape(250, 250, 1)

axes[0].set_title("Original Image")
axes[1].set_title("Reconstruction Image")
# Pass the colormap by name: plt.cm.get_cmap() is deprecated since
# matplotlib 3.7 and removed in 3.9; imshow accepts the name string directly.
axes[0].imshow(orgin_img, cmap=color_theme)
axes[1].imshow(recon_img, cmap=color_theme)
plt.show()
