import torch
from torch import nn
from lightly.models import utils
import masked_autoencoder
from collections import OrderedDict

class CoSMAE(nn.Module):
    """Contrastive Self-supervised Masked Autoencoder.

    Combines MAE-style masked-patch reconstruction with a small projection
    head applied to the global image representation (CLS token when present,
    otherwise mean-pooled token embeddings), so the model can serve both a
    reconstruction loss and a contrastive/consistency loss.

    Args:
        vit: A torchvision-style Vision Transformer exposing ``patch_size``,
            ``seq_length`` and ``hidden_dim`` attributes.
        mr: Mask ratio — fraction of tokens hidden from the encoder.
        channels: Number of image channels; sizes the pixel-prediction head
            (``patch_size**2 * channels`` outputs per masked token).
        decoder_dim: Hidden width of the MAE decoder.
        proj_dim: Bottleneck width of the projection head.
    """

    def __init__(self, vit, mr=0.75, channels=3, decoder_dim=512, proj_dim=128):
        super().__init__()

        self.use_cls_token = True
        self.mask_ratio = mr
        self.patch_size = vit.patch_size
        # Without a class token the backbone emits one token fewer.
        if self.use_cls_token:
            self.sequence_length = vit.seq_length
        else:
            self.sequence_length = vit.seq_length - 1
        # Learnable token substituted for every masked position in the decoder.
        self.mask_token = nn.Parameter(torch.zeros(1, 1, decoder_dim))
        self.backbone = masked_autoencoder.MAEBackbone.from_vit(vit, self.use_cls_token)
        self.decoder = masked_autoencoder.MAEDecoder(
            seq_length=self.sequence_length,
            num_layers=8,
            num_heads=16,
            embed_input_dim=vit.hidden_dim,
            hidden_dim=decoder_dim,
            mlp_dim=decoder_dim * 4,
            out_dim=vit.patch_size**2 * channels,  # raw pixels per patch
            dropout=0,
            attention_dropout=0,
        )
        # Size the projector from the ViT's hidden dim (was hard-coded 768,
        # which only worked for ViT-Base variants).
        self.Projector = nn.Sequential(OrderedDict([
            ('Proj_Lin_1', nn.Linear(in_features=vit.hidden_dim, out_features=proj_dim)),
            ('Proj_ReLU_1', nn.ReLU()),
            ('Proj_Lin_2', nn.Linear(in_features=proj_dim, out_features=vit.hidden_dim)),
        ]))

    def forward_encoder(self, images, idx_keep=None):
        """Encode ``images``, keeping only the tokens listed in ``idx_keep``."""
        return self.backbone.encode(images, idx_keep)

    def forward_decoder(self, x_encoded, idx_keep, idx_mask):
        """Predict pixel values for the masked tokens.

        Args:
            x_encoded: Encoder output for the kept (visible) tokens.
            idx_keep: Indices of the visible tokens in the full sequence.
            idx_mask: Indices of the masked tokens to reconstruct.

        Returns:
            Predicted per-patch pixel values for the masked positions.
        """
        batch_size = x_encoded.shape[0]
        # Project encoder tokens into the decoder's width.
        x_decode = self.decoder.embed(x_encoded)
        # Start from a full-length sequence of mask tokens, then scatter the
        # visible token embeddings back into their original positions.
        x_masked = utils.repeat_token(
            self.mask_token, (batch_size, self.sequence_length)
        )
        x_masked = utils.set_at_index(x_masked, idx_keep, x_decode.type_as(x_masked))

        # decoder forward pass
        x_decoded = self.decoder.decode(x_masked)

        # Run the prediction head only on the masked positions.
        x_pred = utils.get_at_index(x_decoded, idx_mask)
        x_pred = self.decoder.predict(x_pred)
        return x_pred

    def forward(self, images):
        """Run one masked-autoencoding pass.

        Returns:
            Tuple ``(x_pred, target, enc_out, x_proj)``: predicted pixels for
            masked patches, the corresponding ground-truth patches, the global
            encoder representation, and its projection.
        """
        batch_size = images.shape[0]
        # When there is no class token, mask_class_token=True so that all
        # tokens (including index 0) are eligible for masking.
        idx_keep, idx_mask = utils.random_token_mask(
            size=(batch_size, self.sequence_length),
            mask_ratio=self.mask_ratio,
            mask_class_token=not self.use_cls_token,
            device=images.device,
        )
        x_encoded = self.forward_encoder(images, idx_keep)
        x_pred = self.forward_decoder(x_encoded, idx_keep, idx_mask)

        # Ground-truth pixel patches for the masked tokens. With a class
        # token, token indices are shifted by one relative to the patch grid,
        # so idx_mask must be adjusted before indexing into the patches.
        patches = utils.patchify(images, self.patch_size)
        if self.use_cls_token:
            target = utils.get_at_index(patches, idx_mask - 1)
        else:
            target = utils.get_at_index(patches, idx_mask)

        # Global representation: CLS token if present, otherwise mean-pooled
        # (GAP) token embeddings.
        if self.use_cls_token:
            enc_out = x_encoded[:, 0]
        else:
            enc_out = x_encoded.mean(dim=1)
        x_proj = self.Projector(enc_out)
        return x_pred, target, enc_out, x_proj