"""
Masked AutoEncoder
"""
import os
import torch
import torchvision.transforms as transforms
import numpy as np
from PIL import Image
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from numpy import linalg as LA
from .impl.mae import *
from .impl.pos_embed import interpolate_pos_embed
from .utils import to_pil

# Run on GPU when one is available, otherwise fall back to CPU.
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class MAEFeatureExtractor(object):
    """Image feature extractor backed by a pretrained MAE Vision Transformer.

    Loads an MAE checkpoint from ./checkpoints/, interpolates its position
    embeddings to this input size, and exposes ``extract(image)``, which
    returns an L2-normalized feature vector as a numpy array.

    NOTE(review): FEATURE_DIM = 768 is the ViT-Base embedding dim; the
    large/huge backbones emit wider features — confirm callers rely on this
    constant only for the base model.
    """
    FEATURE_DIM = 768

    def __init__(self, model_name):
        """Build the backbone and its preprocessing pipeline.

        Args:
            model_name: name of a model constructor exported by .impl.mae,
                e.g. 'vit_base_patch16', 'vit_large_patch16' (any other
                known name selects the huge checkpoint).

        Raises:
            ValueError: if `model_name` does not resolve to a constructor
                in this module's namespace.
        """
        self.input_size = 224
        # Resolve the constructor by name from this module's namespace
        # (populated by `from .impl.mae import *`) instead of eval():
        # same lookup result, no arbitrary-code-execution risk.
        try:
            model_ctor = globals()[model_name]
        except KeyError:
            raise ValueError('unknown MAE model name: {!r}'.format(model_name)) from None
        model = model_ctor(
            num_classes=1000,
            drop_path_rate=0.1,
            global_pool=False
        )

        # Pick the checkpoint file: the base model ships 'pretrain'
        # weights; the larger models use fine-tuned weights.
        if model_name == 'vit_base_patch16':
            ckpt_name = 'mae_pretrain_vit_base.pth'
        elif model_name == 'vit_large_patch16':
            ckpt_name = 'mae_finetuned_vit_large.pth'
        else:
            ckpt_name = 'mae_finetuned_vit_huge.pth'

        # Load weights on CPU first; the model is moved to DEVICE below.
        checkpoint = torch.load('./checkpoints/{}'.format(ckpt_name), map_location='cpu')
        print("Load pre-trained checkpoint from: %s" % ckpt_name)

        checkpoint_model = checkpoint['model']
        state_dict = model.state_dict()
        # Drop the classification head when its shape disagrees with ours
        # (e.g. checkpoint trained with a different number of classes).
        for k in ['head.weight', 'head.bias']:
            if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
                print(f"Removing key {k} from pretrained checkpoint")
                del checkpoint_model[k]

        # Resize position embeddings so the checkpoint fits this input size.
        interpolate_pos_embed(model, checkpoint_model)

        # strict=False: the (possibly removed) head keys may be missing.
        msg = model.load_state_dict(checkpoint_model, strict=False)
        print(msg)

        self.model = model.to(DEVICE)
        self.model = self.model.eval()

        # Preprocessing pipeline applied to every input image.
        self.transforms = self.build_transform()

    def build_transform(self):
        """Return the eval-time preprocessing pipeline: resize (standard
        224/256 crop ratio), center-crop to input_size, tensor conversion,
        and ImageNet mean/std normalization."""
        mean = IMAGENET_DEFAULT_MEAN
        std = IMAGENET_DEFAULT_STD

        t = []
        # Standard timm eval recipe: resize so that a center crop of
        # `input_size` corresponds to `crop_pct` of the resized image.
        if self.input_size <= 224:
            crop_pct = 224 / 256
        else:
            crop_pct = 1.0
        size = int(self.input_size / crop_pct)
        t.append(
            transforms.Resize(size, interpolation=Image.BICUBIC),  # to maintain same ratio w.r.t. 224 images
        )
        t.append(transforms.CenterCrop(self.input_size))

        t.append(transforms.ToTensor())
        t.append(transforms.Normalize(mean, std))
        return transforms.Compose(t)

    def extract(self, image):
        """Extract an L2-normalized feature vector for a single image.

        Args:
            image: anything `to_pil` accepts. NOTE(review): the exact
                accepted input types depend on utils.to_pil — confirm there.

        Returns:
            numpy.ndarray: the backbone's feature vector, L2-normalized
            (left unnormalized if its norm is zero, to avoid NaNs).
        """
        image = to_pil(image)
        inputs = self.transforms(image)
        inputs = inputs.unsqueeze(0)  # add batch dimension
        with torch.no_grad():
            inputs = inputs.to(DEVICE)
            outputs = self.model.forward_features(inputs)

        outputs = outputs.squeeze(0).detach().cpu().numpy()

        # L2-normalize; guard the all-zero vector, which would otherwise
        # produce NaNs from 0/0 (the original divided unconditionally).
        norm = LA.norm(outputs)
        if norm > 0:
            outputs = outputs / norm
        return outputs
