import argparse
import os
import torch
import numpy as np
from numpy import linalg as LA
from PIL import Image
import torch.nn as nn
import torchvision.transforms as transforms
import engine.models.vision_transformer as vits


class VITFeatureExtractor():
    """Extract an L2-normalized global feature vector from an image with a ViT.

    ``args`` must provide: ``arch``, ``patch_size``, ``weights``, ``use_gpu``,
    ``crop_size`` (see ``get_parse``).
    """

    def __init__(self, args):
        # Fall back to CPU when CUDA is unavailable or the user opted out.
        self.device = torch.device("cuda:0" if torch.cuda.is_available() and args.use_gpu else "cpu")
        self.model = self.load_model(args)
        # Standard ImageNet preprocessing: resize short side, center-crop,
        # scale to [0,1], then normalize with ImageNet mean/std.
        self.transforms = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop((args.crop_size, args.crop_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])

    def load_model(self, args):
        """Build the ViT backbone, load checkpoint weights, move to device.

        The checkpoint is loaded onto CPU first so loading works regardless of
        the device it was saved from; ``strict=True`` fails fast on mismatches.
        """
        model = vits.__dict__[args.arch](patch_size=args.patch_size, return_all_tokens=False)
        checkpoint = torch.load(args.weights, map_location="cpu")
        model.load_state_dict(checkpoint['state_dict'], strict=True)
        model.eval()
        return model.to(self.device)

    def _preprocess(self, image):
        """Convert a file path or numpy image into a normalized 1xCxHxW tensor.

        Raises:
            ValueError: if ``image`` is neither a str path nor a numpy array.
        """
        if isinstance(image, str):
            image = Image.open(image)
        elif isinstance(image, np.ndarray):
            image = Image.fromarray(image)
        else:
            raise ValueError("data type not supported")
        # Force 3 channels in BOTH branches: the original converted only the
        # ndarray branch, so a grayscale/RGBA file path crashed in Normalize,
        # which expects exactly 3 channels.
        image = image.convert('RGB')
        inputs = self.transforms(image)
        inputs = inputs.unsqueeze(0).to(self.device)
        return inputs

    def _postprocess(self, out):
        """Move the feature vector to CPU numpy and L2-normalize it."""
        out = out.squeeze(0).detach().cpu().numpy()
        norm = LA.norm(out)
        # Guard against an all-zero vector: dividing by a zero norm would
        # silently produce NaNs.
        if norm > 0:
            out = out / norm
        return out

    def extract(self, img):
        """Return the L2-normalized feature vector (numpy) for ``img``.

        ``img`` may be an image file path (str) or a numpy array.
        """
        inputs = self._preprocess(img)
        # Inference only — no_grad avoids building the autograd graph,
        # saving memory and time.
        with torch.no_grad():
            outputs = self.model(inputs)
        return self._postprocess(outputs)


def get_parse(argv=None):
    """Parse command-line options for feature extraction.

    Args:
        argv: Optional list of argument strings; ``None`` (the default) reads
            ``sys.argv[1:]``, preserving the original call-site behavior.

    Returns:
        argparse.Namespace with arch, weights, use_gpu, crop_size, patch_size.
    """
    def str2bool(value):
        # argparse's ``type=bool`` treats ANY non-empty string as True, so
        # ``--use_gpu False`` used to yield True. Parse common truthy
        # spellings explicitly instead.
        if isinstance(value, bool):
            return value
        return value.lower() in ('true', '1', 'yes', 'y')

    parser = argparse.ArgumentParser(description='infer')
    parser.add_argument('--arch', default='vit_small', type=str, help='model architecture')
    parser.add_argument('--weights', default='checkpoints/VIT_S_16_Block_teacher.pth', type=str, help='checkpoint')
    parser.add_argument('--use_gpu', default=True, type=str2bool)
    parser.add_argument('--crop_size', default=224, type=int)
    parser.add_argument('--patch_size', default=16, type=int)
    args = parser.parse_args(argv)
    return args


def build_GL_extractor():
    """Construct a VITFeatureExtractor configured from command-line defaults."""
    return VITFeatureExtractor(get_parse())


if __name__ == "__main__":
    # Smoke test: build the extractor and run it on a sample image.
    cli_args = get_parse()
    extractor = VITFeatureExtractor(cli_args)

    sample_path = '2007_000256.jpg'
    features = extractor.extract(sample_path)
    print(features.shape)
