import torch
import torch.nn as nn
from functools import partial

from .LoRA import linear_layer_parameterization
from .vit import vit_small
import torch.nn.utils.parametrize as parametrize


class BackBone(nn.Module):
    """ViT-small feature extractor that also captures per-block attention maps.

    A forward hook is registered on every submodule of the encoder whose name
    contains ``attn_drop`` (the dropout applied to the attention matrix inside
    each transformer block, so its output is the attention map itself). After a
    forward pass, ``self.attentions`` holds those maps, moved to CPU, in
    execution order.
    """

    def __init__(self, args):
        # `args` is accepted for interface compatibility with callers;
        # it is not used here.
        super().__init__()
        self.encoder = vit_small()
        # Keep the hook handles so they can be detached via remove_hooks().
        self.hooks = []
        for name, module in self.encoder.named_modules():
            if 'attn_drop' in name:
                self.hooks.append(module.register_forward_hook(self.get_attention))
        # Attention maps collected by the hooks during the most recent forward.
        self.attentions = []

    def get_attention(self, module, input, output):
        """Forward-hook callback: stash the module's output (attention map) on CPU."""
        self.attentions.append(output.cpu())

    def remove_hooks(self):
        """Detach all registered forward hooks so attentions are no longer captured."""
        for handle in self.hooks:
            handle.remove()
        self.hooks = []

    def forward(self, support, query):
        """Encode the support and query batches.

        Args:
            support: batch of support images/tensors fed to the encoder.
            query: batch of query images/tensors fed to the encoder.

        Returns:
            Tuple ``(support_features, query_features)`` from the encoder.
        """
        # Reset before encoding: the hooks append on every forward pass, so
        # without this the list accumulates attention maps from *all* previous
        # batches — an unbounded memory leak in the original implementation.
        self.attentions = []
        support = self.encoder(support)
        query = self.encoder(query)
        return support, query

