import torch
from torch import nn

from transformers import AutoConfig, AutoModel, PretrainedConfig, PreTrainedModel
from transformers.utils import ModelOutput

from torch_geometric.data import Batch

from model_components import EfficientNetV2FeatureExtractor, GATGNN, TransformerEncoder, MLPBlock
from graph_construction import build_graph_from_patches, build_graph_data_from_patches


class SAGViTConfig(PretrainedConfig):
    model_type = "sagvit"

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Transformer encoder hyperparameters.
        self.d_model = kwargs.get("d_model", 64)
        self.dim_feedforward = kwargs.get("dim_feedforward", 64)
        self.nhead = kwargs.get("nhead", 4)
        self.num_layers = kwargs.get("num_layers", 2)
        # Graph attention network (GAT) hyperparameters. in_channels is the node
        # (patch) feature size; gcn_out must equal d_model, since the GAT output
        # is fed directly into the Transformer encoder.
        self.in_channels = kwargs.get("in_channels", 2560)
        self.gcn_hidden = kwargs.get("gcn_hidden", 128)
        self.gcn_out = kwargs.get("gcn_out", 64)
        # Patching and classification head.
        self.patch_size = kwargs.get("patch_size", (4, 4))
        self.hidden_mlp_features = kwargs.get("hidden_mlp_features", 64)
        self.num_classes = kwargs.get("num_classes", 10)
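
# Any of the defaults above can be overridden at construction time, e.g.
# (hypothetical values) SAGViTConfig(num_classes=100, patch_size=(2, 2)).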


class SAGViTClassifier(PreTrainedModel):
    """
    SAG-ViT: Scale-Aware Graph Attention Vision Transformer.

    This model integrates the following steps:
    - Extract multi-scale features from images using a CNN backbone (EfficientNetV2 here).
    - Partition the feature map into patches and build a graph where each node is a patch.
    - Use a Graph Attention Network (GAT) to refine patch embeddings based on local
      spatial relationships.
    - Use a Transformer encoder to model long-range dependencies and integrate
      multi-scale information.
    - Classify the resulting representation into the desired number of classes.

    Inputs:
        x (Tensor): Input images of shape (B, 3, H, W).

    Outputs:
        ModelOutput with `logits` of shape (B, num_classes).
    """

    config_class = SAGViTConfig

    def __init__(self, config):
        super().__init__(config)

        self.patch_size = config.patch_size
        self.num_classes = config.num_classes

        # CNN backbone: extracts a multi-scale feature map from the input image.
        self.cnn = EfficientNetV2FeatureExtractor()

        # GAT: refines patch embeddings over the patch graph.
        self.gcn = GATGNN(
            in_channels=config.in_channels,
            hidden_channels=config.gcn_hidden,
            out_channels=config.gcn_out,
        )

        # Learnable positional embedding added to the graph embedding, plus an
        # extra learnable token appended to the sequence before the Transformer.
        self.positional_embedding = nn.Parameter(torch.randn(1, 1, config.d_model))
        self.extra_embedding = nn.Parameter(torch.randn(1, config.d_model))

        # Transformer encoder: models long-range dependencies.
        self.transformer_encoder = TransformerEncoder(
            d_model=config.d_model,
            nhead=config.nhead,
            num_layers=config.num_layers,
            dim_feedforward=config.dim_feedforward,
        )

        # Classification head.
        self.mlp = MLPBlock(config.d_model, config.hidden_mlp_features, config.num_classes)

    def forward(self, x, **kwargs):
        # 1) CNN feature extraction: (B, 3, H, W) -> (B, C_feat, H', W').
        feature_map = self.cnn(x)

        # 2) Build a graph over feature-map patches for each image in the batch.
        G_global_batch, patches = build_graph_from_patches(feature_map, self.patch_size)

        # 3) Convert the per-image graphs into a single PyTorch Geometric batch.
        data_list = build_graph_data_from_patches(G_global_batch, patches)
        batch = Batch.from_data_list(data_list).to(x.device)

        # 4) GAT refinement: one pooled embedding per graph (i.e. per image).
        x_gcn = self.gcn(batch)

        # 5) Treat each pooled graph embedding as a length-1 sequence, add the
        #    positional embedding, and append the extra learnable token.
        B = x.size(0)
        patch_embeddings = x_gcn.unsqueeze(1)  # (B, 1, d_model); gcn_out == d_model
        patch_embeddings = patch_embeddings + self.positional_embedding
        patch_embeddings = torch.cat(
            [patch_embeddings, self.extra_embedding.unsqueeze(0).expand(B, -1, -1)], dim=1
        )  # (B, 2, d_model)

        # 6) Transformer encoding, mean pooling over the sequence, classification.
        x_trans = self.transformer_encoder(patch_embeddings)
        x_pooled = x_trans.mean(dim=1)
        logits = self.mlp(x_pooled)
        return ModelOutput(logits=logits)


# Register SAG-ViT with the Auto classes so it can be loaded via
# AutoConfig/AutoModel with model_type="sagvit".
AutoConfig.register("sagvit", SAGViTConfig)
AutoModel.register(SAGViTConfig, SAGViTClassifier)
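

# Minimal smoke test: a usage sketch, assuming the local `model_components` and
# `graph_construction` modules are importable and the backbone accepts standard
# 224x224 RGB input.
if __name__ == "__main__":
    config = SAGViTConfig()
    model = SAGViTClassifier(config).eval()
    dummy = torch.randn(2, 3, 224, 224)  # (B, 3, H, W)
    with torch.no_grad():
        out = model(dummy)
    print(out.logits.shape)  # torch.Size([2, 10]) with the default num_classes=10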