import torch
import torch.nn as nn
from ViT import VisionTransformer, getVit32
from BoTNet.ResNet import MHSA
from coordatt import CoordAtt
from torchinfo import summary
import ml_collections
from torch.nn import functional as F
def conv1x1(in_planes, out_planes, stride=1):
    """Return a bias-free 1x1 convolution (pure channel projection)."""
    return nn.Conv2d(
        in_channels=in_planes,
        out_channels=out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )

class BViT(nn.Module):
    """Vision Transformer with a residual bottleneck-attention stem.

    The input is passed through a 1x1 conv -> MHSA -> 1x1 conv branch,
    added back to the input (residual connection), layer-normalized over
    the last dimension, and then fed into a VisionTransformer.

    Args:
        config: ml_collections ConfigDict consumed by VisionTransformer.
        image_size: spatial height/width of the (square) input images;
            also used as the MHSA attention map size and the LayerNorm
            normalized shape.
        num_classes: number of output classes for the ViT head.
        zero_head: forwarded to VisionTransformer (zero-init classifier head).
        vis: forwarded to VisionTransformer (return attention visualizations).
        planes: number of input channels (3 for RGB).
        reduction: unused; kept for backward signature compatibility.
    """

    def __init__(self, config, image_size, num_classes, zero_head=False, vis=False, planes=3, reduction=8):
        super(BViT, self).__init__()
        self.conv1 = conv1x1(planes, 16)
        self.attn = MHSA(16, width=image_size, height=image_size)
        self.conv2 = conv1x1(16, planes)
        self.vit = VisionTransformer(config, image_size, num_classes, zero_head, vis)
        # Normalize over the last (width) dimension. Was hard-coded to 32;
        # generalized to image_size so non-32 resolutions work too
        # (identical behavior for the existing image_size=32 callers).
        self.norm = nn.LayerNorm(image_size)

    def forward(self, X):
        """Run the attention stem with a residual add, then the ViT.

        Returns the (logits, attention_weights) pair produced by the ViT.
        """
        residual = X
        X = self.conv1(X)
        X = self.attn(X)
        X = self.conv2(X)
        X = self.norm(residual + X)
        X, attn_weight = self.vit(X)
        return X, attn_weight

def getBVit32(num_classes=100, img_size=32):
    """Build a small BViT for square `img_size` inputs (default 32, CIFAR-like).

    NOTE: despite the original comment, this is NOT the ViT-B/16 config —
    it is a custom compact ViT: 4x4 patches, hidden size 252, 4 layers,
    4 heads, MLP dim 1536, token classifier.

    Args:
        num_classes: number of output classes for the classification head.
        img_size: input image height/width (kept at 32 for existing callers).

    Returns:
        A BViT model moved to CUDA if available, otherwise CPU.
    """
    config = ml_collections.ConfigDict()
    config.patches = ml_collections.ConfigDict({'size': (4, 4)})
    config.hidden_size = 252
    config.transformer = ml_collections.ConfigDict()
    config.transformer.mlp_dim = 1536
    config.transformer.num_heads = 4
    config.transformer.num_layers = 4
    config.transformer.attention_dropout_rate = 0.0
    config.transformer.dropout_rate = 0.1
    config.classifier = 'token'
    config.representation_size = None

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = BViT(config, img_size, zero_head=True, num_classes=num_classes)
    model.to(device)

    return model
if __name__ == "__main__":
    # Smoke-test the model defined in THIS module. The original called the
    # imported getVit32 (plain ViT) — almost certainly a copy-paste slip.
    model = getBVit32(100)
    # Match the device the builder placed the model on instead of
    # hard-coding 'cuda', which crashes on CPU-only machines.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    summary(model, input_size=(128, 3, 32, 32), device=device)