from models import MODEL
import math
import os
import torch
import torch.nn as nn
from .visionFM_layers import VisionTransformer, load_pretrained_weights,trunc_normal_

from utils.loger_config import get_logger
import numpy as np
logger = get_logger()
import numpy as np



# ========== ViT Base Encoder ==========
@MODEL.register_module
def visionFM_vit_base(pretrained=False,
                              model_path='',
                              patch_size=16,
                              checkpoint_key=None,
                              depth=12,
                              **kwargs):
    """Build a ViT-Base VisionFM encoder and optionally load pretrained weights.

    Registered in the project ``MODEL`` registry.

    Args:
        pretrained (bool): if True, load weights from ``model_path``.
        model_path (str): filesystem path to the pretrained checkpoint.
            Required (non-empty) when ``pretrained`` is True.
        patch_size (int): ViT patch size (default 16).
        checkpoint_key (str | None): key inside the checkpoint dict to read
            the state dict from; forwarded to ``load_pretrained_weights``.
        depth (int): number of transformer blocks (default 12, ViT-Base).
        **kwargs: extra keyword arguments forwarded to ``VisionTransformer``.

    Returns:
        VisionTransformer: the constructed (and possibly pretrained) model.

    Raises:
        ValueError: if ``pretrained`` is True but ``model_path`` is empty/None.
    """
    # ViT-Base configuration: 768-dim embeddings, 12 heads, MLP ratio 4.
    model = VisionTransformer(
        patch_size=patch_size, embed_dim=768, depth=depth, num_heads=12, mlp_ratio=4,
        qkv_bias=True, **kwargs
    )
    if pretrained:
        # BUGFIX: the default model_path is '' (empty string), which is not
        # None — the old `is None` check let an empty path slip through and
        # fail later inside load_pretrained_weights. Reject any falsy path.
        if not model_path:
            raise ValueError("Please provide the model_path for the pretrained model.")
        logger.info(f"Load pretrained weights from {model_path}")
        load_pretrained_weights(model, model_path, checkpoint_key, 'vit_base', patch_size)

    return model


