import torch


# Base ViT tower: 224px input, 14px patches, 24 layers, width 1024
# (dimensions match a ViT-L/14-style model — presumably; confirm against
# the checkpoint this is loaded from). Uses a class token, 4 register
# tokens, and LayerScale (init_values).
vla_vit_config = dict(
    img_size=224,
    patch_size=14,
    in_chans=3,
    num_heads=16,
    mlp_ratio=4.0,
    embed_dim=1024,
    reg_tokens=4,
    class_token=True,
    init_values=1e-05,
    depth=24,
    global_pool_map=False,
)

# Second ("fused") tower for the old config: 27 layers, width 1152,
# non-integer mlp_ratio, no class/register tokens, pooled via
# global_pool_map instead. NOTE(review): these hyperparameters resemble a
# SigLIP-style SoViT-400m — verify against the source checkpoint.
vla_vit_fused_config = dict(
    img_size=224,
    patch_size=14,
    in_chans=3,
    num_heads=16,
    mlp_ratio=3.7362,
    embed_dim=1152,
    reg_tokens=0,
    class_token=False,
    init_values=None,
    depth=27,
    global_pool_map=True,
)

# Replacement fused tower for the new config: same width/depth as the base
# tower (1024/24) but no register tokens, no LayerScale, and pre_norm.
vla_vit_fused_config_new = dict(
    img_size=224,
    patch_size=14,
    in_chans=3,
    num_heads=16,
    mlp_ratio=4,
    embed_dim=1024,
    reg_tokens=0,
    class_token=True,
    init_values=None,
    depth=24,
    global_pool_map=False,
    eps=1e-5,
    pre_norm=True,
)

# Projector input dims equal the sum of the two towers' embed_dims
# (1024+1152=2176 old, 1024+1024=2048 new), which suggests their features
# are concatenated before projection to the LLM width — TODO confirm in
# VLAProjector / the caller.
proj_config = dict(vision_dim=2176, llm_dim=4096)
proj_config_new = dict(vision_dim=2048, llm_dim=4096)

# Registry consumed by build_vision_tower(); each entry is
# [base ViT cfg, fused ViT cfg, projector cfg], in construction order.
vision_config = {
    "old": [vla_vit_config, vla_vit_fused_config, proj_config],
    "new": [vla_vit_config, vla_vit_fused_config_new, proj_config_new],
}


def build_vision_tower(
    config_type: str, state: list[dict], device="cuda", dtype=torch.bfloat16
):
    """Construct and load the vision stack for ``config_type``.

    Builds two ViT towers and a projector from the ``vision_config``
    registry, loads one state dict into each (in the same order), applies
    VLA patching to the ViTs, and moves everything to ``device``/``dtype``.

    Args:
        config_type: Key into ``vision_config`` ("old" or "new").
        state: Exactly three state dicts, ordered [base ViT, fused ViT,
            projector].
        device: Target device for the final modules.
        dtype: Target parameter dtype for the final modules.

    Returns:
        List of three loaded modules: [ViT, ViT, VLAProjector].
    """
    assert config_type in vision_config, config_type
    # Local import to avoid a circular/early import of the model code.
    from .modeling_vit import ViT, VLAProjector

    cfg = vision_config[config_type]
    raw_modules = [ViT(**cfg[0]), ViT(**cfg[1]), VLAProjector(**cfg[2])]
    # zip() stops at the shorter input, which would silently leave some
    # modules with random init weights — fail loudly instead.
    assert len(state) == len(raw_modules), (len(state), len(raw_modules))
    for module, module_state in zip(raw_modules, state):
        module.load_state_dict(module_state)
        # Only the ViT towers need VLA-specific patching; the projector
        # is left as constructed.
        if isinstance(module, ViT):
            module.patch_for_vla()
    return [m.to(device=device, dtype=dtype) for m in raw_modules]
