salma-remyx
committed on
Commit 1267479
1 Parent(s): 5c79db4
Upload config
Browse files
- config.json +39 -0
- configuration_prismatic.py +141 -0
config.json
ADDED
@@ -0,0 +1,39 @@
{
  "arch_specifier": "no-align+gelu-mlp",
  "architectures": [
    "PrismaticForConditionalGeneration"
  ],
  "auto_map": {
    "AutoConfig": "configuration_prismatic.PrismaticConfig"
  },
  "hf_llm_id": "meta-llama/Meta-Llama-3.1-8B",
  "image_resize_strategy": "letterbox",
  "image_sizes": [
    224,
    224
  ],
  "llm_backbone_id": "llama3-1-8b-pure",
  "llm_max_length": 2048,
  "model_type": "prismatic",
  "output_projector_states": false,
  "pad_to_multiple_of": 64,
  "pad_token_id": 128256,
  "text_config": {
    "model_type": "llama",
    "pad_token_id": 128256,
    "torch_dtype": "bfloat16",
    "vocab_size": 128320
  },
  "timm_model_ids": [
    "vit_large_patch14_reg4_dinov2.lvd142m",
    "vit_so400m_patch14_siglip_224"
  ],
  "timm_override_act_layers": [
    null,
    null
  ],
  "torch_dtype": "bfloat16",
  "transformers_version": "4.44.0",
  "use_fused_vision_backbone": true,
  "vision_backbone_id": "dinosiglip-vit-so-224px"
}
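Because `auto_map` routes `AutoConfig` to `configuration_prismatic.PrismaticConfig`, this config can be loaded straight from the Hub once the repo's custom code is trusted. A minimal sketch; the repo id below is a hypothetical placeholder (the actual repository this commit belongs to is not shown on this page):

from transformers import AutoConfig

# Hypothetical repo id -- substitute the actual Hub repository for this commit.
REPO_ID = "salma-remyx/<repo-name>"

# `auto_map` in config.json points AutoConfig at the custom PrismaticConfig class,
# so loading requires opting in to remote code execution.
config = AutoConfig.from_pretrained(REPO_ID, trust_remote_code=True)

print(config.model_type)              # "prismatic"
print(config.vision_backbone_id)      # "dinosiglip-vit-so-224px"
print(config.text_config.model_type)  # "llama"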
configuration_prismatic.py
ADDED
@@ -0,0 +1,141 @@
"""
configuration_prismatic.py

HuggingFace-style configuration definition for Prismatic VLMs, inheriting from `transformers.PretrainedConfig`.
Default configuration specifies `siglip-224px+7b`.
"""

from typing import Any, Dict, List, Optional

from transformers import PretrainedConfig
from transformers.models.auto import CONFIG_MAPPING

# === Utilities for Mapping Prismatic names to HF names ===
# fmt: off
VISION_BACKBONE_TO_RESOLUTION: Dict[str, List[int]] = {
    "clip-vit-l": [224], "siglip-vit-so400m": [224], "dinov2-vit-l": [224], "in1k-vit-l": [224],

    "clip-vit-l-336px": [336],
    "siglip-vit-so400m-384px": [384],

    "dinoclip-vit-l-336px": [336, 336],
    "dinosiglip-vit-so-224px": [224, 224],
    "dinosiglip-vit-so-384px": [384, 384],
}
VISION_BACKBONE_TO_TIMM_ID: Dict[str, List[str]] = {
    "clip-vit-l": ["vit_large_patch14_clip_224.openai"],
    "clip-vit-l-336px": ["vit_large_patch14_clip_336.openai"],

    "dinov2-vit-l": ["vit_large_patch14_reg4_dinov2.lvd142m"],
    "in1k-vit-l": ["vit_large_patch16_224.augreg_in21k_ft_in1k"],

    "siglip-vit-so400m": ["vit_so400m_patch14_siglip_224"],
    "siglip-vit-so400m-384px": ["vit_so400m_patch14_siglip_384"],

    "dinoclip-vit-l-336px": ["vit_large_patch14_reg4_dinov2.lvd142m", "vit_large_patch14_clip_336.openai"],
    "dinosiglip-vit-so-224px": ["vit_large_patch14_reg4_dinov2.lvd142m", "vit_so400m_patch14_siglip_224"],
    "dinosiglip-vit-so-384px": ["vit_large_patch14_reg4_dinov2.lvd142m", "vit_so400m_patch14_siglip_384"],
}
TIMM_OVERRIDE_ACT_LAYER: Dict[str, List[Optional[str]]] = {
    "clip-vit-l": ["quick_gelu"], "clip-vit-l-336px": ["quick_gelu"],
    "dinov2-vit-l": [None], "in1k-vit-l": [None],
    "siglip-vit-so400m": [None], "siglip-vit-so400m-384px": [None],
    "dinoclip-vit-l-336px": [None, "quick_gelu"],
    "dinosiglip-vit-so-224px": [None, None], "dinosiglip-vit-so-384px": [None, None]
}

LLM_BACKBONE_TO_HF_PATH = {
    "llama2-7b-pure": "meta-llama/Llama-2-7b-hf", "llama2-13b-pure": "meta-llama/Llama-2-13b-hf",
    "llama2-7b-chat": "meta-llama/Llama-2-7b-chat-hf", "llama2-13b-chat": "meta-llama/Llama-2-13b-chat-hf",
    "llama3-1-8b-pure": "meta-llama/Meta-Llama-3.1-8B",

    "vicuna-v15-7b": "lmsys/vicuna-7b-v1.5", "vicuna-v15-13b": "lmsys/vicuna-13b-v1.5",

    "mistral-v0.1-7b-pure": "mistralai/Mistral-7B-v0.1",
    "mistral-v0.1-7b-instruct": "mistralai/Mistral-7B-Instruct-v0.1",

    "phi-2-3b": "microsoft/phi-2",
}
LLM_BACKBONE_TO_HF_METACLASS = {
    "llama2-7b-pure": "llama", "llama2-13b-pure": "llama", "llama2-7b-chat": "llama", "llama2-13b-chat": "llama",
    "vicuna-v15-7b": "llama", "vicuna-v15-13b": "llama", "llama3-1-8b-pure": "llama",

    "mistral-v0.1-7b-pure": "mistral", "mistral-v0.1-7b-instruct": "mistral",

    "phi-2-3b": "phi",
}

VALID_VISION_BACKBONES = set(VISION_BACKBONE_TO_RESOLUTION.keys())
VALID_LLM_BACKBONES = set(LLM_BACKBONE_TO_HF_PATH)
# fmt: on


class PrismaticConfig(PretrainedConfig):
    model_type: str = "prismatic"
    is_composition: bool = False

    def __init__(
        self,
        vision_backbone_id: str = "siglip-vit-so400m",
        llm_backbone_id: str = "vicuna-v15-7b",
        arch_specifier: str = "no-align+gelu-mlp",
        use_fused_vision_backbone: Optional[bool] = None,
        image_resize_strategy: str = "letterbox",
        text_config: Optional[Dict[str, Any]] = None,
        llm_max_length: int = 2048,
        pad_token_id: int = 32000,
        pad_to_multiple_of: int = 64,
        output_projector_states: bool = False,
        **kwargs: str,
    ) -> None:
        if vision_backbone_id not in VALID_VISION_BACKBONES:
            raise ValueError(f"Vision backbone `{vision_backbone_id}` not in {VALID_VISION_BACKBONES = }")

        if llm_backbone_id not in VALID_LLM_BACKBONES:
            raise ValueError(f"LLM backbone `{llm_backbone_id}` not in {VALID_LLM_BACKBONES = }")

        # Set Prismatic Configuration Fields
        self.vision_backbone_id = vision_backbone_id
        self.llm_backbone_id = llm_backbone_id
        self.arch_specifier = arch_specifier
        self.output_projector_states = output_projector_states

        # [Contract] All vision backbone parameters are lists =>> supports fused backbones with different preprocessing
        self.use_fused_vision_backbone = (
            use_fused_vision_backbone
            if use_fused_vision_backbone is not None
            else any(self.vision_backbone_id.startswith(v) for v in ["dinoclip", "dinosiglip"])
        )

        self.timm_model_ids = VISION_BACKBONE_TO_TIMM_ID[self.vision_backbone_id]
        self.timm_override_act_layers = TIMM_OVERRIDE_ACT_LAYER[self.vision_backbone_id]
        self.image_sizes = VISION_BACKBONE_TO_RESOLUTION[self.vision_backbone_id]
        self.image_resize_strategy = image_resize_strategy

        self.hf_llm_id = LLM_BACKBONE_TO_HF_PATH[self.llm_backbone_id]
        self.llm_max_length = llm_max_length
        self.pad_token_id, self.pad_to_multiple_of = pad_token_id, pad_to_multiple_of

        # [IMPORTANT] HF Utilities actually look for a `text_config` field... we need to use that specific naming!
        self.text_config = (
            CONFIG_MAPPING[LLM_BACKBONE_TO_HF_METACLASS[self.llm_backbone_id]](**text_config)
            if text_config is not None
            else CONFIG_MAPPING[LLM_BACKBONE_TO_HF_METACLASS[self.llm_backbone_id]]()
        )

        # Dispatch **kwargs to super() =>> note that `pad_token_id` collides, so we pass it in here as well...
        super().__init__(pad_token_id=pad_token_id, **kwargs)


class OpenVLAConfig(PrismaticConfig):
    model_type: str = "openvla"

    def __init__(
        self,
        norm_stats: Optional[Dict[str, Dict[str, Dict[str, Dict[str, List[float]]]]]] = None,
        n_action_bins: int = 256,
        **kwargs: str,
    ) -> None:
        self.norm_stats, self.n_action_bins = norm_stats, n_action_bins

        super().__init__(**kwargs)
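For reference, a minimal usage sketch (assuming `configuration_prismatic.py` is importable from the working directory): constructing `PrismaticConfig` with the backbone pair recorded in `config.json` above shows that the derived fields are resolved from the module-level lookup tables rather than passed explicitly.

from configuration_prismatic import PrismaticConfig

# Same backbone pair and pad token as the uploaded config.json.
config = PrismaticConfig(
    vision_backbone_id="dinosiglip-vit-so-224px",
    llm_backbone_id="llama3-1-8b-pure",
    pad_token_id=128256,
)

# Derived fields come from the lookup tables, not the constructor:
assert config.use_fused_vision_backbone                     # inferred from the "dinosiglip" prefix
assert config.image_sizes == [224, 224]                     # VISION_BACKBONE_TO_RESOLUTION
assert config.hf_llm_id == "meta-llama/Meta-Llama-3.1-8B"   # LLM_BACKBONE_TO_HF_PATH
assert config.timm_override_act_layers == [None, None]      # TIMM_OVERRIDE_ACT_LAYER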