yujiepan and zhengyuansu committed on
Commit 18f69fa · 0 Parent(s)

Duplicate from zhengyuansu/bagel-tiny-random

Co-authored-by: Zhengyuan Su <zhengyuansu@users.noreply.huggingface.co>

.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,196 @@
+ ---
+ library_name: vllm-omni
+ pipeline_tag: text-to-image
+ inference: true
+ base_model:
+ - ByteDance-Seed/BAGEL-7B-MoT
+ ---
+
+ This tiny model is for debugging. It is randomly initialized with the config adapted from [ByteDance-Seed/BAGEL-7B-MoT](https://huggingface.co/ByteDance-Seed/BAGEL-7B-MoT).
+
+ File sizes:
+ - ~335MB ae.safetensors (VAE, full architecture; hardcoded in vllm-omni)
+ - ~41MB ema.safetensors (LLM + ViT + connectors, 1 layer each)
+
+ ### Example usage:
+
+ ```python
+ from vllm_omni.entrypoints.omni import Omni
+
+ omni = Omni(
+     model="zhengyuansu/bagel-tiny-random",
+     stage_configs_path="path/to/bagel_sharedmemory_2gpu_ci.yaml",
+     custom_pipeline_args={
+         "pipeline_class": "examples.flowgrpo_trainer.vllm_omni.pipeline_bagel.BagelPipelineWithLogProb"
+     },
+ )
+
+ params_list = omni.default_sampling_params_list
+ params_list[1].num_inference_steps = 10
+ params_list[1].extra_args = {"cfg_text_scale": 4.0, "cfg_img_scale": 1.5}
+
+ outputs = list(omni.generate(
+     prompts=[{"prompt": "a cute cat", "modalities": ["image"]}],
+     sampling_params_list=params_list,
+ ))
+ ```
+
+ ### Code to create this repo:
+
+ ```python
+ """Create a tiny-random BAGEL model for CI testing.
+
+ Reads real BAGEL-7B-MoT checkpoint weight names and creates matching tiny random
+ tensors with scaled-down dimensions. The VAE architecture is hardcoded in vllm-omni
+ and cannot be shrunk, so VAE weights are kept at full size.
+
+ Usage:
+     python scripts/create_tiny_bagel.py --source ByteDance-Seed/BAGEL-7B-MoT
+ """
+
+ import argparse
+ import json
+ import os
+ import re
+ import shutil
+
+ import torch
+ from safetensors import safe_open
+ from safetensors.torch import save_file
+
+ # LLM/ViT dimension shrinkage
+ EMA_DIM_MAP = {
+     3584: 64,    # LLM hidden_size
+     18944: 128,  # LLM intermediate_size
+     1152: 64,    # ViT hidden_size
+     4304: 128,   # ViT intermediate_size
+     128: 32,     # head_dim
+     512: 64,     # kv_proj dim
+ }
+
+ # VAE: keep original dims (architecture is hardcoded in vllm-omni)
+ VAE_DIM_MAP = {}
+
+ MAX_LLM_LAYERS = 1
+ MAX_VIT_LAYERS = 1
+
+
+ def shrink_dims(shape, dim_map):
+     return [dim_map.get(d, d) for d in shape]
+
+
+ def create_tiny_configs(source_dir, output_dir):
+     with open(os.path.join(source_dir, "config.json")) as f:
+         config = json.load(f)
+
+     llm = config["llm_config"]
+     llm["hidden_size"] = 64
+     llm["num_hidden_layers"] = MAX_LLM_LAYERS
+     llm["num_attention_heads"] = 2
+     llm["num_key_value_heads"] = 2
+     llm["intermediate_size"] = 128
+     llm["max_position_embeddings"] = 4096
+     llm["max_window_layers"] = MAX_LLM_LAYERS
+
+     vit = config["vit_config"]
+     vit["hidden_size"] = 64
+     vit["num_hidden_layers"] = MAX_VIT_LAYERS
+     vit["num_attention_heads"] = 2
+     vit["intermediate_size"] = 128
+
+     with open(os.path.join(output_dir, "config.json"), "w") as f:
+         json.dump(config, f, indent=4)
+
+     llm_standalone = dict(llm)
+     llm_standalone["qk_norm"] = True
+     llm_standalone["tie_word_embeddings"] = False
+     with open(os.path.join(output_dir, "llm_config.json"), "w") as f:
+         json.dump(llm_standalone, f, indent=4)
+
+     with open(os.path.join(output_dir, "vit_config.json"), "w") as f:
+         json.dump(dict(vit), f, indent=4)
+
+     return config
+
+
+ def create_tiny_weights(source_path, dim_map, max_layers, seed=42):
+     gen = torch.Generator().manual_seed(seed)
+     weights = {}
+     is_vae = "ae" in os.path.basename(source_path).lower()
+     dtype = torch.float32 if is_vae else torch.bfloat16
+
+     with safe_open(source_path, framework="pt") as f:
+         for name in f.keys():
+             # Skip layers beyond the per-component limit (keep only layer 0).
+             m = re.search(r"\.layers\.(\d+)\.", name)
+             if m:
+                 idx = int(m.group(1))
+                 limit = next((lim for pat, lim in max_layers.items() if pat in name), None)
+                 if limit is not None and idx >= limit:
+                     continue
+
+             real_shape = list(f.get_tensor(name).shape)
+             tiny_shape = shrink_dims(real_shape, dim_map)
+
+             # Norm weights start at 1.0; everything else is small random noise.
+             if "norm" in name and len(tiny_shape) == 1:
+                 weights[name] = torch.ones(tiny_shape, dtype=dtype)
+             else:
+                 weights[name] = torch.randn(tiny_shape, generator=gen, dtype=dtype) * 0.02
+
+     return weights
+
+
+ def main():
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--source", default="ByteDance-Seed/BAGEL-7B-MoT")
+     parser.add_argument("--output", default=os.path.expanduser("~/models/tiny-random/BAGEL-7B-MoT"))
+     args = parser.parse_args()
+
+     source_dir = args.source
+     if not os.path.exists(os.path.join(source_dir, "config.json")):
+         from huggingface_hub import snapshot_download
+         source_dir = snapshot_download(source_dir)
+
+     output_dir = args.output
+     os.makedirs(output_dir, exist_ok=True)
+
+     create_tiny_configs(source_dir, output_dir)
+
+     for fname in ["generation_config.json", "preprocessor_config.json", "tokenizer.json",
+                   "tokenizer_config.json", "vocab.json", "merges.txt"]:
+         src = os.path.join(source_dir, fname)
+         if os.path.exists(src):
+             shutil.copy2(src, os.path.join(output_dir, fname))
+
+     ema = create_tiny_weights(
+         os.path.join(source_dir, "ema.safetensors"),
+         dim_map=EMA_DIM_MAP,
+         max_layers={"language_model": MAX_LLM_LAYERS, "vit_model": MAX_VIT_LAYERS},
+         seed=42,
+     )
+     save_file(ema, os.path.join(output_dir, "ema.safetensors"))
+
+     vae = create_tiny_weights(
+         os.path.join(source_dir, "ae.safetensors"),
+         dim_map=VAE_DIM_MAP,
+         max_layers={},
+         seed=43,
+     )
+     save_file(vae, os.path.join(output_dir, "ae.safetensors"))
+
+     weight_map = {k: "ema.safetensors" for k in ema}
+     weight_map.update({k: "ae.safetensors" for k in vae})
+     total_size = sum(t.numel() * t.element_size() for t in ema.values())
+     total_size += sum(t.numel() * t.element_size() for t in vae.values())
+     with open(os.path.join(output_dir, "model.safetensors.index.json"), "w") as f:
+         json.dump({"metadata": {"total_size": total_size}, "weight_map": weight_map}, f, indent=4)
+
+
+ if __name__ == "__main__":
+     main()
+ ```
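
After running the script, the shard headers can be sanity-checked without loading tensors into memory. A minimal sketch (the local output path is an assumption; `get_slice` reads only shape metadata):

```python
import os
from safetensors import safe_open

out = os.path.expanduser("~/models/tiny-random/BAGEL-7B-MoT")  # assumed output path
with safe_open(os.path.join(out, "ema.safetensors"), framework="pt") as f:
    names = list(f.keys())
    # Only layer 0 should survive the layer cap.
    assert not any(".layers.1." in n for n in names)
    # LLM hidden_size should be shrunk from 3584 to 64.
    emb_shape = f.get_slice("language_model.model.embed_tokens.weight").get_shape()
    print(len(names), "tensors; embed_tokens shape:", emb_shape)  # e.g. [152064, 64]
```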
ae.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a5fa0e75c982c563fbf8c188b953f9307749f7f7e3a9e1ff68cd6a284fcbc2a
+ size 335303916
config.json ADDED
@@ -0,0 +1,58 @@
+ {
+     "architectures": [
+         "BagelForConditionalGeneration"
+     ],
+     "model_type": "bagel",
+     "visual_gen": true,
+     "visual_und": true,
+     "llm_config": {
+         "architectures": [
+             "Qwen2ForCausalLM"
+         ],
+         "attention_dropout": 0.0,
+         "bos_token_id": 151643,
+         "eos_token_id": 151645,
+         "hidden_act": "silu",
+         "hidden_size": 64,
+         "initializer_range": 0.02,
+         "intermediate_size": 128,
+         "max_position_embeddings": 4096,
+         "max_window_layers": 1,
+         "model_type": "qwen2",
+         "num_attention_heads": 2,
+         "num_hidden_layers": 1,
+         "num_key_value_heads": 2,
+         "qk_norm": true,
+         "rms_norm_eps": 1e-06,
+         "rope_theta": 1000000.0,
+         "sliding_window": 131072,
+         "tie_word_embeddings": false,
+         "torch_dtype": "bfloat16",
+         "transformers_version": "4.43.1",
+         "use_cache": true,
+         "use_sliding_window": false,
+         "vocab_size": 152064
+     },
+     "vit_config": {
+         "hidden_size": 64,
+         "image_size": 980,
+         "intermediate_size": 128,
+         "model_type": "siglip_vision_model",
+         "num_attention_heads": 2,
+         "num_hidden_layers": 1,
+         "patch_size": 14,
+         "num_channels": 3
+     },
+     "vae_config": {
+         "z_channels": 16,
+         "downsample": 8
+     },
+     "latent_patch_size": 2,
+     "max_latent_size": 32,
+     "vit_max_num_patch_per_side": 70,
+     "connector_act": "gelu_pytorch_tanh",
+     "interpolate_pos": false,
+     "timestep_shift": 1.0,
+     "torch_dtype": "bfloat16",
+     "transformers_version": "4.49.0"
+ }
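
The tiny dimensions stay consistent with the full-size geometry kept in the config: the per-head width is 64 / 2 = 32, and the ViT patch grid is unchanged at 980 / 14 = 70 patches per side. A quick sketch checking these derived quantities (assumes config.json from this repo is in the working directory):

```python
import json

with open("config.json") as f:
    cfg = json.load(f)

llm, vit = cfg["llm_config"], cfg["vit_config"]
print(llm["hidden_size"] // llm["num_attention_heads"])  # 32, per-head width
assert vit["image_size"] // vit["patch_size"] == cfg["vit_max_num_patch_per_side"]  # 70
```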
ema.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9cff4c7b68f95b4abaf0fdeaa6593b9bb3f33b61623dfad14759b2d80ca402da
+ size 41097632
generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+     "bos_token_id": 151643,
+     "pad_token_id": 151643,
+     "do_sample": true,
+     "eos_token_id": [
+         151645,
+         151643
+     ],
+     "repetition_penalty": 1.05,
+     "temperature": 0.7,
+     "top_p": 0.8,
+     "top_k": 20,
+     "transformers_version": "4.37.0"
+ }
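
These sampling defaults load through transformers' standard mechanism; a minimal sketch (the repo id is an assumption):

```python
from transformers import GenerationConfig

gc = GenerationConfig.from_pretrained("zhengyuansu/bagel-tiny-random")
print(gc.do_sample, gc.temperature, gc.top_p, gc.top_k)  # True 0.7 0.8 20
```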
llm_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+     "architectures": [
+         "Qwen2ForCausalLM"
+     ],
+     "attention_dropout": 0.0,
+     "bos_token_id": 151643,
+     "eos_token_id": 151645,
+     "hidden_act": "silu",
+     "hidden_size": 64,
+     "initializer_range": 0.02,
+     "intermediate_size": 128,
+     "max_position_embeddings": 4096,
+     "max_window_layers": 1,
+     "model_type": "qwen2",
+     "num_attention_heads": 2,
+     "num_hidden_layers": 1,
+     "num_key_value_heads": 2,
+     "qk_norm": true,
+     "rms_norm_eps": 1e-06,
+     "rope_theta": 1000000.0,
+     "sliding_window": 131072,
+     "tie_word_embeddings": false,
+     "torch_dtype": "bfloat16",
+     "transformers_version": "4.43.1",
+     "use_cache": true,
+     "use_sliding_window": false,
+     "vocab_size": 152064
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors.index.json ADDED
@@ -0,0 +1,318 @@
+ {
+     "metadata": {
+         "total_size": 376368524
+     },
+     "weight_map": {
+         "connector.fc1.bias": "ema.safetensors",
+         "connector.fc1.weight": "ema.safetensors",
+         "connector.fc2.bias": "ema.safetensors",
+         "connector.fc2.weight": "ema.safetensors",
+         "language_model.lm_head.weight": "ema.safetensors",
+         "language_model.model.embed_tokens.weight": "ema.safetensors",
+         "language_model.model.layers.0.input_layernorm.weight": "ema.safetensors",
+         "language_model.model.layers.0.input_layernorm_moe_gen.weight": "ema.safetensors",
+         "language_model.model.layers.0.mlp.down_proj.weight": "ema.safetensors",
+         "language_model.model.layers.0.mlp.gate_proj.weight": "ema.safetensors",
+         "language_model.model.layers.0.mlp.up_proj.weight": "ema.safetensors",
+         "language_model.model.layers.0.mlp_moe_gen.down_proj.weight": "ema.safetensors",
+         "language_model.model.layers.0.mlp_moe_gen.gate_proj.weight": "ema.safetensors",
+         "language_model.model.layers.0.mlp_moe_gen.up_proj.weight": "ema.safetensors",
+         "language_model.model.layers.0.post_attention_layernorm.weight": "ema.safetensors",
+         "language_model.model.layers.0.post_attention_layernorm_moe_gen.weight": "ema.safetensors",
+         "language_model.model.layers.0.self_attn.k_norm.weight": "ema.safetensors",
+         "language_model.model.layers.0.self_attn.k_norm_moe_gen.weight": "ema.safetensors",
+         "language_model.model.layers.0.self_attn.k_proj.bias": "ema.safetensors",
+         "language_model.model.layers.0.self_attn.k_proj.weight": "ema.safetensors",
+         "language_model.model.layers.0.self_attn.k_proj_moe_gen.bias": "ema.safetensors",
+         "language_model.model.layers.0.self_attn.k_proj_moe_gen.weight": "ema.safetensors",
+         "language_model.model.layers.0.self_attn.o_proj.weight": "ema.safetensors",
+         "language_model.model.layers.0.self_attn.o_proj_moe_gen.weight": "ema.safetensors",
+         "language_model.model.layers.0.self_attn.q_norm.weight": "ema.safetensors",
+         "language_model.model.layers.0.self_attn.q_norm_moe_gen.weight": "ema.safetensors",
+         "language_model.model.layers.0.self_attn.q_proj.bias": "ema.safetensors",
+         "language_model.model.layers.0.self_attn.q_proj.weight": "ema.safetensors",
+         "language_model.model.layers.0.self_attn.q_proj_moe_gen.bias": "ema.safetensors",
+         "language_model.model.layers.0.self_attn.q_proj_moe_gen.weight": "ema.safetensors",
+         "language_model.model.layers.0.self_attn.v_proj.bias": "ema.safetensors",
+         "language_model.model.layers.0.self_attn.v_proj.weight": "ema.safetensors",
+         "language_model.model.layers.0.self_attn.v_proj_moe_gen.bias": "ema.safetensors",
+         "language_model.model.layers.0.self_attn.v_proj_moe_gen.weight": "ema.safetensors",
+         "language_model.model.norm.weight": "ema.safetensors",
+         "language_model.model.norm_moe_gen.weight": "ema.safetensors",
+         "latent_pos_embed.pos_embed": "ema.safetensors",
+         "llm2vae.bias": "ema.safetensors",
+         "llm2vae.weight": "ema.safetensors",
+         "time_embedder.mlp.0.bias": "ema.safetensors",
+         "time_embedder.mlp.0.weight": "ema.safetensors",
+         "time_embedder.mlp.2.bias": "ema.safetensors",
+         "time_embedder.mlp.2.weight": "ema.safetensors",
+         "vae2llm.bias": "ema.safetensors",
+         "vae2llm.weight": "ema.safetensors",
+         "vit_model.vision_model.embeddings.patch_embedding.bias": "ema.safetensors",
+         "vit_model.vision_model.embeddings.patch_embedding.weight": "ema.safetensors",
+         "vit_model.vision_model.embeddings.position_embedding.weight": "ema.safetensors",
+         "vit_model.vision_model.encoder.layers.0.layer_norm1.bias": "ema.safetensors",
+         "vit_model.vision_model.encoder.layers.0.layer_norm1.weight": "ema.safetensors",
+         "vit_model.vision_model.encoder.layers.0.layer_norm2.bias": "ema.safetensors",
+         "vit_model.vision_model.encoder.layers.0.layer_norm2.weight": "ema.safetensors",
+         "vit_model.vision_model.encoder.layers.0.mlp.fc1.bias": "ema.safetensors",
+         "vit_model.vision_model.encoder.layers.0.mlp.fc1.weight": "ema.safetensors",
+         "vit_model.vision_model.encoder.layers.0.mlp.fc2.bias": "ema.safetensors",
+         "vit_model.vision_model.encoder.layers.0.mlp.fc2.weight": "ema.safetensors",
+         "vit_model.vision_model.encoder.layers.0.self_attn.k_proj.bias": "ema.safetensors",
+         "vit_model.vision_model.encoder.layers.0.self_attn.k_proj.weight": "ema.safetensors",
+         "vit_model.vision_model.encoder.layers.0.self_attn.out_proj.bias": "ema.safetensors",
+         "vit_model.vision_model.encoder.layers.0.self_attn.out_proj.weight": "ema.safetensors",
+         "vit_model.vision_model.encoder.layers.0.self_attn.q_proj.bias": "ema.safetensors",
+         "vit_model.vision_model.encoder.layers.0.self_attn.q_proj.weight": "ema.safetensors",
+         "vit_model.vision_model.encoder.layers.0.self_attn.v_proj.bias": "ema.safetensors",
+         "vit_model.vision_model.encoder.layers.0.self_attn.v_proj.weight": "ema.safetensors",
+         "vit_model.vision_model.post_layernorm.bias": "ema.safetensors",
+         "vit_model.vision_model.post_layernorm.weight": "ema.safetensors",
+         "vit_pos_embed.pos_embed": "ema.safetensors",
+         "decoder.conv_in.bias": "ae.safetensors",
+         "decoder.conv_in.weight": "ae.safetensors",
+         "decoder.conv_out.bias": "ae.safetensors",
+         "decoder.conv_out.weight": "ae.safetensors",
+         "decoder.mid.attn_1.k.bias": "ae.safetensors",
+         "decoder.mid.attn_1.k.weight": "ae.safetensors",
+         "decoder.mid.attn_1.norm.bias": "ae.safetensors",
+         "decoder.mid.attn_1.norm.weight": "ae.safetensors",
+         "decoder.mid.attn_1.proj_out.bias": "ae.safetensors",
+         "decoder.mid.attn_1.proj_out.weight": "ae.safetensors",
+         "decoder.mid.attn_1.q.bias": "ae.safetensors",
+         "decoder.mid.attn_1.q.weight": "ae.safetensors",
+         "decoder.mid.attn_1.v.bias": "ae.safetensors",
+         "decoder.mid.attn_1.v.weight": "ae.safetensors",
+         "decoder.mid.block_1.conv1.bias": "ae.safetensors",
+         "decoder.mid.block_1.conv1.weight": "ae.safetensors",
+         "decoder.mid.block_1.conv2.bias": "ae.safetensors",
+         "decoder.mid.block_1.conv2.weight": "ae.safetensors",
+         "decoder.mid.block_1.norm1.bias": "ae.safetensors",
+         "decoder.mid.block_1.norm1.weight": "ae.safetensors",
+         "decoder.mid.block_1.norm2.bias": "ae.safetensors",
+         "decoder.mid.block_1.norm2.weight": "ae.safetensors",
+         "decoder.mid.block_2.conv1.bias": "ae.safetensors",
+         "decoder.mid.block_2.conv1.weight": "ae.safetensors",
+         "decoder.mid.block_2.conv2.bias": "ae.safetensors",
+         "decoder.mid.block_2.conv2.weight": "ae.safetensors",
+         "decoder.mid.block_2.norm1.bias": "ae.safetensors",
+         "decoder.mid.block_2.norm1.weight": "ae.safetensors",
+         "decoder.mid.block_2.norm2.bias": "ae.safetensors",
+         "decoder.mid.block_2.norm2.weight": "ae.safetensors",
+         "decoder.norm_out.bias": "ae.safetensors",
+         "decoder.norm_out.weight": "ae.safetensors",
+         "decoder.up.0.block.0.conv1.bias": "ae.safetensors",
+         "decoder.up.0.block.0.conv1.weight": "ae.safetensors",
+         "decoder.up.0.block.0.conv2.bias": "ae.safetensors",
+         "decoder.up.0.block.0.conv2.weight": "ae.safetensors",
+         "decoder.up.0.block.0.nin_shortcut.bias": "ae.safetensors",
+         "decoder.up.0.block.0.nin_shortcut.weight": "ae.safetensors",
+         "decoder.up.0.block.0.norm1.bias": "ae.safetensors",
+         "decoder.up.0.block.0.norm1.weight": "ae.safetensors",
+         "decoder.up.0.block.0.norm2.bias": "ae.safetensors",
+         "decoder.up.0.block.0.norm2.weight": "ae.safetensors",
+         "decoder.up.0.block.1.conv1.bias": "ae.safetensors",
+         "decoder.up.0.block.1.conv1.weight": "ae.safetensors",
+         "decoder.up.0.block.1.conv2.bias": "ae.safetensors",
+         "decoder.up.0.block.1.conv2.weight": "ae.safetensors",
+         "decoder.up.0.block.1.norm1.bias": "ae.safetensors",
+         "decoder.up.0.block.1.norm1.weight": "ae.safetensors",
+         "decoder.up.0.block.1.norm2.bias": "ae.safetensors",
+         "decoder.up.0.block.1.norm2.weight": "ae.safetensors",
+         "decoder.up.0.block.2.conv1.bias": "ae.safetensors",
+         "decoder.up.0.block.2.conv1.weight": "ae.safetensors",
+         "decoder.up.0.block.2.conv2.bias": "ae.safetensors",
+         "decoder.up.0.block.2.conv2.weight": "ae.safetensors",
+         "decoder.up.0.block.2.norm1.bias": "ae.safetensors",
+         "decoder.up.0.block.2.norm1.weight": "ae.safetensors",
+         "decoder.up.0.block.2.norm2.bias": "ae.safetensors",
+         "decoder.up.0.block.2.norm2.weight": "ae.safetensors",
+         "decoder.up.1.block.0.conv1.bias": "ae.safetensors",
+         "decoder.up.1.block.0.conv1.weight": "ae.safetensors",
+         "decoder.up.1.block.0.conv2.bias": "ae.safetensors",
+         "decoder.up.1.block.0.conv2.weight": "ae.safetensors",
+         "decoder.up.1.block.0.nin_shortcut.bias": "ae.safetensors",
+         "decoder.up.1.block.0.nin_shortcut.weight": "ae.safetensors",
+         "decoder.up.1.block.0.norm1.bias": "ae.safetensors",
+         "decoder.up.1.block.0.norm1.weight": "ae.safetensors",
+         "decoder.up.1.block.0.norm2.bias": "ae.safetensors",
+         "decoder.up.1.block.0.norm2.weight": "ae.safetensors",
+         "decoder.up.1.block.1.conv1.bias": "ae.safetensors",
+         "decoder.up.1.block.1.conv1.weight": "ae.safetensors",
+         "decoder.up.1.block.1.conv2.bias": "ae.safetensors",
+         "decoder.up.1.block.1.conv2.weight": "ae.safetensors",
+         "decoder.up.1.block.1.norm1.bias": "ae.safetensors",
+         "decoder.up.1.block.1.norm1.weight": "ae.safetensors",
+         "decoder.up.1.block.1.norm2.bias": "ae.safetensors",
+         "decoder.up.1.block.1.norm2.weight": "ae.safetensors",
+         "decoder.up.1.block.2.conv1.bias": "ae.safetensors",
+         "decoder.up.1.block.2.conv1.weight": "ae.safetensors",
+         "decoder.up.1.block.2.conv2.bias": "ae.safetensors",
+         "decoder.up.1.block.2.conv2.weight": "ae.safetensors",
+         "decoder.up.1.block.2.norm1.bias": "ae.safetensors",
+         "decoder.up.1.block.2.norm1.weight": "ae.safetensors",
+         "decoder.up.1.block.2.norm2.bias": "ae.safetensors",
+         "decoder.up.1.block.2.norm2.weight": "ae.safetensors",
+         "decoder.up.1.upsample.conv.bias": "ae.safetensors",
+         "decoder.up.1.upsample.conv.weight": "ae.safetensors",
+         "decoder.up.2.block.0.conv1.bias": "ae.safetensors",
+         "decoder.up.2.block.0.conv1.weight": "ae.safetensors",
+         "decoder.up.2.block.0.conv2.bias": "ae.safetensors",
+         "decoder.up.2.block.0.conv2.weight": "ae.safetensors",
+         "decoder.up.2.block.0.norm1.bias": "ae.safetensors",
+         "decoder.up.2.block.0.norm1.weight": "ae.safetensors",
+         "decoder.up.2.block.0.norm2.bias": "ae.safetensors",
+         "decoder.up.2.block.0.norm2.weight": "ae.safetensors",
+         "decoder.up.2.block.1.conv1.bias": "ae.safetensors",
+         "decoder.up.2.block.1.conv1.weight": "ae.safetensors",
+         "decoder.up.2.block.1.conv2.bias": "ae.safetensors",
+         "decoder.up.2.block.1.conv2.weight": "ae.safetensors",
+         "decoder.up.2.block.1.norm1.bias": "ae.safetensors",
+         "decoder.up.2.block.1.norm1.weight": "ae.safetensors",
+         "decoder.up.2.block.1.norm2.bias": "ae.safetensors",
+         "decoder.up.2.block.1.norm2.weight": "ae.safetensors",
+         "decoder.up.2.block.2.conv1.bias": "ae.safetensors",
+         "decoder.up.2.block.2.conv1.weight": "ae.safetensors",
+         "decoder.up.2.block.2.conv2.bias": "ae.safetensors",
+         "decoder.up.2.block.2.conv2.weight": "ae.safetensors",
+         "decoder.up.2.block.2.norm1.bias": "ae.safetensors",
+         "decoder.up.2.block.2.norm1.weight": "ae.safetensors",
+         "decoder.up.2.block.2.norm2.bias": "ae.safetensors",
+         "decoder.up.2.block.2.norm2.weight": "ae.safetensors",
+         "decoder.up.2.upsample.conv.bias": "ae.safetensors",
+         "decoder.up.2.upsample.conv.weight": "ae.safetensors",
+         "decoder.up.3.block.0.conv1.bias": "ae.safetensors",
+         "decoder.up.3.block.0.conv1.weight": "ae.safetensors",
+         "decoder.up.3.block.0.conv2.bias": "ae.safetensors",
+         "decoder.up.3.block.0.conv2.weight": "ae.safetensors",
+         "decoder.up.3.block.0.norm1.bias": "ae.safetensors",
+         "decoder.up.3.block.0.norm1.weight": "ae.safetensors",
+         "decoder.up.3.block.0.norm2.bias": "ae.safetensors",
+         "decoder.up.3.block.0.norm2.weight": "ae.safetensors",
+         "decoder.up.3.block.1.conv1.bias": "ae.safetensors",
+         "decoder.up.3.block.1.conv1.weight": "ae.safetensors",
+         "decoder.up.3.block.1.conv2.bias": "ae.safetensors",
+         "decoder.up.3.block.1.conv2.weight": "ae.safetensors",
+         "decoder.up.3.block.1.norm1.bias": "ae.safetensors",
+         "decoder.up.3.block.1.norm1.weight": "ae.safetensors",
+         "decoder.up.3.block.1.norm2.bias": "ae.safetensors",
+         "decoder.up.3.block.1.norm2.weight": "ae.safetensors",
+         "decoder.up.3.block.2.conv1.bias": "ae.safetensors",
+         "decoder.up.3.block.2.conv1.weight": "ae.safetensors",
+         "decoder.up.3.block.2.conv2.bias": "ae.safetensors",
+         "decoder.up.3.block.2.conv2.weight": "ae.safetensors",
+         "decoder.up.3.block.2.norm1.bias": "ae.safetensors",
+         "decoder.up.3.block.2.norm1.weight": "ae.safetensors",
+         "decoder.up.3.block.2.norm2.bias": "ae.safetensors",
+         "decoder.up.3.block.2.norm2.weight": "ae.safetensors",
+         "decoder.up.3.upsample.conv.bias": "ae.safetensors",
+         "decoder.up.3.upsample.conv.weight": "ae.safetensors",
+         "encoder.conv_in.bias": "ae.safetensors",
+         "encoder.conv_in.weight": "ae.safetensors",
+         "encoder.conv_out.bias": "ae.safetensors",
+         "encoder.conv_out.weight": "ae.safetensors",
+         "encoder.down.0.block.0.conv1.bias": "ae.safetensors",
+         "encoder.down.0.block.0.conv1.weight": "ae.safetensors",
+         "encoder.down.0.block.0.conv2.bias": "ae.safetensors",
+         "encoder.down.0.block.0.conv2.weight": "ae.safetensors",
+         "encoder.down.0.block.0.norm1.bias": "ae.safetensors",
+         "encoder.down.0.block.0.norm1.weight": "ae.safetensors",
+         "encoder.down.0.block.0.norm2.bias": "ae.safetensors",
+         "encoder.down.0.block.0.norm2.weight": "ae.safetensors",
+         "encoder.down.0.block.1.conv1.bias": "ae.safetensors",
+         "encoder.down.0.block.1.conv1.weight": "ae.safetensors",
+         "encoder.down.0.block.1.conv2.bias": "ae.safetensors",
+         "encoder.down.0.block.1.conv2.weight": "ae.safetensors",
+         "encoder.down.0.block.1.norm1.bias": "ae.safetensors",
+         "encoder.down.0.block.1.norm1.weight": "ae.safetensors",
+         "encoder.down.0.block.1.norm2.bias": "ae.safetensors",
+         "encoder.down.0.block.1.norm2.weight": "ae.safetensors",
+         "encoder.down.0.downsample.conv.bias": "ae.safetensors",
+         "encoder.down.0.downsample.conv.weight": "ae.safetensors",
+         "encoder.down.1.block.0.conv1.bias": "ae.safetensors",
+         "encoder.down.1.block.0.conv1.weight": "ae.safetensors",
+         "encoder.down.1.block.0.conv2.bias": "ae.safetensors",
+         "encoder.down.1.block.0.conv2.weight": "ae.safetensors",
+         "encoder.down.1.block.0.nin_shortcut.bias": "ae.safetensors",
+         "encoder.down.1.block.0.nin_shortcut.weight": "ae.safetensors",
+         "encoder.down.1.block.0.norm1.bias": "ae.safetensors",
+         "encoder.down.1.block.0.norm1.weight": "ae.safetensors",
+         "encoder.down.1.block.0.norm2.bias": "ae.safetensors",
+         "encoder.down.1.block.0.norm2.weight": "ae.safetensors",
+         "encoder.down.1.block.1.conv1.bias": "ae.safetensors",
+         "encoder.down.1.block.1.conv1.weight": "ae.safetensors",
+         "encoder.down.1.block.1.conv2.bias": "ae.safetensors",
+         "encoder.down.1.block.1.conv2.weight": "ae.safetensors",
+         "encoder.down.1.block.1.norm1.bias": "ae.safetensors",
+         "encoder.down.1.block.1.norm1.weight": "ae.safetensors",
+         "encoder.down.1.block.1.norm2.bias": "ae.safetensors",
+         "encoder.down.1.block.1.norm2.weight": "ae.safetensors",
+         "encoder.down.1.downsample.conv.bias": "ae.safetensors",
+         "encoder.down.1.downsample.conv.weight": "ae.safetensors",
+         "encoder.down.2.block.0.conv1.bias": "ae.safetensors",
+         "encoder.down.2.block.0.conv1.weight": "ae.safetensors",
+         "encoder.down.2.block.0.conv2.bias": "ae.safetensors",
+         "encoder.down.2.block.0.conv2.weight": "ae.safetensors",
+         "encoder.down.2.block.0.nin_shortcut.bias": "ae.safetensors",
+         "encoder.down.2.block.0.nin_shortcut.weight": "ae.safetensors",
+         "encoder.down.2.block.0.norm1.bias": "ae.safetensors",
+         "encoder.down.2.block.0.norm1.weight": "ae.safetensors",
+         "encoder.down.2.block.0.norm2.bias": "ae.safetensors",
+         "encoder.down.2.block.0.norm2.weight": "ae.safetensors",
+         "encoder.down.2.block.1.conv1.bias": "ae.safetensors",
+         "encoder.down.2.block.1.conv1.weight": "ae.safetensors",
+         "encoder.down.2.block.1.conv2.bias": "ae.safetensors",
+         "encoder.down.2.block.1.conv2.weight": "ae.safetensors",
+         "encoder.down.2.block.1.norm1.bias": "ae.safetensors",
+         "encoder.down.2.block.1.norm1.weight": "ae.safetensors",
+         "encoder.down.2.block.1.norm2.bias": "ae.safetensors",
+         "encoder.down.2.block.1.norm2.weight": "ae.safetensors",
+         "encoder.down.2.downsample.conv.bias": "ae.safetensors",
+         "encoder.down.2.downsample.conv.weight": "ae.safetensors",
+         "encoder.down.3.block.0.conv1.bias": "ae.safetensors",
+         "encoder.down.3.block.0.conv1.weight": "ae.safetensors",
+         "encoder.down.3.block.0.conv2.bias": "ae.safetensors",
+         "encoder.down.3.block.0.conv2.weight": "ae.safetensors",
+         "encoder.down.3.block.0.norm1.bias": "ae.safetensors",
+         "encoder.down.3.block.0.norm1.weight": "ae.safetensors",
+         "encoder.down.3.block.0.norm2.bias": "ae.safetensors",
+         "encoder.down.3.block.0.norm2.weight": "ae.safetensors",
+         "encoder.down.3.block.1.conv1.bias": "ae.safetensors",
+         "encoder.down.3.block.1.conv1.weight": "ae.safetensors",
+         "encoder.down.3.block.1.conv2.bias": "ae.safetensors",
+         "encoder.down.3.block.1.conv2.weight": "ae.safetensors",
+         "encoder.down.3.block.1.norm1.bias": "ae.safetensors",
+         "encoder.down.3.block.1.norm1.weight": "ae.safetensors",
+         "encoder.down.3.block.1.norm2.bias": "ae.safetensors",
+         "encoder.down.3.block.1.norm2.weight": "ae.safetensors",
+         "encoder.mid.attn_1.k.bias": "ae.safetensors",
+         "encoder.mid.attn_1.k.weight": "ae.safetensors",
+         "encoder.mid.attn_1.norm.bias": "ae.safetensors",
+         "encoder.mid.attn_1.norm.weight": "ae.safetensors",
+         "encoder.mid.attn_1.proj_out.bias": "ae.safetensors",
+         "encoder.mid.attn_1.proj_out.weight": "ae.safetensors",
+         "encoder.mid.attn_1.q.bias": "ae.safetensors",
+         "encoder.mid.attn_1.q.weight": "ae.safetensors",
+         "encoder.mid.attn_1.v.bias": "ae.safetensors",
+         "encoder.mid.attn_1.v.weight": "ae.safetensors",
+         "encoder.mid.block_1.conv1.bias": "ae.safetensors",
+         "encoder.mid.block_1.conv1.weight": "ae.safetensors",
+         "encoder.mid.block_1.conv2.bias": "ae.safetensors",
+         "encoder.mid.block_1.conv2.weight": "ae.safetensors",
+         "encoder.mid.block_1.norm1.bias": "ae.safetensors",
+         "encoder.mid.block_1.norm1.weight": "ae.safetensors",
+         "encoder.mid.block_1.norm2.bias": "ae.safetensors",
+         "encoder.mid.block_1.norm2.weight": "ae.safetensors",
+         "encoder.mid.block_2.conv1.bias": "ae.safetensors",
+         "encoder.mid.block_2.conv1.weight": "ae.safetensors",
+         "encoder.mid.block_2.conv2.bias": "ae.safetensors",
+         "encoder.mid.block_2.conv2.weight": "ae.safetensors",
+         "encoder.mid.block_2.norm1.bias": "ae.safetensors",
+         "encoder.mid.block_2.norm1.weight": "ae.safetensors",
+         "encoder.mid.block_2.norm2.bias": "ae.safetensors",
+         "encoder.mid.block_2.norm2.weight": "ae.safetensors",
+         "encoder.norm_out.bias": "ae.safetensors",
+         "encoder.norm_out.weight": "ae.safetensors"
+     }
+ }
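
The index routes each tensor name to its shard, so a loader only has to group names by file and read each shard once. A minimal sketch of that lookup (assumes a local clone of the repo as the working directory):

```python
import json
from collections import defaultdict
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

# Group tensor names by the shard file that holds them.
shards = defaultdict(list)
for tensor_name, shard_file in index["weight_map"].items():
    shards[shard_file].append(tensor_name)

state_dict = {}
for shard_file, names in shards.items():
    with safe_open(shard_file, framework="pt") as f:
        for n in names:
            state_dict[n] = f.get_tensor(n)
print(len(state_dict), "tensors loaded")
```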
preprocessor_config.json ADDED
@@ -0,0 +1,24 @@
+ {
+     "do_convert_rgb": true,
+     "do_normalize": true,
+     "do_rescale": true,
+     "do_resize": true,
+     "image_mean": [
+         0.5,
+         0.5,
+         0.5
+     ],
+     "image_processor_type": "SiglipImageProcessor",
+     "image_std": [
+         0.5,
+         0.5,
+         0.5
+     ],
+     "processor_class": "BagelProcessor",
+     "rescale_factor": 0.00392156862745098,
+     "resample": 3,
+     "size": {
+         "height": 980,
+         "width": 980
+     }
+ }
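
The rescale factor is 1/255, and a mean/std of 0.5 maps pixels into [-1, 1]. A minimal preprocessing sketch using the standard transformers loader (the repo id and placeholder image are assumptions):

```python
from PIL import Image
from transformers import SiglipImageProcessor

proc = SiglipImageProcessor.from_pretrained("zhengyuansu/bagel-tiny-random")
img = Image.new("RGB", (640, 480), color="gray")  # placeholder image
pixels = proc(images=img, return_tensors="pt")["pixel_values"]
print(pixels.shape)  # (1, 3, 980, 980) after resize + rescale + normalize
```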
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,207 @@
+ {
+     "add_bos_token": false,
+     "add_prefix_space": false,
+     "added_tokens_decoder": {
+         "151643": {
+             "content": "<|endoftext|>",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": true
+         },
+         "151644": {
+             "content": "<|im_start|>",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": true
+         },
+         "151645": {
+             "content": "<|im_end|>",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": true
+         },
+         "151646": {
+             "content": "<|object_ref_start|>",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": true
+         },
+         "151647": {
+             "content": "<|object_ref_end|>",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": true
+         },
+         "151648": {
+             "content": "<|box_start|>",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": true
+         },
+         "151649": {
+             "content": "<|box_end|>",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": true
+         },
+         "151650": {
+             "content": "<|quad_start|>",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": true
+         },
+         "151651": {
+             "content": "<|quad_end|>",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": true
+         },
+         "151652": {
+             "content": "<|vision_start|>",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": true
+         },
+         "151653": {
+             "content": "<|vision_end|>",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": true
+         },
+         "151654": {
+             "content": "<|vision_pad|>",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": true
+         },
+         "151655": {
+             "content": "<|image_pad|>",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": true
+         },
+         "151656": {
+             "content": "<|video_pad|>",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": true
+         },
+         "151657": {
+             "content": "<tool_call>",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": false
+         },
+         "151658": {
+             "content": "</tool_call>",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": false
+         },
+         "151659": {
+             "content": "<|fim_prefix|>",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": false
+         },
+         "151660": {
+             "content": "<|fim_middle|>",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": false
+         },
+         "151661": {
+             "content": "<|fim_suffix|>",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": false
+         },
+         "151662": {
+             "content": "<|fim_pad|>",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": false
+         },
+         "151663": {
+             "content": "<|repo_name|>",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": false
+         },
+         "151664": {
+             "content": "<|file_sep|>",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": false
+         }
+     },
+     "additional_special_tokens": [
+         "<|im_start|>",
+         "<|im_end|>",
+         "<|object_ref_start|>",
+         "<|object_ref_end|>",
+         "<|box_start|>",
+         "<|box_end|>",
+         "<|quad_start|>",
+         "<|quad_end|>",
+         "<|vision_start|>",
+         "<|vision_end|>",
+         "<|vision_pad|>",
+         "<|image_pad|>",
+         "<|video_pad|>"
+     ],
+     "bos_token": null,
+ "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+     "clean_up_tokenization_spaces": false,
+     "eos_token": "<|im_end|>",
+     "errors": "replace",
+     "model_max_length": 131072,
+     "pad_token": "<|endoftext|>",
+     "split_special_tokens": false,
+     "tokenizer_class": "Qwen2Tokenizer",
+     "unk_token": null
+ }
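
The bundled chat template is the standard Qwen2 ChatML format. A minimal sketch of loading the tokenizer and rendering a prompt with it (the repo id is an assumption):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("zhengyuansu/bagel-tiny-random")
messages = [{"role": "user", "content": "a cute cat"}]
text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)  # <|im_start|>system ... <|im_start|>user ... <|im_start|>assistant
```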
vit_config.json ADDED
@@ -0,0 +1,10 @@
+ {
+     "hidden_size": 64,
+     "image_size": 980,
+     "intermediate_size": 128,
+     "model_type": "siglip_vision_model",
+     "num_attention_heads": 2,
+     "num_hidden_layers": 1,
+     "patch_size": 14,
+     "num_channels": 3
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff