bebechien RyanMullins osanseviero Douglas Reid committed on
Commit
c1c4cf1
·
0 Parent(s):

initial commit

Browse files

Co-authored-by: RyanMullins <RyanMullins@users.noreply.huggingface.co>
Co-authored-by: osanseviero <osanseviero@users.noreply.huggingface.co>
Co-authored-by: Douglas Reid <Douglas Reid@users.noreply.huggingface.co>

.gitattributes ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ ---
2
+ library_name: transformers
3
+ pipeline_tag: image-text-to-text
4
+ ---
5
+ ⚠️ EARLY ACCESS: This is not the final model behaviour or example. They are subject to change and further improvements.
config.json ADDED
@@ -0,0 +1,190 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "Gemma4ForConditionalGeneration"
4
+ ],
5
+ "audio_config": null,
6
+ "audio_ms_per_token": 40,
7
+ "audio_token_id": 258881,
8
+ "boa_token_id": 256000,
9
+ "boi_token_id": 255999,
10
+ "dtype": "bfloat16",
11
+ "eoa_token_id": 258883,
12
+ "eoi_token_id": 258882,
13
+ "image_token_id": 258880,
14
+ "initializer_range": 0.02,
15
+ "model_type": "gemma4",
16
+ "text_config": {
17
+ "_sliding_window_pattern": 6,
18
+ "attention_bias": false,
19
+ "attention_dropout": 0.0,
20
+ "attention_k_eq_v": true,
21
+ "attn_logit_softcapping": null,
22
+ "bos_token_id": 2,
23
+ "dtype": "bfloat16",
24
+ "enable_moe_block": false,
25
+ "eos_token_id": 1,
26
+ "expert_intermediate_size": null,
27
+ "final_logit_softcapping": 30.0,
28
+ "global_head_dim": 512,
29
+ "head_dim": 256,
30
+ "hidden_activation": "gelu_pytorch_tanh",
31
+ "hidden_size": 5376,
32
+ "hidden_size_per_layer_input": null,
33
+ "initializer_range": 0.02,
34
+ "intermediate_size": 21504,
35
+ "layer_types": [
36
+ "sliding_attention",
37
+ "sliding_attention",
38
+ "sliding_attention",
39
+ "sliding_attention",
40
+ "sliding_attention",
41
+ "full_attention",
42
+ "sliding_attention",
43
+ "sliding_attention",
44
+ "sliding_attention",
45
+ "sliding_attention",
46
+ "sliding_attention",
47
+ "full_attention",
48
+ "sliding_attention",
49
+ "sliding_attention",
50
+ "sliding_attention",
51
+ "sliding_attention",
52
+ "sliding_attention",
53
+ "full_attention",
54
+ "sliding_attention",
55
+ "sliding_attention",
56
+ "sliding_attention",
57
+ "sliding_attention",
58
+ "sliding_attention",
59
+ "full_attention",
60
+ "sliding_attention",
61
+ "sliding_attention",
62
+ "sliding_attention",
63
+ "sliding_attention",
64
+ "sliding_attention",
65
+ "full_attention",
66
+ "sliding_attention",
67
+ "sliding_attention",
68
+ "sliding_attention",
69
+ "sliding_attention",
70
+ "sliding_attention",
71
+ "full_attention",
72
+ "sliding_attention",
73
+ "sliding_attention",
74
+ "sliding_attention",
75
+ "sliding_attention",
76
+ "sliding_attention",
77
+ "full_attention",
78
+ "sliding_attention",
79
+ "sliding_attention",
80
+ "sliding_attention",
81
+ "sliding_attention",
82
+ "sliding_attention",
83
+ "full_attention",
84
+ "sliding_attention",
85
+ "sliding_attention",
86
+ "sliding_attention",
87
+ "sliding_attention",
88
+ "sliding_attention",
89
+ "full_attention",
90
+ "sliding_attention",
91
+ "sliding_attention",
92
+ "sliding_attention",
93
+ "sliding_attention",
94
+ "sliding_attention",
95
+ "full_attention"
96
+ ],
97
+ "max_position_embeddings": 131072,
98
+ "model_type": "gemma4_text",
99
+ "num_attention_heads": 32,
100
+ "num_experts": null,
101
+ "num_global_key_value_heads": 4,
102
+ "num_hidden_layers": 60,
103
+ "num_key_value_heads": 16,
104
+ "num_kv_shared_layers": 0,
105
+ "pad_token_id": 0,
106
+ "query_pre_attn_scalar": 256,
107
+ "rms_norm_eps": 1e-06,
108
+ "rope_parameters": {
109
+ "full_attention": {
110
+ "partial_rotary_factor": 0.25,
111
+ "rope_theta": 1000000.0,
112
+ "rope_type": "proportional"
113
+ },
114
+ "sliding_attention": {
115
+ "rope_theta": 10000.0,
116
+ "rope_type": "default"
117
+ }
118
+ },
119
+ "routed_layer_pattern": null,
120
+ "sliding_window": 1024,
121
+ "stream_and_decode_in_f32": false,
122
+ "tie_word_embeddings": true,
123
+ "top_k_experts": null,
124
+ "use_bidirectional_attention": "vision",
125
+ "use_cache": true,
126
+ "use_double_wide_mlp": false,
127
+ "vocab_size": 262144,
128
+ "vocab_size_per_layer_input": 262144
129
+ },
130
+ "tie_word_embeddings": true,
131
+ "transformers_version": "5.3.0.dev0",
132
+ "vision_config": {
133
+ "attention_bias": false,
134
+ "attention_dropout": 0.0,
135
+ "default_output_length": 280,
136
+ "dtype": "bfloat16",
137
+ "global_head_dim": 72,
138
+ "head_dim": 72,
139
+ "hidden_activation": "gelu_pytorch_tanh",
140
+ "hidden_size": 1152,
141
+ "intermediate_size": 4304,
142
+ "layer_types": [
143
+ "full_attention",
144
+ "full_attention",
145
+ "full_attention",
146
+ "full_attention",
147
+ "full_attention",
148
+ "full_attention",
149
+ "full_attention",
150
+ "full_attention",
151
+ "full_attention",
152
+ "full_attention",
153
+ "full_attention",
154
+ "full_attention",
155
+ "full_attention",
156
+ "full_attention",
157
+ "full_attention",
158
+ "full_attention",
159
+ "full_attention",
160
+ "full_attention",
161
+ "full_attention",
162
+ "full_attention",
163
+ "full_attention",
164
+ "full_attention",
165
+ "full_attention",
166
+ "full_attention",
167
+ "full_attention",
168
+ "full_attention",
169
+ "full_attention"
170
+ ],
171
+ "max_position_embeddings": 131072,
172
+ "model_type": "",
173
+ "num_attention_heads": 16,
174
+ "num_hidden_layers": 27,
175
+ "num_key_value_heads": 16,
176
+ "patch_size": 16,
177
+ "pooling_kernel_size": 3,
178
+ "position_embedding_size": 10240,
179
+ "rms_norm_eps": 1e-06,
180
+ "rope_parameters": {
181
+ "full_attention": {
182
+ "rope_theta": 100.0,
183
+ "rope_type": "default"
184
+ }
185
+ },
186
+ "use_bidirectional_attention": "vision",
187
+ "use_clipped_linears": false
188
+ },
189
+ "vision_soft_tokens_per_image": 280
190
+ }
generation_config.json ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token_id": 2,
3
+ "do_sample": true,
4
+ "eos_token_id": 1,
5
+ "pad_token_id": 0,
6
+ "temperature": 1.0,
7
+ "top_k": 64,
8
+ "top_p": 0.95,
9
+ "transformers_version": "5.3.0.dev0"
10
+ }
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7db5d843e0643dc1c5df2b2a205589915e960d72940999705f03327026cf3cbb
3
+ size 49784782228
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:baf453d32f5edeb20b4af8cba247266205a2441359f6a135f4cca4d0de3e46a8
3
+ size 12761549884
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
processor_config.json ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "audio_ms_per_token": 40,
3
+ "audio_seq_length": 750,
4
+ "feature_extractor": {
5
+ "dither": 0.0,
6
+ "feature_extractor_type": "Gemma4AudioFeatureExtractor",
7
+ "feature_size": 128,
8
+ "fft_length": 512,
9
+ "fft_overdrive": false,
10
+ "frame_length": 320,
11
+ "hop_length": 160,
12
+ "input_scale_factor": 1.0,
13
+ "max_frequency": 8000.0,
14
+ "mel_floor": 0.001,
15
+ "min_frequency": 0.0,
16
+ "padding_side": "right",
17
+ "padding_value": 0.0,
18
+ "per_bin_mean": null,
19
+ "per_bin_stddev": null,
20
+ "preemphasis": 0.0,
21
+ "preemphasis_htk_flavor": true,
22
+ "return_attention_mask": true,
23
+ "sampling_rate": 16000
24
+ },
25
+ "image_processor": {
26
+ "data_format": "channels_first",
27
+ "do_convert_rgb": true,
28
+ "do_normalize": false,
29
+ "do_rescale": true,
30
+ "do_resize": true,
31
+ "image_mean": [
32
+ 0.5,
33
+ 0.5,
34
+ 0.5
35
+ ],
36
+ "image_processor_type": "Gemma4ImageProcessorFast",
37
+ "image_seq_length": 280,
38
+ "image_std": [
39
+ 0.5,
40
+ 0.5,
41
+ 0.5
42
+ ],
43
+ "max_soft_tokens": 280,
44
+ "patch_size": 16,
45
+ "pooling_kernel_size": 3,
46
+ "resample": 3,
47
+ "rescale_factor": 0.00392156862745098,
48
+ "size": {
49
+ "height": 224,
50
+ "width": 224
51
+ }
52
+ },
53
+ "image_seq_length": 280,
54
+ "processor_class": "Gemma4Processor"
55
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b6f9cb1153f49bc7e0c148d0c1766017aed9512f45eb33afeeb71565d29c938b
3
+ size 32169319
tokenizer_config.json ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "audio_token": "<|audio|>",
3
+ "backend": "tokenizers",
4
+ "boa_token": "<|audio>",
5
+ "boi_token": "<|image>",
6
+ "bos_token": "<bos>",
7
+ "eoa_token": "<audio|>",
8
+ "eoc_token": "<channel|>",
9
+ "eoi_token": "<image|>",
10
+ "eos_token": "<eos>",
11
+ "eot_token": "<turn|>",
12
+ "escape_token": "<|\"|>",
13
+ "etc_token": "<tool_call|>",
14
+ "etd_token": "<tool|>",
15
+ "etr_token": "<tool_response|>",
16
+ "extra_special_tokens": [],
17
+ "image_token": "<|image|>",
18
+ "mask_token": "<mask>",
19
+ "model_max_length": 1000000000000000019884624838656,
20
+ "pad_token": "<pad>",
21
+ "padding_side": "left",
22
+ "processor_class": "Gemma4Processor",
23
+ "soc_token": "<|channel>",
24
+ "sot_token": "<|turn>",
25
+ "stc_token": "<|tool_call>",
26
+ "std_token": "<|tool>",
27
+ "str_token": "<|tool_response>",
28
+ "think_token": "<|think|>",
29
+ "tokenizer_class": "GemmaTokenizer",
30
+ "unk_token": "<unk>"
31
+ }