pnevskaiaan committed on
Commit
7cb2b7a
·
verified ·
1 Parent(s): 11bc1c1

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +70 -3
README.md CHANGED
@@ -1,3 +1,70 @@
1
- ---
2
- license: apache-2.0
3
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ ---
4
+
5
+ ```python
6
+ from transformers import (
7
+ AutoTokenizer,
8
+ Gemma4Config,
9
+ Gemma4ForConditionalGeneration,
10
+ Gemma4TextConfig,
11
+ Gemma4ForCausalLM,
12
+ Gemma4VisionConfig,
13
+ Gemma4AudioConfig,
14
+ )
15
+
16
+
17
def generate_vlm_model(output_dir="./tiny-random-gemma4"):
    """Create and save a tiny, randomly initialized Gemma4 VLM checkpoint.

    Fetches only the *configuration* of ``google/gemma-4-E2B-it``, shrinks
    every sub-model (audio / text / vision) to toy dimensions, builds a
    fresh randomly initialized ``Gemma4ForConditionalGeneration`` from that
    config, and writes the model plus the original tokenizer to
    ``output_dir``.

    Args:
        output_dir: Directory the tiny model and tokenizer are saved to.

    Returns:
        The randomly initialized ``Gemma4ForConditionalGeneration`` instance.
    """
    import os

    # Only the config is needed -- instantiating the full pretrained model
    # (as `from_pretrained` on the model class would) downloads gigabytes of
    # weights that are immediately discarded, since the tiny model below is
    # built with random weights anyway.
    config = Gemma4Config.from_pretrained("google/gemma-4-E2B-it")

    # Shrink the audio tower.
    config.audio_config.hidden_size = 8
    config.audio_config.num_attention_heads = 2
    config.audio_config.num_hidden_layers = 1
    config.audio_config.output_proj_dims = 8

    # Shrink the language model. `layer_types` must have one entry per
    # hidden layer, so it is resized to match `num_hidden_layers = 3`.
    config.text_config.global_head_dim = 4
    config.text_config.head_dim = 4
    config.text_config.hidden_size = 8
    config.text_config.hidden_size_per_layer_input = 1
    config.text_config.intermediate_size = 32
    config.text_config.num_attention_heads = 2
    config.text_config.num_hidden_layers = 3
    config.text_config.layer_types = ["sliding_attention", "full_attention", "full_attention"]
    config.text_config.num_kv_shared_layers = 1
    config.text_config.dtype = "float32"

    # Shrink the vision tower.
    config.vision_config.default_output_length = 70
    config.vision_config.head_dim = 4
    config.vision_config.hidden_size = 8
    config.vision_config.intermediate_size = 32
    config.vision_config.num_attention_heads = 2
    config.vision_config.num_hidden_layers = 1
    config.vision_config.num_key_value_heads = 2
    config.vision_config.patch_size = 2

    # Random initialization from the shrunken config (no pretrained weights).
    model = Gemma4ForConditionalGeneration(config)
    model.eval()

    model.save_pretrained(output_dir)

    # Copy tokenizer from google/gemma-4-E2B-it
    tokenizer = AutoTokenizer.from_pretrained("google/gemma-4-E2B-it")
    tokenizer.save_pretrained(output_dir)

    # Report the on-disk size of the saved weights, if present.
    safetensors_path = os.path.join(output_dir, "model.safetensors")
    if os.path.exists(safetensors_path):
        size_mb = os.path.getsize(safetensors_path) / (1024 * 1024)
        print(f" model.safetensors size: {size_mb:.1f} MB")

    print(f" VLM model saved to {output_dir}")
    return model
65
+
66
+
67
def _main():
    """Script entry point: build and save the tiny random Gemma4 model."""
    generate_vlm_model()


if __name__ == "__main__":
    _main()
69
+
70
+ ```