gokaygokay committed on
Commit 5230f6a
1 Parent(s): 5c696d2

Upload configuration_moondream.py with huggingface_hub

Files changed (1)
  1. configuration_moondream.py +98 -0
configuration_moondream.py ADDED
@@ -0,0 +1,98 @@
from transformers import PretrainedConfig


class PhiConfig(PretrainedConfig):
    model_type = "phi"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=51200,
        hidden_size=2048,
        intermediate_size=8192,
        num_hidden_layers=24,
        num_attention_heads=32,
        num_key_value_heads=None,
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attention_dropout=0.0,
        hidden_act="gelu_new",
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        partial_rotary_factor=0.5,
        qk_layernorm=False,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attention_dropout = attention_dropout
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.partial_rotary_factor = partial_rotary_factor
        self.qk_layernorm = qk_layernorm
        self._rope_scaling_validation()

        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation
    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if (
            rope_scaling_factor is None
            or not isinstance(rope_scaling_factor, float)
            or rope_scaling_factor <= 1.0
        ):
            raise ValueError(
                f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}"
            )


class MoondreamConfig(PretrainedConfig):
    model_type = "moondream1"

    def __init__(self, **kwargs):
        self.phi_config = PhiConfig(**kwargs)
        super().__init__(**kwargs)