x54-729 committed on
Commit 739d369
1 Parent(s): 84abffb

Update configuration_internlm.py

Files changed (1)
  1. configuration_internlm.py +8 -14
configuration_internlm.py CHANGED
@@ -19,9 +19,8 @@
 # limitations under the License.
 """ InternLM model configuration"""
 
-from transformers.utils import logging
 from transformers.configuration_utils import PretrainedConfig
-
+from transformers.utils import logging
 
 logger = logging.get_logger(__name__)
 
@@ -30,14 +29,11 @@ INTERNLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
 
 class InternLMConfig(PretrainedConfig):
     r"""
-    This is the configuration class to store the configuration of a [`InternLMModel`]. It is used to instantiate an InternLM
-    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
-    defaults will yield a similar configuration to that of the InternLM-7B.
-
+    This is the configuration class to store the configuration of a [`InternLMModel`]. It is used to instantiate
+    an InternLM model according to the specified arguments, defining the model architecture. Instantiating a
+    configuration with the defaults will yield a similar configuration to that of the InternLM-7B.
     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
     documentation from [`PretrainedConfig`] for more information.
-
-
     Args:
         vocab_size (`int`, *optional*, defaults to 32000):
             Vocabulary size of the InternLM model. Defines the number of different tokens that can be represented by the
@@ -65,23 +61,19 @@ class InternLMConfig(PretrainedConfig):
         tie_word_embeddings(`bool`, *optional*, defaults to `False`):
             Whether to tie weight embeddings
     Example:
-
     ```python
     >>> from transformers import InternLMModel, InternLMConfig
-
     >>> # Initializing a InternLM internlm-7b style configuration
     >>> configuration = InternLMConfig()
-
     >>> # Initializing a model from the internlm-7b style configuration
     >>> model = InternLMModel(configuration)
-
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
     model_type = "internlm"
     _auto_class = "AutoConfig"
 
-    def __init__(
+    def __init__(  # pylint: disable=W0102
         self,
         vocab_size=103168,
         hidden_size=4096,
@@ -98,6 +90,7 @@ class InternLMConfig(PretrainedConfig):
         eos_token_id=2,
         tie_word_embeddings=False,
         bias=True,
+        rotary={"base": 10000, "type": "dynamic"},  # pylint: disable=W0102
         **kwargs,
     ):
         self.vocab_size = vocab_size
@@ -111,10 +104,11 @@ class InternLMConfig(PretrainedConfig):
         self.rms_norm_eps = rms_norm_eps
         self.use_cache = use_cache
         self.bias = bias
+        self.rotary = rotary
         super().__init__(
             pad_token_id=pad_token_id,
             bos_token_id=bos_token_id,
             eos_token_id=eos_token_id,
             tie_word_embeddings=tie_word_embeddings,
             **kwargs,
-        )
+        )