matthewkenney committed
Commit • f661692
Parent(s): 075337d
Create configuration_mixformer_sequential.py
configuration_mixformer_sequential.py
ADDED
@@ -0,0 +1,59 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import math
from typing import Any, Dict, List, Optional, Union

from transformers import PretrainedConfig


class MixFormerSequentialConfig(PretrainedConfig):
    """MixFormer (sequential for DeepSpeed) configuration."""

    model_type = "mixformer-sequential"

    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
        "input_emb_layer": "embd_layer",  # `input_emb_layer` key is for backward compatibility
        "blocks": "architecture",  # `blocks` key is for backward compatibility
    }

    def __init__(
        self,
        vocab_size: Optional[int] = 50304,
        n_positions: Optional[int] = 2048,
        n_embd: Optional[int] = 1024,
        n_layer: Optional[int] = 20,
        n_inner: Optional[int] = None,
        n_head: Optional[int] = 16,
        rotary_dim: Optional[int] = 32,
        activation_function: Optional[str] = "gelu_new",
        embd_layer: Optional[str] = "default",
        architecture: Union[Dict[str, Any], List[Dict[str, Any]]] = None,
        embd_pdrop: Optional[float] = 0.0,
        resid_pdrop: Optional[float] = 0.0,
        layer_norm_epsilon: Optional[float] = 1e-5,
        initializer_range: Optional[float] = 0.02,
        tie_word_embeddings: Optional[bool] = False,
        pad_vocab_size_multiple: Optional[int] = 64,
        **kwargs
    ) -> None:
        self.vocab_size = int(math.ceil(vocab_size / pad_vocab_size_multiple) * pad_vocab_size_multiple)
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_inner = n_inner
        self.n_head = n_head
        self.rotary_dim = min(rotary_dim, n_embd // n_head)
        self.activation_function = activation_function
        self.embd_layer = embd_layer
        self.architecture = architecture
        self.embd_pdrop = embd_pdrop
        self.resid_pdrop = resid_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
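
As a quick illustration of what this configuration does (this sketch is not part of the committed file), the snippet below assumes the module above is importable as `configuration_mixformer_sequential` and exercises the vocabulary-size padding, the rotary-dimension clamp, and the `attribute_map` aliases provided by `PretrainedConfig`:

    # Minimal usage sketch (illustrative only, not part of the commit).
    from configuration_mixformer_sequential import MixFormerSequentialConfig

    config = MixFormerSequentialConfig(vocab_size=50295, n_embd=2048, n_head=32, rotary_dim=64)

    # vocab_size is rounded up to the next multiple of pad_vocab_size_multiple (64 by default),
    # so 50295 becomes 50304.
    assert config.vocab_size == 50304

    # rotary_dim is clamped to the per-head dimension (n_embd // n_head == 64 here).
    assert config.rotary_dim == 64

    # attribute_map exposes the standard transformers names as aliases for the native fields.
    assert config.hidden_size == config.n_embd
    assert config.num_attention_heads == config.n_head
    assert config.max_position_embeddings == config.n_positions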