import math
from transformers import PretrainedConfig


class Phi2Config(PretrainedConfig):
    model_type = "phi2"  # not necessary unless you want to register model with auto classes
    attribute_map = {
        "max_position_embeddings": "initial_cos_sin_cache_len",
        "hidden_size": "d_embedding",
        "num_attention_heads": "n_attn_heads",
        "num_hidden_layers": "n_attn_blocks",
    }

    def __init__(
        self,
        vocab_size: int,  # includes the extra tokens Phi-2 defines in tokenizer_config.json
        vocab_chunk_for_gpu_efficiency: int,
        initial_cos_sin_cache_len: int,
        d_embedding: int,
        n_attn_blocks: int,
        n_attn_heads: int,
        use_flash_attn: bool,
        use_flash_rotary: bool,
        use_fused_dense: bool,
        attn_pdrop: float,
        embd_pdrop: float,
        resid_pdrop: float,
        layer_norm_epsilon: float,
        weight_initialization_range: float,
        tie_word_embeddings: bool,  # whether the input embedding and output (LM head) weights are shared
        checkpointing: bool,  # whether to use gradient checkpointing, trading extra compute for lower memory usage
        **kwargs
    ) -> None:
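        # Pad the vocab size up to the next multiple of `vocab_chunk_for_gpu_efficiency`
        # so the embedding/LM-head matrices have GPU-friendly dimensions.
        # For example, with vocab_size=50295 and a chunk of 64: ceil(50295 / 64) * 64 = 50304.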
        self.vocab_size = (
            math.ceil(
                vocab_size / vocab_chunk_for_gpu_efficiency
            ) * vocab_chunk_for_gpu_efficiency
        )
        self.initial_cos_sin_cache_len = initial_cos_sin_cache_len
        self.d_embedding = d_embedding
        self.n_attn_blocks = n_attn_blocks
        self.n_attn_heads = n_attn_heads
        self.use_flash_attn = use_flash_attn
        self.use_flash_rotary = use_flash_rotary
        self.use_fused_dense = use_fused_dense
        self.attn_pdrop = attn_pdrop
        self.embd_pdrop = embd_pdrop
        self.resid_pdrop = resid_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.weight_initialization_range = weight_initialization_range
        self.checkpointing = checkpointing

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


if __name__ == "__main__":
    # Illustrative, Phi-2-sized values (assumed here so the script runs end to end);
    # adjust them for your own setup.
    phi2_config = Phi2Config(
        vocab_size=50295, vocab_chunk_for_gpu_efficiency=64,
        initial_cos_sin_cache_len=2048, d_embedding=2560,
        n_attn_blocks=32, n_attn_heads=32,
        use_flash_attn=False, use_flash_rotary=False, use_fused_dense=False,
        attn_pdrop=0.0, embd_pdrop=0.0, resid_pdrop=0.0,
        layer_norm_epsilon=1e-5, weight_initialization_range=0.02,
        tie_word_embeddings=False, checkpointing=False,
    )
    # phi2_config.save_pretrained("phi2_config")
    # phi2_config = Phi2Config.from_pretrained("phi2_config")
    # phi2_config.push_to_hub("phi2_config")
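
    # A minimal sketch (not part of the original file): if you do want the Auto classes
    # to resolve `model_type = "phi2"`, the standard transformers call is:
    # from transformers import AutoConfig
    # AutoConfig.register("phi2", Phi2Config)

    # `attribute_map` lets generic Hugging Face code keep using the standard attribute
    # names; the custom attributes are reachable under both spellings.
    assert phi2_config.hidden_size == phi2_config.d_embedding
    assert phi2_config.num_attention_heads == phi2_config.n_attn_heads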