arch:
  type: TransformerLMHeadModel
  args:
    transformer_config:
      type: TransformerDecoderOnlyModel
      args:
        embed_config:
          type: TransformerEmbeddingBlock
          args:
            token_embed_config:
              type: TokenEmbedding
              args:
                n_embed: 1024
                n_vocab: 46145
            pos_embed_config: null
            type_embed_config: null
            ln_config:
              type: LayerNorm
              args:
                n_embed: 1024
                ln_eps: 1.0e-05
            p_drop_embed: 0.0
            concat_strategy: id_first
        decoder_config:
          type: TransformerDecoderBlock
          args:
            attn_config:
              type: MultiHeadKeyValueAttention
              args:
                n_embed: 1024
                n_pos: 0
                n_head: 16
                head_size: 64
                p_drop_attn: 0.0
                p_drop_resid: 0.0
                bias_attn: true
                bias_proj: true
                cross_attn: false
                scale_dot_product: false
                scale_layer_wise: false
                layer_idx: null
                perform_linear_bias: true
                perform_bloom_split_head: true
                perform_query_scaling: false
                attn_window_size: null
            mlp_config:
              type: TransformerMLP
              args:
                n_embed: 1024
                n_inner: 4096
                act_fn_config:
                  type: FastGELUActivation
                  args: {}
                p_drop_mlp: 0.0
            ln_config:
              type: LayerNorm
              args:
                n_embed: 1024
                ln_eps: 1.0e-05
            n_embed: 1024
            post_norm: false
            add_cross_attn: false
        n_embed: 1024
        n_layer: 24
        n_head: 16
        ln_config:
          type: LayerNorm
          args:
            n_embed: 1024
            ln_eps: 1.0e-05
        perform_linear_bias: true
        attn_window_size_loop_unit: null
    lm_head_config:
      type: TransformerLMHead
      args:
        n_vocab: 46145
        n_embed: 1024
        perform_transform: false
        act_fn_config: null
        ln_config: null
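
# Back-of-the-envelope parameter count implied by the values above. This is a
# sketch, assuming a standard GPT/BLOOM-style weight layout (fused QKV
# projection, two LayerNorms per pre-norm decoder block, which matches
# post_norm: false) and tied input/output embeddings; the actual framework
# may lay weights out differently.
#   sanity check: n_head * head_size = 16 * 64 = 1024 = n_embed
#   token embedding:   46145 * 1024                       = 47,252,480
#   per decoder layer: fused QKV (1024*3072 + 3072)       =  3,148,800
#                      attn out proj (1024*1024 + 1024)   =  1,049,600
#                      MLP up (1024*4096 + 4096)          =  4,198,400
#                      MLP down (4096*1024 + 1024)        =  4,195,328
#                      2 LayerNorms (2 * 2*1024)          =      4,096
#                      layer total                        = 12,596,224
#   24 layers:         24 * 12,596,224                    = 302,309,376
#   embed LN + final LN: 2 * (2*1024)                     =      4,096
#   total (tied lm_head):   ~349.6M parameters (~0.35B)
#   total (untied lm_head): ~396.8M parameters
# Note: pos_embed_config: null and n_pos: 0 combined with
# perform_linear_bias: true suggest ALiBi-style positional biases
# (as in BLOOM), so no positional-embedding table adds parameters.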