{
  "architectures": [
    "MPTForCausalLM"
  ],
  "attn_config": {
    "alibi": true,
    "alibi_bias_max": 8,
    "attn_impl": "torch",
    "attn_pdrop": 0,
    "attn_type": "multihead_attention",
    "attn_uses_sequence_id": false,
    "clip_qkv": null,
    "prefix_lm": false,
    "qk_ln": false,
    "softmax_scale": null
  },
  "auto_map": {
    "AutoConfig": "configuration_mpt.MPTConfig",
    "AutoModelForCausalLM": "modeling_mpt.MPTForCausalLM"
  },
  "d_model": 4096,
  "emb_pdrop": 0,
  "embedding_fraction": 1.0,
  "expansion_ratio": 4,
  "init_config": {
    "emb_init_std": null,
    "emb_init_uniform_lim": null,
    "fan_mode": "fan_in",
    "init_div_is_residual": true,
    "init_gain": 0.0,
    "init_nonlinearity": "relu",
    "init_std": null,
    "name": "kaiming_normal_",
    "verbose": 0
  },
  "init_device": "meta",
  "learned_pos_emb": true,
  "logit_scale": null,
  "max_seq_len": 8192,
  "model_type": "mpt",
  "n_heads": 32,
  "n_layers": 32,
  "no_bias": true,
  "norm_type": "low_precision_layernorm",
  "resid_pdrop": 0,
  "tokenizer_name": "EleutherAI/gpt-neox-20b",
  "torch_dtype": "bfloat16",
  "transformers_version": "4.30.2",
  "use_cache": false,
  "verbose": 0,
  "vocab_size": 50432
}
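
Because "auto_map" points at the custom configuration_mpt / modeling_mpt modules shipped with the checkpoint, loading this config through transformers requires trust_remote_code=True. A minimal loading sketch is below; the checkpoint path is a hypothetical placeholder, and the tokenizer is the one named in "tokenizer_name":

import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

# Hypothetical path: point this at the directory (or hub repo) containing
# config.json, configuration_mpt.py, and modeling_mpt.py.
checkpoint = "path/to/mpt-checkpoint"

# trust_remote_code=True lets transformers import the MPTConfig / MPTForCausalLM
# classes referenced in the "auto_map" entry above.
config = AutoConfig.from_pretrained(checkpoint, trust_remote_code=True)

# The config names "EleutherAI/gpt-neox-20b" as its tokenizer (vocab_size 50432).
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")

# torch_dtype mirrors the "torch_dtype": "bfloat16" field in the config.
model = AutoModelForCausalLM.from_pretrained(
    checkpoint,
    config=config,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)

With ALiBi enabled ("alibi": true) and "max_seq_len": 8192, the model applies position biases rather than relying solely on learned position embeddings, so inputs up to 8192 tokens are accepted at inference time.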