{ "_name_or_path": "Gladiaio/mpt-7b-qlora", "architectures": [ "MPTForCausalLM" ], "attn_config": { "alibi": true, "alibi_bias_max": 8, "attn_impl": "torch", "attn_pdrop": 0, "attn_type": "multihead_attention", "attn_uses_sequence_id": false, "clip_qkv": null, "prefix_lm": false, "qk_ln": false, "softmax_scale": null }, "auto_map": { "AutoConfig": "Gladiaio/mpt-7b-qlora--configuration_mpt.MPTConfig", "AutoModelForCausalLM": "Gladiaio/mpt-7b-qlora--modeling_mpt.MPTForCausalLM" }, "d_model": 4096, "emb_pdrop": 0, "embedding_fraction": 1.0, "expansion_ratio": 4, "init_config": { "emb_init_std": null, "emb_init_uniform_lim": null, "fan_mode": "fan_in", "init_div_is_residual": true, "init_gain": 0, "init_nonlinearity": "relu", "init_std": 0.02, "name": "kaiming_normal_", "verbose": 0 }, "init_device": "cpu", "learned_pos_emb": true, "logit_scale": null, "max_seq_len": 2048, "model_type": "mpt", "n_heads": 32, "n_layers": 32, "no_bias": true, "norm_type": "low_precision_layernorm", "resid_pdrop": 0, "tokenizer_name": "EleutherAI/gpt-neox-20b", "torch_dtype": "float16", "transformers_version": "4.31.0.dev0", "use_cache": false, "verbose": 0, "vocab_size": 50432 }