bert4torch_config / THUDM/chatglm2-6b-int4/bert4torch_config.json
Update the GLM config (commit ae6679e)
{
    "model": "glm2",
    "hidden_act": "swiglu",
    "hidden_size": 4096,
    "intermediate_size": 13696,
    "layer_norm_eps": 1e-05,
    "max_sequence_length": 32768,
    "num_attention_heads": 32,
    "num_hidden_layers": 28,
    "vocab_size": 65024,
    "segment_vocab_size": 0,
    "num_key_value_heads": 2,
    "skip_init": true,
    "tie_word_embeddings": false,
    "eos_token_id": 2,
    "pad_token_id": 2,
    "rmsnorm": true,
    "rope_rank": "adjacent",
    "position_encoding_2d": true,
    "rope_scaling": {"type": "glm"},
    "torch_dtype": "float16",
    "_attn_implementation": "sdpa",
    "quantization_bit": 4,
    "quantization_method": "cpm_kernels",
    "target_modules": ["q", "k", "v", "o", "intermediateDense", "outputDense"],
    "generation_config": {"max_length": 32768}
}
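
For reference, below is a minimal sketch of how a config like this might be consumed with bert4torch to instantiate the int4 ChatGLM2 model. The local file paths are placeholders, and the keyword arguments assume bert4torch's usual build_transformer_model(config_path=..., checkpoint_path=...) interface; check the bert4torch documentation for the version you use.

# Minimal sketch (not an official loading script): build ChatGLM2-6B-int4 with
# bert4torch from this bert4torch_config.json. All paths below are placeholders.
from bert4torch.models import build_transformer_model

config_path = 'chatglm2-6b-int4/bert4torch_config.json'    # this file, saved locally (placeholder path)
checkpoint_path = 'chatglm2-6b-int4/pytorch_model.bin'     # original THUDM int4 weights (placeholder path)

# "model": "glm2" selects the ChatGLM2 architecture; the remaining keys describe it:
# 28 layers, hidden_size 4096, 32 attention heads with 2 key/value heads
# (multi-query attention), RMSNorm, SwiGLU activation, and 4-bit quantization
# via cpm_kernels as declared by quantization_bit / quantization_method.
model = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path)
model.eval()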