bert4torch_config / THUDM/chatglm-6b-int4 / bert4torch_config.json
{
    "model": "glm",
    "hidden_act": "gelu_new",
    "bos_token_id": 130004,
    "eos_token_id": 130005,
    "mask_token_id": 130000,
    "gmask_token_id": 130001,
    "pad_token_id": 3,
    "hidden_size": 4096,
    "intermediate_size": 16384,
    "layer_norm_eps": 1e-05,
    "max_sequence_length": 2048,
    "num_attention_heads": 32,
    "num_hidden_layers": 28,
    "position_encoding_2d": true,
    "rope_scaling": {"type": "glm"},
    "torch_dtype": "float16",
    "vocab_size": 130528,
    "segment_vocab_size": 0,
    "skip_init": true,
    "rope_rank": "updown",
    "tie_word_embeddings": false,
    "quantization_bit": 4,
    "quantization_method": "cpm_kernels",
    "target_modules": ["q", "k", "v", "o", "intermediateDense", "outputDense"],
    "generation_config": {"max_length": 2048}
}
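
For reference, a minimal sketch of how a config like this is typically consumed with bert4torch's build_transformer_model; the local file paths below are placeholders (assumptions), not files shipped alongside this config.

# Minimal loading sketch (assumed paths; adjust to your local checkpoint layout).
from bert4torch.models import build_transformer_model

config_path = "THUDM/chatglm-6b-int4/bert4torch_config.json"   # this config file
checkpoint_path = "THUDM/chatglm-6b-int4/pytorch_model.bin"    # assumed weights path

# build_transformer_model reads "model": "glm" plus the quantization fields
# above and assembles the matching ChatGLM architecture on top of PyTorch.
model = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path)
model.eval()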