|
{
  "_from_model_config": true,
  "_name_or_path": "/temp_data/LLM_test/MOE/Yuan2-M32",
  "architectures": [
    "YuanForCausalLM"
  ],
  "attention_projection_size": 4096,
  "auto_map": {
    "AutoConfig": "configuration_yuan.YuanConfig",
    "AutoModelForCausalLM": "yuan_hf_model.YuanForCausalLM"
  },
  "bos_token_id": 77185,
  "causal_mask": true,
  "dropout": 0,
  "eod_token": 77185,
  "eod_token_id": 77185,
  "eos_token_id": 77185,
  "hidden_act": "silu",
  "hidden_size": 2048,
  "initializer_range": 0.02,
  "intermediate_size": 8192,
  "mask_token_id": 77185,
  "max_position_embeddings": 4096,
  "model_max_length": 8192,
  "model_type": "yuan",
  "moe_config": {
    "ffn_hidden_size": 8192,
    "gated_linear_unit": true,
    "moe_num_experts": 32,
    "moe_top_k": 2,
    "norm_topk_prob": true
  },
  "num_attention_heads": 16,
  "num_hidden_layers": 24,
  "output_router_logits": true,
  "pad_token_id": 77185,
  "quantization_config": {
    "bits": 8,
    "checkpoint_format": "gptq",
    "damp_percent": 0.01,
    "desc_act": false,
    "group_size": 128,
    "model_file_base_name": "gptq_model-8bit-128g",
    "model_name_or_path": "/temp_data/LLM_test/MOE/Yuan2-M32-GPTQ-int8",
    "quant_method": "gptq",
    "static_groups": false,
    "sym": true,
    "true_sequential": true
  },
  "reset_attention_mask": false,
  "reset_position_ids": true,
  "rms_norm_eps": 1e-06,
  "sep_token": 77187,
  "sep_token_id": 77185,
  "tokenizer_class": "YuanTokenizer",
  "torch_dtype": "bfloat16",
  "transformers_version": "4.39.3",
  "use_cache": true,
  "use_flash_attention": true,
  "use_loss_mask": false,
  "use_moe": true,
  "vocab_size": 135040
}
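
For reference, a minimal loading sketch for a checkpoint carrying this config. Because "auto_map" routes AutoConfig / AutoModelForCausalLM to the custom configuration_yuan.py / yuan_hf_model.py shipped with the checkpoint, trust_remote_code=True is required; the local path and the presence of a GPTQ backend (e.g. auto-gptq, or a GPTQ-capable transformers build) are assumptions here, not something the config itself guarantees.

# Loading sketch, assuming the quantized weights live at the path given in
# quantization_config["model_name_or_path"] and a GPTQ backend is installed.
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

model_path = "/temp_data/LLM_test/MOE/Yuan2-M32-GPTQ-int8"  # from quantization_config

# "auto_map" points the Auto classes at custom code shipped with the
# checkpoint, so trust_remote_code must be enabled.
config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

# "quant_method": "gptq" tells transformers to route the 8-bit,
# group-size-128 weights through its GPTQ integration on load.
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    trust_remote_code=True,
    device_map="auto",
)

inputs = tokenizer("Hello", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))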
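
And a small illustrative sketch (not Yuan's actual routing code) of what the moe_config fields imply: the router keeps moe_top_k = 2 of moe_num_experts = 32 experts per token, and norm_topk_prob = true renormalizes the two kept probabilities to sum to 1 before the expert outputs are mixed.

# Illustrative top-k routing with normalized probabilities; assumes the
# standard MoE semantics of norm_topk_prob, not Yuan-specific code.
import torch
import torch.nn.functional as F

def route_tokens(router_logits, top_k=2, norm_topk_prob=True):
    """router_logits: [num_tokens, num_experts] -> (weights, indices), each [num_tokens, top_k]."""
    probs = F.softmax(router_logits, dim=-1)             # distribution over all experts
    weights, indices = torch.topk(probs, top_k, dim=-1)  # keep the top_k largest
    if norm_topk_prob:
        weights = weights / weights.sum(dim=-1, keepdim=True)  # renormalize to sum to 1
    return weights, indices

logits = torch.randn(4, 32)   # 4 tokens, 32 experts
w, idx = route_tokens(logits)
print(idx)        # chosen expert ids per token
print(w.sum(-1))  # all ones when norm_topk_prob is true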
|
|