{ "_name_or_path": "qwen/Qwen-72B-Chat/", "architectures": [ "QWenLMHeadModel" ], "attn_dropout_prob": 0.0, "auto_map": { "AutoConfig": "configuration_qwen.QWenConfig", "AutoModelForCausalLM": "modeling_qwen.QWenLMHeadModel" }, "bf16": false, "emb_dropout_prob": 0.0, "fp16": true, "fp32": false, "hidden_size": 8192, "initializer_range": 0.02, "intermediate_size": 49152, "kv_channels": 128, "layer_norm_epsilon": 1e-05, "max_position_embeddings": 32768, "model_type": "qwen", "no_bias": true, "num_attention_heads": 64, "num_hidden_layers": 80, "onnx_safe": null, "padded_vocab_size": 152064, "quantization_config": { "codebook": "E8P12", "codesz": 8, "idx_dtype": "torch.int16", "merge_suv": true, "modules_to_not_convert": null, "quant_method": "QUiP", "rescale_WH": false, "use_rand": true }, "rope_theta": 1000000, "rotary_emb_base": 1000000, "rotary_pct": 1.0, "scale_attn_weights": true, "seq_length": 32768, "softmax_in_fp32": false, "tie_word_embeddings": false, "tokenizer_type": "QWenTokenizer", "torch_dtype": "float16", "transformers_version": "4.36.1", "use_cache": true, "use_cache_kernel": false, "use_cache_quantization": false, "use_dynamic_ntk": false, "use_flash_attn": true, "use_logn_attn": false, "vocab_size": 152064 }