{ "version": "0.12.0.dev2024072301", "pretrained_config": { "mlp_bias": false, "attn_bias": false, "rotary_base": 500000.0, "rotary_scaling": { "factor": 8.0, "low_freq_factor": 1.0, "high_freq_factor": 4.0, "original_max_position_embeddings": 8192, "rope_type": "llama3" }, "residual_mlp": false, "disable_weight_only_quant_plugin": false, "moe": { "num_experts": 0, "top_k": 0, "normalization_mode": 1, "tp_mode": 0 }, "architecture": "LlamaForCausalLM", "dtype": "bfloat16", "vocab_size": 128256, "hidden_size": 8192, "num_hidden_layers": 80, "num_attention_heads": 64, "hidden_act": "silu", "logits_dtype": "float16", "norm_epsilon": 1e-05, "position_embedding_type": "rope_gpt_neox", "max_position_embeddings": 131072, "num_key_value_heads": 8, "intermediate_size": 28672, "mapping": { "world_size": 4, "gpus_per_node": 8, "tp_size": 4, "pp_size": 1, "moe_tp_size": 4, "moe_ep_size": 1 }, "quantization": { "quant_algo": "FP8", "kv_cache_quant_algo": "FP8", "group_size": 128, "smoothquant_val": null, "clamp_val": null, "has_zero_point": false, "pre_quant_scale": false, "exclude_modules": null }, "use_parallel_embedding": true, "embedding_sharding_dim": 0, "share_embedding_table": false, "head_size": 128, "qk_layernorm": false, "producer": { "name": "modelopt", "version": "0.13.1" }, "bias": false, "rotary_pct": 1.0, "rank": 0, "decoder": "llama", "rmsnorm": true, "lm_head_bias": false }, "build_config": { "max_input_len": 1024, "max_seq_len": 16000, "opt_batch_size": null, "max_batch_size": 256, "max_beam_width": 1, "max_num_tokens": 8192, "opt_num_tokens": 256, "max_prompt_embedding_table_size": 0, "gather_context_logits": false, "gather_generation_logits": false, "strongly_typed": true, "builder_opt": null, "force_num_profiles": null, "profiling_verbosity": "layer_names_only", "enable_debug_output": false, "max_draft_len": 0, "speculative_decoding_mode": 1, "use_refit": false, "input_timing_cache": null, "output_timing_cache": "model.cache", "lora_config": { "lora_dir": [], "lora_ckpt_source": "hf", "max_lora_rank": 64, "lora_target_modules": [], "trtllm_modules_to_hf_modules": {} }, "auto_parallel_config": { "world_size": 1, "gpus_per_node": 8, "cluster_key": "H100-PCIe", "cluster_info": null, "sharding_cost_model": "alpha_beta", "comm_cost_model": "alpha_beta", "enable_pipeline_parallelism": false, "enable_shard_unbalanced_shape": false, "enable_shard_dynamic_shape": false, "enable_reduce_scatter": true, "builder_flags": null, "debug_mode": false, "infer_shape": true, "validation_mode": false, "same_buffer_io": { "past_key_value_(\\d+)": "present_key_value_\\1" }, "same_spec_io": {}, "sharded_io_allowlist": [ "past_key_value_\\d+", "present_key_value_\\d*" ], "fill_weights": false, "parallel_config_cache": null, "profile_cache": null, "dump_path": null, "debug_outputs": [] }, "weight_sparsity": false, "weight_streaming": false, "plugin_config": { "dtype": "bfloat16", "bert_attention_plugin": "auto", "gpt_attention_plugin": "auto", "gemm_plugin": null, "gemm_swiglu_plugin": null, "fp8_rowwise_gemm_plugin": null, "smooth_quant_gemm_plugin": null, "identity_plugin": null, "layernorm_quantization_plugin": null, "rmsnorm_quantization_plugin": null, "nccl_plugin": "bfloat16", "lookup_plugin": null, "lora_plugin": null, "weight_only_groupwise_quant_matmul_plugin": null, "weight_only_quant_matmul_plugin": null, "quantize_per_token_plugin": false, "quantize_tensor_plugin": false, "moe_plugin": "auto", "mamba_conv1d_plugin": "auto", "context_fmha": true, "context_fmha_fp32_acc": false, "paged_kv_cache": 
true, "remove_input_padding": true, "reduce_fusion": false, "enable_xqa": true, "tokens_per_block": 64, "use_paged_context_fmha": true, "use_fp8_context_fmha": true, "multiple_profiles": false, "paged_state": true, "streamingllm": false }, "use_strip_plan": false, "max_encoder_input_len": 1024, "use_fused_mlp": false } }