{ "builder_config": { "apply_query_key_layer_scaling": false, "fp8": false, "hidden_act": "gelu", "hidden_size": 6144, "int8": false, "max_batch_size": 256, "max_input_len": 200, "max_output_len": 200, "max_position_embeddings": 8192, "multi_query_mode": true, "name": "gpt", "num_heads": 48, "num_layers": 40, "paged_kv_cache": false, "parallel_build": false, "precision": "float16", "tensor_parallel": 1, "tokens_per_block": 64, "use_parallel_embedding": false, "use_prompt_tuning": false, "use_refit": false, "vocab_size": 49152 }, "plugin_config": { "attention_qk_half_accumulation": false, "bert_attention_plugin": false, "context_fmha_type": 1, "gemm_plugin": "float16", "gpt_attention_plugin": "float16", "identity_plugin": false, "in_flight_batching": false, "inflight_batching_gpt_attention_plugin": false, "layernorm_plugin": "float16", "layernorm_quantization_plugin": false, "lookup_plugin": false, "nccl_plugin": false, "paged_kv_cache": false, "quantize_per_token_plugin": false, "quantize_tensor_plugin": false, "remove_input_padding": false, "rmsnorm_plugin": false, "rmsnorm_quantization_plugin": false, "smooth_quant_gemm_plugin": false, "weight_only_groupwise_quant_matmul_plugin": false, "weight_only_quant_matmul_plugin": false } }