{ "builder_config": { "hidden_size": 1280, "int8": false, "max_batch_size": 24, "n_mels": 80, "name": "encoder", "num_heads": 20, "num_languages": 99, "num_layers": 32, "precision": "float16", "tensor_parallel": 1, "use_refit": false }, "plugin_config": { "attention_qk_half_accumulation": false, "bert_attention_plugin": null, "context_fmha_type": 0, "enable_xqa": false, "gemm_plugin": "float16", "gpt_attention_plugin": null, "identity_plugin": null, "layernorm_plugin": null, "layernorm_quantization_plugin": null, "lookup_plugin": null, "lora_plugin": null, "multi_block_mode": false, "nccl_plugin": null, "paged_kv_cache": false, "quantize_per_token_plugin": false, "quantize_tensor_plugin": false, "remove_input_padding": false, "rmsnorm_plugin": null, "rmsnorm_quantization_plugin": null, "selective_scan_plugin": false, "smooth_quant_gemm_plugin": null, "tokens_per_block": 0, "use_context_fmha_for_generation": false, "use_custom_all_reduce": false, "use_paged_context_fmha": false, "weight_only_groupwise_quant_matmul_plugin": null, "weight_only_quant_matmul_plugin": null } }