{ "config_version": "1.0.0", "model_id": "meta-llama/Llama-3.1-70B", "model_kinds": [ "ARTIFACT" ], "model_class": { "module": "furiosa_llm_models.llama3.symbolic.aramco_specdec", "name": "LlamaForCausalLM" }, "llm_config": { "optimization_config": { "attention_type": "PAGED_ATTENTION", "optimize_rope": true, "optimize_packed": true, "decompose_layernorm": false, "optimize_furiosa": false, "use_unsplit_packed": false, "compact_causal_mask": false, "use_rngd_gelu": false, "causal_mask_free_decoding": true, "kv_cache_sharing_across_beams": false, "inbound_beamsearch_softmax": false, "calculate_logit_only_for_last_token": false, "optimized_for_speculative_decoding": true, "use_2d_masks": false, "merged_kv_indices": false }, "quantization_config": { "weight": "int8", "activation": "bf16", "kv_cache": "bf16", "use_mcp": true } }, "components_versions": { "furiosa_llm": { "version": "0.1.0-dev", "git_hash": "137c9e3", "build_time": null }, "furiosa_ir": { "version": "0.11.0-dev", "git_hash": "4467f6a699", "build_time": "2025-08-28T02:41:28Z" }, "furiosa_runtime": { "version": "2025.3.2", "git_hash": "137c9e383", "build_time": "2025-08-28T02:41:32Z" }, "furiosa_model_compressor": { "version": "2025.3.0", "git_hash": "4cd9804", "build_time": null } } }