{ "_attn_implementation_autoset": true, "_name_or_path": "/tmp/models/tmp1h8l0e2_/tmpnqee64zx", "architectures": [ "FalconMambaForCausalLM" ], "bos_token_id": 0, "conv_kernel": 4, "eos_token_id": 11, "expand": 16, "hidden_act": "silu", "hidden_size": 4096, "initializer_range": 0.1, "intermediate_size": 8192, "layer_norm_epsilon": 1e-05, "mixer_rms_eps": 1e-06, "model_type": "falcon_mamba", "num_hidden_layers": 64, "pad_token_id": 11, "quantization_config": { "quant_config": { "offload_meta": false, "scale_quant_params": null, "weight_quant_params": { "axis": 1, "channel_wise": true, "group_size": 64, "nbits": 4, "optimize": true, "round_zero": true, "view_as_float": false }, "zero_quant_params": null }, "quant_method": "hqq", "skip_modules": [ "lm_head" ] }, "rescale_prenorm_residual": false, "residual_in_fp32": true, "state_size": 16, "tie_word_embeddings": false, "time_step_floor": 0.0001, "time_step_init_scheme": "random", "time_step_max": 0.1, "time_step_min": 0.001, "time_step_rank": 256, "time_step_scale": 1.0, "torch_dtype": "bfloat16", "transformers_version": "4.48.2", "use_bias": false, "use_cache": true, "use_conv_bias": true, "use_mambapy": false, "vocab_size": 65024 }