{ "_attn_implementation_autoset": true, "_block_types": [ "recurrent", "recurrent", "attention" ], "_name_or_path": "/tmp/models/tmp4ukbr2br/tmpc0zwa1g1", "architectures": [ "RecurrentGemmaForCausalLM" ], "attention_bias": false, "attention_dropout": 0.0, "attention_window_size": 2048, "block_types": [ "recurrent", "recurrent", "attention" ], "bos_token_id": 2, "conv1d_width": 4, "embeddings_scale_by_sqrt_dim": true, "eos_token_id": 1, "final_w_init_variance_scale": 0.07692307692307693, "head_dim": 256, "hidden_activation": "gelu_pytorch_tanh", "hidden_size": 2560, "intermediate_size": 15360, "logits_soft_cap": 30.0, "lru_width": 2560, "model_type": "recurrent_gemma", "num_attention_heads": 10, "num_hidden_layers": 26, "num_key_value_heads": 1, "pad_token_id": 0, "partial_rotary_factor": 0.5, "quantization_config": { "quant_config": { "offload_meta": false, "scale_quant_params": null, "weight_quant_params": { "axis": 1, "channel_wise": true, "group_size": 64, "nbits": 8, "optimize": true, "round_zero": false, "view_as_float": false }, "zero_quant_params": null }, "quant_method": "hqq", "skip_modules": [ "lm_head" ] }, "rms_norm_eps": 1e-06, "rope_theta": 10000.0, "torch_dtype": "bfloat16", "transformers_version": "4.48.2", "use_cache": true, "vocab_size": 256000, "w_init_variance_scale": 0.01 }